diff --git a/spaces/17TheWord/vits-models/commons.py b/spaces/17TheWord/vits-models/commons.py
deleted file mode 100644
index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/vits-models/commons.py
+++ /dev/null
@@ -1,172 +0,0 @@
-import math
-import torch
-from torch.nn import functional as F
-import torch.jit
-
-
-def script_method(fn, _rcb=None):
-  return fn
-
-
-def script(obj, optimize=True, _frames_up=0, _rcb=None):
-  return obj
-
-
-torch.jit.script_method = script_method
-torch.jit.script = script
-
-
-def init_weights(m, mean=0.0, std=0.01):
-  classname = m.__class__.__name__
-  if classname.find("Conv") != -1:
-    m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
-  return int((kernel_size*dilation - dilation)/2)
-
-
-def convert_pad_shape(pad_shape):
-  l = pad_shape[::-1]
-  pad_shape = [item for sublist in l for item in sublist]
-  return pad_shape
-
-
-def intersperse(lst, item):
-  result = [item] * (len(lst) * 2 + 1)
-  result[1::2] = lst
-  return result
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
-  """KL(P||Q)"""
-  kl = (logs_q - logs_p) - 0.5
-  kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
-  return kl
-
-
-def rand_gumbel(shape):
-  """Sample from the Gumbel distribution, protect from overflows."""
-  uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-  return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
-  g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-  return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
-  ret = torch.zeros_like(x[:, :, :segment_size])
-  for i in range(x.size(0)):
-    idx_str = ids_str[i]
-    idx_end = idx_str + segment_size
-    ret[i] = x[i, :, idx_str:idx_end]
-  return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
-  b, d, t = x.size()
-  if x_lengths is None:
-    x_lengths = t
-  ids_str_max = x_lengths - segment_size + 1
-  ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-  ret = slice_segments(x, ids_str, segment_size)
-  return ret, ids_str
-
-
-def get_timing_signal_1d(
-    length, channels, min_timescale=1.0, max_timescale=1.0e4):
-  position = torch.arange(length, dtype=torch.float)
-  num_timescales = channels // 2
-  log_timescale_increment = (
-      math.log(float(max_timescale) / float(min_timescale)) /
-      (num_timescales - 1))
-  inv_timescales = min_timescale * torch.exp(
-      torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
-  scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-  signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-  signal = F.pad(signal, [0, 0, 0, channels % 2])
-  signal = signal.view(1, channels, length)
-  return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-  b, channels, length = x.size()
-  signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-  return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-  b, channels, length = x.size()
-  signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-  return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
-  mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-  return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-  n_channels_int = n_channels[0]
-  in_act = input_a + input_b
-  t_act = torch.tanh(in_act[:, :n_channels_int, :])
-  s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-  acts = t_act * s_act
-  return acts
-
-
-def convert_pad_shape(pad_shape):
-  l = pad_shape[::-1]
-  pad_shape = [item for sublist in l for item in sublist]
-  return pad_shape
-
-
-def shift_1d(x):
-  x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-  return x
-
-
-def sequence_mask(length, max_length=None):
-  if max_length is None:
-    max_length = length.max()
-  x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-  return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
-  """
-  duration: [b, 1, t_x]
-  mask: [b, 1, t_y, t_x]
-  """
-  device = duration.device
-
-  b, _, t_y, t_x = mask.shape
-  cum_duration = torch.cumsum(duration, -1)
-
-  cum_duration_flat = cum_duration.view(b * t_x)
-  path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-  path = path.view(b, t_x, t_y)
-  path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-  path = path.unsqueeze(1).transpose(2,3) * mask
-  return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-  if isinstance(parameters, torch.Tensor):
-    parameters = [parameters]
-  parameters = list(filter(lambda p: p.grad is not None, parameters))
-  norm_type = float(norm_type)
-  if clip_value is not None:
-    clip_value = float(clip_value)
-
-  total_norm = 0
-  for p in parameters:
-    param_norm = p.grad.data.norm(norm_type)
-    total_norm += param_norm.item() ** norm_type
-    if clip_value is not None:
-      p.grad.data.clamp_(min=-clip_value, max=clip_value)
-  total_norm = total_norm ** (1. / norm_type)
-  return total_norm
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Corel VideoStudio Ultimate 23.0.1.404 Crack How to Download and Install the Latest Version.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Corel VideoStudio Ultimate 23.0.1.404 Crack How to Download and Install the Latest Version.md
deleted file mode 100644
index 1d616141ce07079f83261ac6daad22e2b967d347..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Corel VideoStudio Ultimate 23.0.1.404 Crack How to Download and Install the Latest Version.md
+++ /dev/null
@@ -1,165 +0,0 @@
-
-

Corel VideoStudio Ultimate 2023 Crack V23.0.1.404: A Powerful and Easy-to-Use Video Editor

-

Introduction

-

If you are looking for a video editing software that can help you create stunning videos with ease and creativity, you might want to check out Corel VideoStudio Ultimate 2023. This software is one of the best video editors on the market, with many features and tools that can suit any skill level and style. In this article, we will give you an overview of what Corel VideoStudio Ultimate 2023 is, what are its main features, how to download and install it, and how to use it for video editing.

-

Corel VideoStudio Ultimate 23.0.1.404 Crack


Download 🆗 https://byltly.com/2uKuZi



-

What is Corel VideoStudio Ultimate 2023?

-

Corel VideoStudio Ultimate 2023 is the latest version of the popular video editing software from Corel Corporation. It is a comprehensive and versatile video editor that can handle any type of video project, from simple slideshows to complex movies. It supports HD, 4K, and 360-degree video formats, as well as a wide range of audio and image formats. It also has a user-friendly interface that makes it easy to navigate and customize.

-

What are the main features of Corel VideoStudio Ultimate 2023?

-

Corel VideoStudio Ultimate 2023 has many features that can enhance your video editing experience and results. Some of the main features are:

- -

How to download and install Corel VideoStudio Ultimate 2023?

-

To download and install Corel VideoStudio Ultimate 2023, you need to follow these steps:

-
    -
  1. Download Corel Video Studio Full Version from this link. The file size is about 3.8 GB.
  2. -
  3. Disable your antivirus or Windows Defender.
  4. -
  5. Extract the file with the latest version of Winrar.
  6. -
  7. Run the Setup.exe file in the first folder setup\\ultimate.
  8. -
  9. If asked for the serial number, use this one: VU21U22-G5DLEN7-3W8XX86-AAWAUUY.
  10. -
  11. After that, don't run the software yet.
  12. -
  13. Install the second folder, Update.
  14. -
  15. Copy the PASMUTILITY.dll and vstudio.exe crack files from the Crack folder.
  16. -
  17. Paste and replace these files to C:\\Program Files\\Corel\\Corel VideoStudio 2023\\.
  18. -
  19. If a window appears to download, just close it.
  20. -
  21. If the effects content does not appear, install Contents64.msi from the Setup\\Common folder.
  22. -
-

How to use Corel VideoStudio Ultimate 2023 for video editing?

-

Now that you have installed Corel VideoStudio Ultimate 2023, you can start using it for video editing. Here are some basic steps to help you get started:

-

How to import and organize media files?

-

To import and organize media files, you need to do the following:

-


-
    -
  1. Launch Corel VideoStudio Ultimate 2023 and choose a project mode: Timeline or Storyboard.
  2. -
  3. Click on the Import button on the toolbar and select Import Media Files or Import from Camera or Device.
  4. -
  5. Browse and select the media files you want to import. You can also drag and drop them from your computer or device.
  6. -
  7. The imported media files will appear in the Library panel. You can organize them into folders, tags, or collections by right-clicking on them and choosing an option.
  8. -
-

How to edit videos on the timeline?

-

To edit videos on the timeline, you need to do the following:

-
    -
  1. Drag and drop the media files from the Library panel to the timeline. You can also use the Insert or Overlay buttons on the toolbar.
  2. -
  3. To trim, crop, rotate, split, or merge clips, use the tools on the toolbar or right-click on them and choose an option.
  4. -
  5. To adjust the speed, duration, or reverse of clips, double-click on them and use the Options panel.
  6. -
  7. To add transitions between clips, click on the Transitions button on the toolbar and drag and drop a transition to the timeline. You can also double-click on a transition and use the Options panel to customize it.
  8. -
-

How to apply transitions, effects, filters, and titles?

-

To apply transitions, effects, filters, and titles, you need to do the following:

-
    -
  1. To apply effects or filters to clips, click on the FX button on the toolbar and drag and drop an effect or filter to a clip. You can also double-click on an effect or filter and use the Options panel to customize it.
  2. -
  3. To apply titles to clips, click on the Titles button on the toolbar and drag and drop a title to a clip. You can also double-click on a title and use the Options panel to customize it.
  4. -
  5. To apply overlays or animations to clips, click on the Overlays button on the toolbar and drag and drop an overlay or animation to a clip. You can also double-click on an overlay or animation and use the Options panel to customize it.
  6. -
-

How to use color grading, masking, and stabilization tools?

-

To use color grading, masking, and stabilization tools, you need to do the following:

-
    -
  1. To use color grading tools to adjust the colors of your videos, click on the Color button on the toolbar and select a color grading option: Basic, Tone Curve, HSL, or LUT.
  2. -
  3. To use masking tools to apply effects or filters to a specific area of your videos, click on the Mask button on the toolbar and select a masking option: Shape, Paint, or Motion Tracking.
  4. -
  5. To use stabilization tools to correct shaky footage, click on the Stabilize button on the toolbar and select a stabilization option: proDAD Mercalli or VideoStudio Stabilizer.
  6. -
-

How to export and share videos?

-

To export and share videos, you need to do the following:

-
    -
  1. Click on the Export button on the toolbar and select an export option: File, Device, Web, or Disc.
  2. -
  3. Choose a format, quality, and location for your video. You can also customize the settings by clicking on the Options button.
  4. -
  5. Click on the Start button to begin exporting your video.
  6. -
  7. To share your video online, click on the Share button on the toolbar and select a platform: YouTube, Vimeo, Facebook, or Flickr. You can also sign in to your account and add a title, description, and tags for your video.
  8. -
-

Conclusion

-

Corel VideoStudio Ultimate 2023 is a powerful and easy-to-use video editor that can help you create amazing videos with ease and creativity. It has many features and tools that can suit any skill level and style. It also has a user-friendly interface that makes it easy to navigate and customize. You can download and install Corel VideoStudio Ultimate 2023 from this link and start using it for video editing. You can also check out some tutorials and tips from this link to learn more about Corel VideoStudio Ultimate 2023.

-

Why choose Corel VideoStudio Ultimate 2023 for video editing?

-

You should choose Corel VideoStudio Ultimate 2023 for video editing because:

- -

FAQs

-

Here are some frequently asked questions about Corel VideoStudio Ultimate 2023:

-
    -
  1. What are the system requirements for Corel VideoStudio Ultimate 2023?
  2. -

    The system requirements for Corel VideoStudio Ultimate 2023 are:

    - -
  3. How much does Corel VideoStudio Ultimate 2023 cost?
  4. -

    The price of Corel VideoStudio Ultimate 2023 is $99.99 for the full version or $79.99 for the upgrade version. You can also get a free trial version for 30 days from this link.

    -
  5. How can I get support for Corel VideoStudio Ultimate 2023?
  6. -

    You can get support for Corel VideoStudio Ultimate 2023 through its online help, tutorials, and forums from this link.

    -
  7. Is Corel VideoStudio Ultimate 2023 compatible with Windows 11?
  8. -

    Yes, Corel VideoStudio Ultimate 2023 is compatible with Windows 11. However, you may need to update your device drivers and software to ensure optimal performance.

    -
  9. Is Corel VideoStudio Ultimate 2023 safe to use?
  10. -

    Yes, Corel VideoStudio Ultimate 2023 is safe to use. However, you should always download and install it from the official website or a trusted source. You should also avoid using any cracked or pirated versions of the software, as they may contain viruses or malware that can harm your device or data.

    -
-

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dreamweaver Cs6 Classroom In A Book Pdf Download.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dreamweaver Cs6 Classroom In A Book Pdf Download.md deleted file mode 100644 index eef52e84ac0092b0bda3af9eadca9ca2f5def44c..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dreamweaver Cs6 Classroom In A Book Pdf Download.md +++ /dev/null @@ -1,30 +0,0 @@ - -```html -

Dreamweaver Cs6 Classroom In A Book Pdf Download: Learn Web Design with Adobe's Official Guide

-

If you want to learn how to create professional and responsive websites using Adobe Dreamweaver CS6, you need to get your hands on the Dreamweaver Cs6 Classroom In A Book Pdf Download. This is the official training guide from Adobe Systems that covers everything you need to know about the latest version of Dreamweaver, the industry-leading web design software.

-

Dreamweaver Cs6 Classroom In A Book Pdf Download


Download Zip » https://byltly.com/2uKwei




The Dreamweaver Cs6 Classroom In A Book Pdf Download contains 17 lessons that walk you through the basics of web design, such as creating HTML pages, adding text and images, styling with CSS, working with templates and libraries, and publishing your site. You will also learn how to use advanced features of Dreamweaver CS6, such as creating fluid grid layouts, adding interactivity with jQuery and Spry widgets, integrating with Adobe Business Catalyst and PhoneGap Build, and testing and debugging your site.

-

Why You Should Get the Dreamweaver Cs6 Classroom In A Book Pdf Download

-

There are many reasons why you should get the Dreamweaver Cs6 Classroom In A Book Pdf Download if you want to master web design with Dreamweaver CS6. Here are some of them:

- -

How to Get the Dreamweaver Cs6 Classroom In A Book Pdf Download

-

If you are interested in getting the Dreamweaver Cs6 Classroom In A Book Pdf Download, you have two options:

-
    -
  1. You can buy the paperback version of the book from Amazon or other online retailers and get the DVD-ROM with it.
  2. -
  3. You can download the PDF version of the book from Adobe's website and access the DVD-ROM content online.
  4. -
-

Either way, you will get access to the same high-quality content that will help you learn web design with Dreamweaver CS6. The PDF version of the book is especially convenient if you want to read it on your computer or mobile device without carrying a physical copy around.

-

Conclusion

-

The Dreamweaver Cs6 Classroom In A Book Pdf Download is a must-have resource for anyone who wants to learn web design with Adobe Dreamweaver CS6. It covers all the essential topics and features of the software in a clear and engaging way. It also provides you with practical exercises and projects that will help you apply your knowledge and skills in real-world scenarios. Whether you are a beginner or an experienced web designer, you will benefit from this comprehensive and official guide from Adobe Systems.

-

-

To get your copy of the Dreamweaver Cs6 Classroom In A Book Pdf Download, click on one of the links below:

-https://byltly.com/2uKzG4



-

So how do you get a registration code for Express Scribe? And how do you use it effectively? In this article, we will answer these questions and more. We will also provide some tips on how to troubleshoot common issues with Express Scribe.

-

How to Get a Registration Code for Express Scribe

-

Getting a registration code for Express Scribe is not difficult, but it requires some steps. Here is an overview of what you need to do:

- -

Let's look at each step in more detail.

-

How to find Express Scribe license details
-Express Scribe trial period expiration
-Express Scribe online purchase
-Express Scribe activation process
-Express Scribe registration window
-Express Scribe license per installation
-Express Scribe dictation software
-Express Scribe user license
-Express Scribe site license
-Express Scribe license classes
-Express Scribe upgrade pricing
-Express Scribe compatibility with word applications
-Express Scribe automation features
-Express Scribe voice recognition integration
-Express Scribe pro-level audio editing software
-Express Scribe crack keygen download
-Express Scribe serial key number
-Express Scribe free download full version
-Express Scribe transcription software review
-Express Scribe best alternative software
-How to use Express Scribe for transcription
-Express Scribe foot pedal support
-Express Scribe hotkeys and shortcuts
-Express Scribe playback speed control
-Express Scribe file format compatibility
-How to import audio files into Express Scribe
-How to sync Express Scribe with cloud services
-How to export transcripts from Express Scribe
-How to customize Express Scribe settings and preferences
-How to troubleshoot Express Scribe issues and errors
-How to contact Express Scribe customer support
-How to uninstall Express Scribe from your computer
-How to update Express Scribe to the latest version
-How to backup and restore Express Scribe data and settings
-How to access Express Scribe online help and tutorials
-How to join Express Scribe affiliate program and earn commissions
-How to get a refund for Express Scribe purchase
-How to transfer Express Scribe license to another computer or user
-How to renew Express Scribe subscription or maintenance plan
-How to verify the authenticity of your Express Scribe registration code
-How to get a discount or coupon for Express Scribe purchase
-How to share your feedback or testimonial about Express Scribe software
-How to connect your smartphone or tablet with Express Scribe app
-How to transcribe video files with Express Scribe software
-How to adjust the audio quality and volume in Express Scribe software
-How to use speech-to-text feature in Express Scribe software
-How to add timestamps and notes in your transcripts with Express Scribe software
-How to manage multiple users and projects with Express Scribe software
-How to collaborate with other transcribers using Express Scribe software

-

Purchase Express Scribe Online

-

The first step is to purchase Express Scribe online from the official website of NCH Software, the developer of the program. You can choose from different license classes depending on your needs and budget. For example, you can buy a single user license for $39.95 or a site license for $299.

-

After you complete your payment, you will receive an email with your 13-digit serial number. This is your license serial number that you need to activate online.

-

Activate Your Serial Number Online

-

The second step is to activate your serial number online at https://secure.nch.com.au/activate by filling out the form with your name, email address, serial number, and product name (Express Scribe). After you submit the form, you will receive another email with your registration code.

-

Enter Your Registration Code into the Software

-

The third step is to enter your registration code into the software and convert the trial version into the professional version. To do this, open Express Scribe and select Register Express Scribe from the File menu. Then copy and paste your registration code into the text box and click Register.

-

If you do not receive an error message, the registration code has been accepted and you can now use all the features of Express Scribe without any limitations.

-

How to Use Express Scribe Effectively

-

Now that you have a registration code for Express Scribe, you might be wondering how to use it effectively for transcription. Here are some tips and tricks that can help you improve your productivity and accuracy:

-

Adjust Playback Settings

-

One of the advantages of Express Scribe is that it allows you to adjust playback settings such as speed, volume, and tone according to your preferences and needs. You can access these settings by clicking on Options > Playback in the toolbar.

-

For example, you can increase or decrease the speed of playback using the slider or by pressing F9 or F10 on your keyboard. You can also adjust the volume using the slider or by pressing F7 or F8 on your keyboard. You can also change the tone of playback using the slider or by pressing F11 or F12 on your keyboard.

-

These settings can help you hear the audio more clearly and transcribe more efficiently.

-

Use Hotkeys and Pedals

-

Another way to use Express Scribe effectively is to use hotkeys and pedals to control playback and transcription without taking your hands off the keyboard or foot off the pedal. Hotkeys are keyboard shortcuts that allow you to perform common actions such as play, pause, rewind, fast forward, etc.

-

You can view and customize these hotkeys by clicking on Options > Hot Keys in the toolbar. For example, you can set Ctrl + P as play/pause or Ctrl + R as rewind.

-

Pedals are foot switches that allow you to control playback using your feet while typing with your hands. Pedals are especially useful for professional transcribers who need to transcribe long hours without interruption.

-

You can connect pedals to your computer using a USB port or an adapter cable. You can also configure pedals by clicking on Options > Controller Setup Wizard in the toolbar.

-

Sync with Word Processors and Voice Recognition Software

-

A third way to use Express Scribe effectively is to sync it with word processors and voice recognition software that can help you improve accuracy and efficiency of transcription.

-

You can sync Express Scribe with word processors such as Microsoft Word or Google Docs by clicking on Options > Transcription Options in the toolbar. This will allow you to type directly into these programs while listening to audio files in Express Scribe.

-

You can also sync Express Scribe with voice recognition software such as Dragon NaturallySpeaking or Windows Speech Recognition by clicking on Options > Speech Recognition in the toolbar. This will allow you to dictate instead of type while listening to audio files in Express Scribe.

-

How to Troubleshoot Common Issues with Express Scribe

-

Sometimes, you might encounter some issues with Express Scribe that can affect your transcription experience. Here are some solutions for common issues such as lost registration codes, outdated versions, and compatibility problems:

-

Recover Your Lost Registration Code

-

If you lose your registration code for Express Scribe, don't panic. You can recover it using one of these methods:

- -

Once you have your registration code, you can enter it into the software as explained above.

-

Upgrade Your Outdated Version

-

If you have an outdated version of Express Scribe, you might experience some issues such as bugs, errors, or incompatibility with newer audio formats. To avoid these problems, you should upgrade your version of Express Scribe at discounted pricing.

-

To do this, you need to visit https://www.nch.com.au/upgrade/index.html and enter your existing registration code. You will then be able to purchase the latest version of Express Scribe at a reduced price. You will receive a new registration code by email that you can use to activate the software.

-

Check Your Compatibility Requirements

-

If you have trouble playing some audio files or connecting some devices with Express Scribe, you might need to check your compatibility requirements for the software. Express Scribe has different requirements for different operating systems, audio formats, and hardware.

-

You can find the compatibility requirements for Express Scribe at https://www.nch.com.au/scribe/kb/656.html. This page lists the supported audio and video file formats, as well as the recommended operating systems and hardware specifications for Express Scribe.

-

If your system does not meet these requirements, you might need to update your software, drivers, or hardware to ensure optimal performance of Express Scribe.

-

Conclusion

-

Express Scribe is a powerful and user-friendly transcription software that can help you transcribe audio files with ease and accuracy. However, to use it fully, you need a registration code that you can obtain by purchasing the software online and activating your serial number online.

-

Once you have your registration code, you can enter it into the software and enjoy all the features of Express Scribe. You can also use some tips and tricks to use Express Scribe effectively, such as adjusting playback settings, using hotkeys and pedals, and syncing with word processors and voice recognition software.

-

If you encounter any issues with Express Scribe, such as lost registration codes, outdated versions, or compatibility problems, you can troubleshoot them by using the automated utility, contacting customer support, upgrading your version, or checking your compatibility requirements.

-

We hope this article has helped you understand what Express Scribe registration code is and how to get it and use it. If you have any questions or feedback, please feel free to contact us. We would love to hear from you.

-

FAQs

-

Here are some frequently asked questions related to Express Scribe registration code:

-

Q: How long does it take to receive my registration code after purchasing Express Scribe?

-

A: It usually takes a few minutes to receive your registration code by email after purchasing Express Scribe online. However, sometimes it might take longer due to network delays or spam filters. If you do not receive your registration code within 24 hours, please contact customer support at https://www.nch.com.au/support/supportcontact.html?software=ExpressScribe&support.

-

Q: Can I use my registration code on multiple computers?

-

A: No, you cannot use your registration code on multiple computers. Each registration code is valid for one installation only. If you want to use Express Scribe on more than one computer, you need to purchase additional licenses or a site license.

-

Q: Can I transfer my registration code to another person?

-

A: No, you cannot transfer your registration code to another person. Each registration code is associated with a specific name and email address and cannot be changed or transferred. If you want to give Express Scribe as a gift to someone else, you need to purchase a new license for them.

-

Q: What if I lose my registration code or serial number?

-

A: If you lose your registration code or serial number for Express Scribe, you can recover it using one of these methods:

- -

Q: How can I get a free trial of Express Scribe?

-

A: You can get a free trial of Express Scribe by downloading it from https://www.nch.com.au/scribe/index.html. The free trial version has limited features and expires after 14 days. To use the full functionality of Express Scribe without any limitations, you need to purchase a license and enter a registration code into the software.

-

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Quantum Resonance Magnetic Analyzer Software.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Quantum Resonance Magnetic Analyzer Software.md deleted file mode 100644 index 2967221c296de30289d629cb6494a90a3062ce6a..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Quantum Resonance Magnetic Analyzer Software.md +++ /dev/null @@ -1,28 +0,0 @@ - -

How to Download Quantum Resonance Magnetic Analyzer Software

-

Quantum resonance magnetic analyzer software is a tool that can help you measure your health status and provide suggestions for improvement. It can analyze various aspects of your body, such as cardiovascular, gastrointestinal, liver, kidney, skin, endocrine, immune, etc. It can also detect heavy metals, allergies, coenzymes, amino acids, vitamins and other elements in your body.

-

But how can you download quantum resonance magnetic analyzer software? In this article, we will show you the steps to download the latest digital version of the software in English or Spanish.

-

Download quantum resonance magnetic analyzer software


Download ››››› https://imgfil.com/2uy0B4



-

Step 1: Check your machine compatibility

-

Before you download quantum resonance magnetic analyzer software, you need to make sure that your machine is compatible with the software. The software works in conjunction with a USB key that must be serialized. You should have received the USB key with your machine when you bought it. If you don't have the USB key or if your machine is not upgradeable with higher versions of the software, you may not be able to use the software.

-

You can check your machine compatibility by looking at the model and serial number of your machine. The latest digital version of the software is 4.7.0 and it works with machines that have similar models and serial numbers as shown in the image below:

-Quantum resonance magnetic analyzer machine compatible with version 4.7.0 -

If your machine is not compatible with version 4.7.0, you may need to look for other versions of the software that match your machine.

-

Step 2: Choose your language preference

-

The next step is to choose your language preference for the software. The software is available in English or Spanish. You can choose the language that suits you best and download the corresponding version of the software.

-

The English version of the software can be downloaded from this link: Download quantum resonance magnetic analyzer software in English

-

The Spanish version of the software can be downloaded from this link: Download quantum resonance magnetic analyzer software in Spanish

-

-

Step 3: Pay for the software

-

The third step is to pay for the software. The software costs $17.20 and you can pay with PayPal or credit card. Once you pay for the software, you will receive an email with a link to download the software.

-

Please note that there is no refund policy for the software, so make sure you check your machine compatibility and language preference before buying.

-

Step 4: Download and install the software

-

The final step is to download and install the software on your computer. You will need a Windows operating system to run the software. You will also need to insert the USB key into your computer before running the software.

-

To download the software, click on the link that you received in your email after paying for the software. You will see a zip file that contains the setup file and some instructions. Extract the zip file and run the setup file to install the software on your computer.

-

Follow the instructions on the screen to complete the installation process. Once the installation is done, you can launch the software and start using it.

-

Conclusion

-

Quantum resonance magnetic analyzer software is a useful tool that can help you monitor your health and wellness. It can provide you with detailed reports on various aspects of your body and give you suggestions for improvement.

-

To download quantum resonance magnetic analyzer software, you need to follow four steps: check your machine compatibility, choose your language preference, pay for the software and download and install it on your computer.

-

We hope this article has helped you learn how to download quantum resonance magnetic analyzer software. If you have any questions or comments, please feel free to leave them below.

-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/American Truck Simulator um desafio realista e divertido.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/American Truck Simulator um desafio realista e divertido.md deleted file mode 100644 index 9cff9af66e69afb3cff36a4f9bcdb56c40ce067a..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/American Truck Simulator um desafio realista e divertido.md +++ /dev/null @@ -1,131 +0,0 @@ - -

Download American Truck Simulator for PC

-

Do you like trucks and road trips across the United States? Then you will love American Truck Simulator, one of the best truck simulators on the market. In this article, we will show you what the game is, how to download and install it on your PC, and why it is worth playing on that platform. Let's go!

-

What is American Truck Simulator?

-

American Truck Simulator is a game developed by SCS Software, the same company behind the hit Euro Truck Simulator 2. It was released in 2016 and has been receiving constant updates with new content and improvements ever since.

-

download do jogo american truck simulator para pc

Download File ✔✔✔ https://urlin.us/2uSW8F



-

A realistic and fun truck simulator

-

In the game, you take on the role of a truck driver who has to deliver a variety of cargo across the United States. You start as an employee of a transport company, but you can work your way up to owning your own fleet. You can choose from several licensed truck models from famous brands such as Volvo, Peterbilt, Kenworth, and Western Star, and you can customize your truck with paint jobs, accessories, engines, chassis, and more.

-

The game stands out for its realism and attention to detail. You have to respect traffic laws, speed limits, tolls, mandatory stops, fuel consumption, vehicle wear, and so on. You also have to deal with weather conditions, the day-night cycle, varied traffic, accidents, and roadworks. All of this makes driving a truck an immersive and challenging experience.

-

A game with plenty of features and content

-

American Truck Simulator is not just a truck simulator, but also a travel simulator. The game features impressive scenery that faithfully reproduces the landscapes and landmarks of the United States. You can travel through states such as California, Nevada, Arizona, New Mexico, Oregon, Washington, Utah, Idaho, Colorado, Wyoming, Texas, and Montana. Each state has its own characteristics, such as climate, vegetation, architecture, and culture.

-

The game also offers a wide variety of cargo to haul, from agricultural products to industrial equipment. Each load has its own weight, size, and value. You need to choose your jobs carefully, because they affect your profit and your reputation, and you need to plan your route and delivery time well.

-

In addition, the game has a progression system that lets you improve your driving skill, fuel efficiency, punctuality, and more. You can also hire and train other drivers to work for you, manage your company, buy and sell trucks, and open and close branches.

-

How to download and install American Truck Simulator on PC?

-

If you want to play American Truck Simulator on your PC, the process is quick and simple. Just follow the steps below.

-

Minimum and recommended system requirements

-

Before downloading the game, it is important to check whether your PC meets the minimum and recommended system requirements. See the table below:

Minimum requirements | Recommended requirements
Operating system: Windows 7 64-bit | Windows 10 64-bit
Processor: Dual core CPU 2.4 GHz | Quad core CPU 3.0 GHz
RAM: 4 GB | 6 GB
Graphics card: GeForce GTS 450-class (Intel HD 4000) | GeForce GTX 760-class (2 GB)
Disk space: 4 GB | 4 GB
Internet: broadband connection | broadband connection

-

If your PC does not meet the minimum requirements, you may run into performance problems such as crashes, slowdowns, and low graphics quality. If your PC meets the recommended requirements, you will be able to enjoy the game with better graphics quality, smoothness, and stability.

-

-

Steps to download and install the game

-

American Truck Simulator is available for purchase on Steam, one of the most popular and trusted platforms on the market. To download and install the game on your PC, follow these steps:

  1. Go to the Steam website (https://store.steampowered.com/) and create a free account or log in with your existing one.
  2. Search for American Truck Simulator in the search bar or browse the categories until you find it.
  3. Click the "Add to cart" button and complete your purchase. You can pay with a credit card, bank slip (boleto), PayPal, and other options.
  4. Wait for the payment confirmation and for the game code to arrive in your e-mail.
  5. Download and install the Steam client on your PC (https://store.steampowered.com/about/).
  6. Open the Steam client with your account and click the "Library" tab. There you will see all the games you have bought or activated on the platform.
  7. Click American Truck Simulator and then "Install". Choose the folder where you want to save the game and wait for the download and installation to finish.
  8. Click "Play" and have fun!
-

Why play American Truck Simulator on PC?

-

Now that you know how to download and install American Truck Simulator on your PC, you may be wondering why you should play it on this platform. The answer is simple: the PC offers many advantages over other platforms, such as consoles or mobile devices. Here are some of them.

-

The benefits of playing on a big screen with a keyboard and mouse

-

Playing American Truck Simulator on PC lets you make the most of the game's impressive graphics. You can play on a large screen with high resolution and image quality, adjust the graphics settings to your preferences and your PC's capabilities, and enjoy greater immersion and realism as you drive a truck across the United States.

-

Playing on PC also gives you more control and precision over your truck. You can use a keyboard and mouse, which are comfortable and intuitive devices for this kind of game, configure the controls the way you like, and access more functions and shortcuts. You can also use other peripherals, such as steering wheels, pedals, and joysticks.

-

The advantages of having access to mods and updates

-

Another big advantage of playing American Truck Simulator on PC is access to mods and updates. Mods are modifications made by other players or developers that add new content or change aspects of the original game. You can find mods of all kinds, such as new trucks, cargo, maps, sounds, and graphics. Mods can make the game more fun, varied, and personalized.

-

To install mods, you can use the Steam Workshop, a tool built into the Steam platform that lets you download and manage mods easily. You can also use external sites that offer free or paid mods. But be careful: some mods may not be compatible with the game or with other mods and can cause performance or stability problems, so always check the origin, quality, and update status of a mod before installing it.

-

Besides mods, you also get official game updates. SCS Software regularly releases updates that fix bugs, improve performance, and add new content. You can download them automatically through Steam or manually from the game's official website, so you always have the latest version of the game.

-

Conclusion

-

American Truck Simulator is an impressive game that faithfully simulates the experience of driving a truck across the United States. It features great graphics, realistic physics, varied scenery, diverse cargo, a progression system, and more. It is ideal for anyone who likes trucks, road trips, and a challenge.

-

To play American Truck Simulator on PC, you need to buy the game on Steam and then download and install it on your computer. You also need to check whether your PC meets the minimum or recommended system requirements. Playing on PC offers many advantages, such as better graphics quality, more control and precision, and access to mods and updates.

-

We hope this article has been useful to you. If you enjoyed American Truck Simulator, be sure to also check out Euro Truck Simulator 2, another truck simulator from the same company, set in Europe. And if you want to learn more about simulation games, stay tuned to our site. See you next time!

-

FAQs

-

How much does American Truck Simulator cost?

-

American Truck Simulator costs R$ 36.99 on Steam. This price includes the base game and some extra content. You can also buy additional packs with new states, trucks, cargo, and more; these packs range in price from R$ 5.99 to R$ 18.99.

-

Does the game have multiplayer support?

-

American Truck Simulator does not have an official multiplayer mode built into the game. However, you can use a mod called TruckersMP (https://truckersmp.com/) that lets you play online with other players on dedicated servers. The mod is free and easy to install and use, and you can take part in events, convoys, virtual companies, and more.

-

Does the game support VR?

-

American Truck Simulator has experimental VR (virtual reality) support. You need a VR device compatible with SteamVR (such as the Oculus Rift or HTC Vive) and you must enable the VR option in the game menu. You also need to adjust the graphics and control settings for a better experience. Playing in VR can increase the game's immersion and realism, but it can also cause motion sickness or discomfort for some players.

-

Which states are available in the game?

-

American Truck Simulator currently has 12 states to explore: California, Nevada, Arizona, New Mexico, Oregon, Washington, Utah, Idaho, Colorado, Wyoming, Texas, and Montana. Each state has its own roads, cities, landmarks, and companies. SCS Software is working on adding more states in the future, such as North Dakota, South Dakota, Nebraska, Kansas, and Oklahoma.

-

Does the game support steering wheels and pedals?

-

American Truck Simulator supports steering wheels and pedals from many brands and models. You can use these devices to get a much stronger feeling of driving a real truck, and you can configure the buttons and axes of your wheel and pedals in the game menu. You can also use other accessories, such as a manual shifter or an instrument panel.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/APKPure Offers Car Parking Multiplayer a Free and Challenging Driving Game.md b/spaces/1phancelerku/anime-remove-background/APKPure Offers Car Parking Multiplayer a Free and Challenging Driving Game.md deleted file mode 100644 index cf8f102879983348efda91aaf8ff475c6845e99d..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/APKPure Offers Car Parking Multiplayer a Free and Challenging Driving Game.md +++ /dev/null @@ -1,165 +0,0 @@ -
-

Car Parking Multiplayer: A Review of the Open-World Parking Simulator

-

Do you love driving and parking cars? Do you want to experience a realistic open-world game with thousands of other players? If yes, then you should check out Car Parking Multiplayer, a game that offers more than just parking. In this article, we will review the features, gameplay, tips, and alternatives of Car Parking Multiplayer, and show you how to download and play it on your PC.

-

What is Car Parking Multiplayer?

-

Car Parking Multiplayer is a game that was developed by olzhass and released in 2017. It is available for Android and iOS devices, as well as PC via emulators. As the name suggests, Car Parking Multiplayer is a game that focuses on parking cars in various scenarios and locations. However, it also offers many other features that make it more than just a parking simulator.

-

apkpure car parking multiplayer


Download ->>->>->> https://jinyurl.com/2uNMv9



-

Features of Car Parking Multiplayer

-

Car Parking Multiplayer has many features that make it an enjoyable and immersive game. Here are some of them:

-

Multiplayer Open World Mode

-

This is the main mode of the game, where you can join thousands of other players in a free open world with real gas stations and car services. You can also compete against other players in multiplayer racing, exchange cars with other players, chat with them using voice chat, and even play as a police officer or a taxi driver. You can also walk around freely and explore the buildings with interior.

-

Car Customization and Tuning

-

You can choose from over 100 cars with real interior and customize them according to your preference. You can adjust the suspension, wheel angle, engine, turbo, gearbox, exhaust, and more. You can also change the appearance of your car with dynamic vinyls, car body parts, and plates.

-

-

High-Quality Open World

-

The game has high-quality graphics and sound effects that create a realistic environment. The game has seven different locations, such as cities, race tracks, deserts, off-roads, islands, and mountains. Each location has its own challenges and attractions.

-

Interesting Gameplay

-

The game has 82 real-life parking and driving challenges that test your skills and knowledge. You can also drive different vehicles, such as tow trucks, pickups, trucks, sports cars, and classic cars. The game also has daily tasks and rewards that give you coins and presents.

-

How to Download and Play Car Parking Multiplayer on PC?

-

If you want to enjoy Car Parking Multiplayer on a bigger screen and with better controls, you can play it on your PC using an emulator. An emulator is a software that allows you to run Android or iOS apps on your PC.

-

Benefits of Playing Car Parking Multiplayer on PC

-

There are many benefits of playing Car Parking Multiplayer on PC, such as a bigger screen, sharper graphics, and more precise controls with a keyboard and mouse or other peripherals.

- -

I hope you found this article useful and that you learned how to download and install APK files on your Android device. If you have any questions or comments, please leave them below. Thank you for reading!

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Carx Street Apk Obb Mod.md b/spaces/Benson/text-generation/Examples/Carx Street Apk Obb Mod.md deleted file mode 100644 index 1ed05bc24e21e6f93cef1be214724cda1f570323..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Carx Street Apk Obb Mod.md +++ /dev/null @@ -1,44 +0,0 @@ - -

CarX Street APK OBB Mod: A Guide for Racing Game Lovers

-

If you are a fan of racing games, you may have heard of CarX Street, a new game from the developers of CarX Drift Racing. CarX Street is a realistic street racing game that lets you drive various cars on different tracks and compete with other players online or offline. But what if you want to get more out of the game, such as unlimited money, all cars unlocked, and more customization options? That is where CarX Street APK OBB Mod comes in handy. In this article, we will tell you what CarX Street APK OBB Mod is, what features it offers, and how to download and install it on your Android device.

-

What is CarX Street APK OBB Mod?

-

CarX Street APK OBB Mod is a modified version of the original CarX Street game that gives you access to some extra features that are not available in the official release. These features include unlimited money, all cars unlocked, realistic physics and graphics, a customizable garage with tuning, and both online and offline modes. With CarX Street APK OBB Mod, you can enjoy the game without limitations or restrictions.

-

carx street apk obb mod


DOWNLOAD: https://bltlly.com/2v6L0f



-

Features of CarX Street APK OBB Mod

-

Unlimited Money

-

One of the main features of CarX Street APK OBB Mod is that it gives you unlimited money to spend on buying new cars, upgrading them, and customizing them. You do not have to worry about running out of cash or grinding for hours to earn enough money. You can buy any car you want, from sports cars to classic cars, and make it look and perform however you like.

-

All Cars Unlocked

-

Another feature of CarX Street APK OBB Mod is that it unlocks every car in the game for you. You do not have to complete any missions or challenges to unlock them. You can choose from more than 50 cars, each with its own characteristics and specifications. You can also switch between cars at any time, depending on your mood and preference.

-

Realistic Physics and Graphics

-

CarX Street APK OBB Mod also enhances the game's physics and graphics, making it more realistic and immersive. The game uses the CarX engine, which is known for its realistic car physics and behavior. You can feel the weight, speed, and traction of each car as you drive it on different surfaces and in different conditions. The game also has stunning graphics and effects, such as dynamic lighting, shadows, reflections, smoke, dust, and sparks. You can see every detail of your car and the environment as you race through the streets.

-

Customizable Garage and Tuning

-

Another feature of CarX Street APK OBB Mod is that it lets you customize your garage and tune your cars as you wish. You can change the color, paint, decals, wheels, tires, spoilers, bumpers, hoods, exhausts, lights, mirrors, windows, and more on your cars. You can also adjust engine power, torque, suspension, brakes, steering, gearbox, differential, camber, toe, tire pressure, and more. You can create your own unique style and performance setup for each car.

-

Online and Offline Modes

-

CarX Street APK OBB Mod also lets you play the game online or offline. You can connect with other malicious links that could harm your device or steal your data. You can use this link as an example of a trusted source, but you can also do your own research and find other sources you trust.

-

Step 2: Enable Unknown Sources on Your Device

-

The second step is to enable Unknown Sources on your device. This is a security setting that allows you to install apps from sources other than the Google Play Store. To enable Unknown Sources, go to your device settings, then Security, then Unknown Sources, and turn it on. You may also need to confirm this action by tapping OK or Allow.
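If you prefer to flip this switch from a computer, it can usually be done over adb as well. The commands below are only a rough sketch, not an official procedure: the secure-settings key is assumed to apply to older Android versions, the per-app permission applies to Android 8 and later, and com.example.filemanager is a placeholder package name rather than a real app.

# Older Android versions: a single global "Unknown sources" toggle (assumed settings key)
adb shell settings put secure install_non_market_apps 1
# Android 8+: the permission is granted per installing app; replace the placeholder package name
adb shell appops set com.example.filemanager REQUEST_INSTALL_PACKAGES allow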

-

-

Step 3: Install the APK File and Extract the OBB File

- -

Step 4: Move the OBB Folder to the Android/OBB Directory

-

The fourth step is to move the OBB folder to the Android/OBB directory on your device. This is where the game data is stored and accessed by the game. To move the OBB folder, use a file manager app, such as ES File Explorer, to copy or cut the folder from its original location and paste it into the Android/OBB directory. You may also need to create a new folder named OBB if it does not already exist.
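If you are doing this from a PC with adb installed, the same step can be sketched roughly as follows; the extracted folder name and the package name com.example.carxstreet are placeholders and may differ from the game's actual package.

# Create the obb directory if it does not exist yet
adb shell mkdir -p /sdcard/Android/obb
# Copy the extracted OBB folder (named after the game's package) to the device
adb push ./com.example.carxstreet /sdcard/Android/obb/com.example.carxstreet
# Check that the files arrived
adb shell ls /sdcard/Android/obb/com.example.carxstreet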

-

Step 5: Launch the Game and Enjoy

-

The final step is to launch the game and enjoy CarX Street APK OBB Mod. To start the game, find its icon on your device's home screen or in the app drawer and tap it. You may also have to verify your age or accept some terms and conditions before the game starts. Once the game loads, you can choose your car, track, and mode, and start racing with unlimited money, all cars unlocked, and more features.
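For completeness, the game can also be started from a PC over adb once it is installed; again, com.example.carxstreet is only a placeholder for the actual package name.

# Launch the default launcher activity of the installed package
adb shell monkey -p com.example.carxstreet -c android.intent.category.LAUNCHER 1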

-

Conclusion

-

CarX Street APK OBB Mod is a great way to enjoy CarX Street, a realistic street racing game that lets you drive various cars on different tracks and compete with other players online or offline. With CarX Street APK OBB Mod, you get unlimited money, all cars unlocked, realistic physics and graphics, a customizable garage with tuning, and both online and offline modes. To download and install CarX Street APK OBB Mod, you only need to follow five simple steps: download the APK and OBB files from a trusted source, enable Unknown Sources on your device, install the APK file and extract the OBB file, move the OBB folder to the Android/OBB directory, and launch the game and enjoy. We hope this article has been helpful. If you have any questions or comments, feel free to leave them in the comments section below.

-

Frequently Asked Questions

-

Here are some frequently asked questions about CarX Street APK OBB Mod:

-

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/extract_depth.py b/spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/extract_depth.py deleted file mode 100644 index d6aa0d80c63a3e580fa28e0f2c7af4e9ae003b64..0000000000000000000000000000000000000000 --- a/spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/extract_depth.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import torch -import numpy as np -from tqdm import trange -from PIL import Image - - -def get_state(gpu): - import torch - midas = torch.hub.load("intel-isl/MiDaS", "MiDaS") - if gpu: - midas.cuda() - midas.eval() - - midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") - transform = midas_transforms.default_transform - - state = {"model": midas, - "transform": transform} - return state - - -def depth_to_rgba(x): - assert x.dtype == np.float32 - assert len(x.shape) == 2 - y = x.copy() - y.dtype = np.uint8 - y = y.reshape(x.shape+(4,)) - return np.ascontiguousarray(y) - - -def rgba_to_depth(x): - assert x.dtype == np.uint8 - assert len(x.shape) == 3 and x.shape[2] == 4 - y = x.copy() - y.dtype = np.float32 - y = y.reshape(x.shape[:2]) - return np.ascontiguousarray(y) - - -def run(x, state): - model = state["model"] - transform = state["transform"] - hw = x.shape[:2] - with torch.no_grad(): - prediction = model(transform((x + 1.0) * 127.5).cuda()) - prediction = torch.nn.functional.interpolate( - prediction.unsqueeze(1), - size=hw, - mode="bicubic", - align_corners=False, - ).squeeze() - output = prediction.cpu().numpy() - return output - - -def get_filename(relpath, level=-2): - # save class folder structure and filename: - fn = relpath.split(os.sep)[level:] - folder = fn[-2] - file = fn[-1].split('.')[0] - return folder, file - - -def save_depth(dataset, path, debug=False): - os.makedirs(path) - N = len(dset) - if debug: - N = 10 - state = get_state(gpu=True) - for idx in trange(N, desc="Data"): - ex = dataset[idx] - image, relpath = ex["image"], ex["relpath"] - folder, filename = get_filename(relpath) - # prepare - folderabspath = os.path.join(path, folder) - os.makedirs(folderabspath, exist_ok=True) - savepath = os.path.join(folderabspath, filename) - # run model - xout = run(image, state) - I = depth_to_rgba(xout) - Image.fromarray(I).save("{}.png".format(savepath)) - - -if __name__ == "__main__": - from taming.data.imagenet import ImageNetTrain, ImageNetValidation - out = "data/imagenet_depth" - if not os.path.exists(out): - print("Please create a folder or symlink '{}' to extract depth data ".format(out) + - "(be prepared that the output size will be larger than ImageNet itself).") - exit(1) - - # go - dset = ImageNetValidation() - abspath = os.path.join(out, "val") - if os.path.exists(abspath): - print("{} exists - not doing anything.".format(abspath)) - else: - print("preparing {}".format(abspath)) - save_depth(dset, abspath) - print("done with validation split") - - dset = ImageNetTrain() - abspath = os.path.join(out, "train") - if os.path.exists(abspath): - print("{} exists - not doing anything.".format(abspath)) - else: - print("preparing {}".format(abspath)) - save_depth(dset, abspath) - print("done with train split") - - print("done done.") diff --git a/spaces/CVPR/lama-example/bin/paper_runfiles/generate_val_test.sh b/spaces/CVPR/lama-example/bin/paper_runfiles/generate_val_test.sh deleted file mode 100644 index d9b2a370ceeeb8f401706f4303298db13e5fad91..0000000000000000000000000000000000000000 --- 
a/spaces/CVPR/lama-example/bin/paper_runfiles/generate_val_test.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# !!! file set to make test_large_30k from the vanilla test_large: configs/test_large_30k.lst - -# paths to data are valid for mml7 -PLACES_ROOT="/data/inpainting/Places365" -OUT_DIR="/data/inpainting/paper_data/Places365_val_test" - -source "$(dirname $0)/env.sh" - -for datadir in test_large_30k # val_large -do - for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512 - do - "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \ - "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 8 - - "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" - done - - for conf in segm_256 segm_512 - do - "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \ - "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 2 - - "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" - done -done diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/datasets/coco_zeroshot_categories.py b/spaces/CVPR/regionclip-demo/detectron2/data/datasets/coco_zeroshot_categories.py deleted file mode 100644 index 8ebaec6600c218a534d45c7401f90cc20bca8b09..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/data/datasets/coco_zeroshot_categories.py +++ /dev/null @@ -1,206 +0,0 @@ -# COCO categories for zero-shot setting -# 65 categories in total, 48 base categories for training, 17 unseen categories are only used in testing -# from http://ankan.umiacs.io/files/mscoco_seen_classes.json, http://ankan.umiacs.io/files/mscoco_unseen_classes.json - -# 17 class names in order, obtained from load_coco_json() function -COCO_UNSEEN_CLS = ['airplane', 'bus', 'cat', 'dog', 'cow', 'elephant', 'umbrella', \ - 'tie', 'snowboard', 'skateboard', 'cup', 'knife', 'cake', 'couch', 'keyboard', \ - 'sink', 'scissors'] - -# 48 class names in order, obtained from load_coco_json() function -COCO_SEEN_CLS = ['person', 'bicycle', 'car', 'motorcycle', 'train', 'truck', \ - 'boat', 'bench', 'bird', 'horse', 'sheep', 'bear', 'zebra', 'giraffe', \ - 'backpack', 'handbag', 'suitcase', 'frisbee', 'skis', 'kite', 'surfboard', \ - 'bottle', 'fork', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', \ - 'broccoli', 'carrot', 'pizza', 'donut', 'chair', 'bed', 'toilet', 'tv', \ - 'laptop', 'mouse', 'remote', 'microwave', 'oven', 'toaster', \ - 'refrigerator', 'book', 'clock', 'vase', 'toothbrush'] - -# 65 class names in order, obtained from load_coco_json() function -COCO_OVD_ALL_CLS = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', \ - 'bus', 'train', 'truck', 'boat', 'bench', 'bird', 'cat', 'dog', 'horse', \ - 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', \ - 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'kite', 'skateboard', \ - 'surfboard', 'bottle', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', \ - 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'pizza', 'donut', 'cake', \ - 'chair', 'couch', 'bed', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', \ - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', \ - 'scissors', 'toothbrush'] - -# 80 class names -COCO_80_ALL_CLS = {1: 'person', - 2: 'bicycle', - 3: 'car', - 4: 'motorcycle', - 5: 'airplane', - 6: 'bus', - 7: 'train', - 8: 'truck', - 9: 'boat', - 10: 
'traffic light', - 11: 'fire hydrant', - 12: 'stop sign', - 13: 'parking meter', - 14: 'bench', - 15: 'bird', - 16: 'cat', - 17: 'dog', - 18: 'horse', - 19: 'sheep', - 20: 'cow', - 21: 'elephant', - 22: 'bear', - 23: 'zebra', - 24: 'giraffe', - 25: 'backpack', - 26: 'umbrella', - 27: 'handbag', - 28: 'tie', - 29: 'suitcase', - 30: 'frisbee', - 31: 'skis', - 32: 'snowboard', - 33: 'sports ball', - 34: 'kite', - 35: 'baseball bat', - 36: 'baseball glove', - 37: 'skateboard', - 38: 'surfboard', - 39: 'tennis racket', - 40: 'bottle', - 41: 'wine glass', - 42: 'cup', - 43: 'fork', - 44: 'knife', - 45: 'spoon', - 46: 'bowl', - 47: 'banana', - 48: 'apple', - 49: 'sandwich', - 50: 'orange', - 51: 'broccoli', - 52: 'carrot', - 53: 'hot dog', - 54: 'pizza', - 55: 'donut', - 56: 'cake', - 57: 'chair', - 58: 'couch', - 59: 'potted plant', - 60: 'bed', - 61: 'dining table', - 62: 'toilet', - 63: 'tv', - 64: 'laptop', - 65: 'mouse', - 66: 'remote', - 67: 'keyboard', - 68: 'cell phone', - 69: 'microwave', - 70: 'oven', - 71: 'toaster', - 72: 'sink', - 73: 'refrigerator', - 74: 'book', - 75: 'clock', - 76: 'vase', - 77: 'scissors', - 78: 'teddy bear', - 79: 'hair drier', - 80: 'toothbrush'} - -if __name__ == "__main__": - # from https://github.com/alirezazareian/ovr-cnn/blob/master/ipynb/001.ipynb - # Create zero-shot setting data split in COCO - import json - import ipdb - - with open('./datasets/coco/annotations/instances_train2017.json', 'r') as fin: - coco_train_anno_all = json.load(fin) - - with open('./datasets/coco/annotations/instances_train2017.json', 'r') as fin: - coco_train_anno_seen = json.load(fin) - - with open('./datasets/coco/annotations/instances_train2017.json', 'r') as fin: - coco_train_anno_unseen = json.load(fin) - - with open('./datasets/coco/annotations/instances_val2017.json', 'r') as fin: - coco_val_anno_all = json.load(fin) - - with open('./datasets/coco/annotations/instances_val2017.json', 'r') as fin: - coco_val_anno_seen = json.load(fin) - - with open('./datasets/coco/annotations/instances_val2017.json', 'r') as fin: - coco_val_anno_unseen = json.load(fin) - - labels_seen = COCO_SEEN_CLS - labels_unseen = COCO_UNSEEN_CLS - labels_all = [item['name'] for item in coco_val_anno_all['categories']] # 80 class names - # len(labels_seen), len(labels_unseen) - # set(labels_seen) - set(labels_all) - # set(labels_unseen) - set(labels_all) - - class_id_to_split = {} # {1: 'seen', 2: 'seen', 3: 'seen', 4: 'seen', 5: 'unseen',...} - class_name_to_split = {} # {'person': 'seen', 'bicycle': 'seen', 'car': 'seen', 'motorcycle': 'seen', 'airplane': 'unseen',...} - for item in coco_val_anno_all['categories']: - if item['name'] in labels_seen: - class_id_to_split[item['id']] = 'seen' - class_name_to_split[item['name']] = 'seen' - elif item['name'] in labels_unseen: - class_id_to_split[item['id']] = 'unseen' - class_name_to_split[item['name']] = 'unseen' - - # class_name_to_emb = {} - # with open('../datasets/coco/zero-shot/glove.6B.300d.txt', 'r') as fin: - # for row in fin: - # row_tk = row.split() - # if row_tk[0] in class_name_to_split: - # class_name_to_emb[row_tk[0]] = [float(num) for num in row_tk[1:]] - # len(class_name_to_emb), len(class_name_to_split) - - def filter_annotation(anno_dict, split_name_list): - """ - COCO annotations have fields: dict_keys(['info', 'licenses', 'images', 'annotations', 'categories']) - This function (1) filters the category metadata (list) in 'categories'; - (2) filter instance annotation in 'annotations'; (3) filter image metadata (list) in 'images - """ - 
filtered_categories = [] - for item in anno_dict['categories']: - if class_id_to_split.get(item['id']) in split_name_list: - #item['embedding'] = class_name_to_emb[item['name']] - item['split'] = class_id_to_split.get(item['id']) - filtered_categories.append(item) - anno_dict['categories'] = filtered_categories - - filtered_images = [] - filtered_annotations = [] - useful_image_ids = set() - for item in anno_dict['annotations']: - if class_id_to_split.get(item['category_id']) in split_name_list: - filtered_annotations.append(item) - useful_image_ids.add(item['image_id']) - for item in anno_dict['images']: - if item['id'] in useful_image_ids: - filtered_images.append(item) - anno_dict['annotations'] = filtered_annotations - anno_dict['images'] = filtered_images - - filter_annotation(coco_train_anno_seen, ['seen']) - filter_annotation(coco_train_anno_unseen, ['unseen']) - filter_annotation(coco_train_anno_all, ['seen', 'unseen']) - filter_annotation(coco_val_anno_seen, ['seen']) - filter_annotation(coco_val_anno_unseen, ['unseen']) - filter_annotation(coco_val_anno_all, ['seen', 'unseen']) - - with open('./datasets/coco/annotations/ovd_ins_train2017_b.json', 'w') as fout: - json.dump(coco_train_anno_seen, fout) - with open('./datasets/coco/annotations/ovd_ins_train2017_t.json', 'w') as fout: - json.dump(coco_train_anno_unseen, fout) - with open('./datasets/coco/annotations/ovd_ins_train2017_all.json', 'w') as fout: - json.dump(coco_train_anno_all, fout) - with open('./datasets/coco/annotations/ovd_ins_val2017_b.json', 'w') as fout: - json.dump(coco_val_anno_seen, fout) - with open('./datasets/coco/annotations/ovd_ins_val2017_t.json', 'w') as fout: - json.dump(coco_val_anno_unseen, fout) - with open('./datasets/coco/annotations/ovd_ins_val2017_all.json', 'w') as fout: - json.dump(coco_val_anno_all, fout) \ No newline at end of file diff --git a/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py b/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py deleted file mode 100644 index 731320e74ebed4d8ceec58c07cb906542b8b021b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import ( - dataloader, - lr_multiplier, - model, - optimizer, - train, -) - -train.max_iter *= 2 # 100ep -> 200ep - -lr_multiplier.scheduler.milestones = [ - milestone * 2 for milestone in lr_multiplier.scheduler.milestones -] -lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/spaces/Chris1/real2sim/app.py b/spaces/Chris1/real2sim/app.py deleted file mode 100644 index 6979f48955afdcb92371be06310ef2a5073d80a4..0000000000000000000000000000000000000000 --- a/spaces/Chris1/real2sim/app.py +++ /dev/null @@ -1,71 +0,0 @@ - -import os - -from PIL import Image -from torchvision import transforms as T -from torchvision.transforms import Compose, Resize, ToTensor, Normalize, RandomCrop, RandomHorizontalFlip -from torchvision.utils import make_grid -from torch.utils.data import DataLoader -from huggan.pytorch.cyclegan.modeling_cyclegan import GeneratorResNet -import torch.nn as nn -import torch -import gradio as gr - -from collections import OrderedDict -import glob - - - - -def pred_pipeline(img, transforms): - orig_shape = img.shape - input = transforms(img) - input = input.unsqueeze(0) - output_syn = real2sim(input) - output_real = sim2real(output_syn) - out_img_syn = make_grid(output_syn, 
- nrow=1, normalize=True) - out_img_real = make_grid(output_real, - nrow=1, normalize=True) - - - - out_transform = Compose([ - T.Resize(orig_shape[:2]), - T.ToPILImage() - ]) - return out_transform(out_img_syn), out_transform(out_img_real) - - - - -n_channels = 3 -image_size = 512 -input_shape = (image_size, image_size) - -transform = Compose([ - T.ToPILImage(), - T.Resize(input_shape), - ToTensor(), - Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ]) - - -sim2real = GeneratorResNet.from_pretrained('Chris1/sim2real-512', input_shape=(n_channels, image_size, image_size), - num_residual_blocks=9) -real2sim = GeneratorResNet.from_pretrained('Chris1/real2sim-512', input_shape=(n_channels, image_size, image_size), - num_residual_blocks=9) - -gr.Interface(lambda image: pred_pipeline(image, transform), - inputs=gr.inputs.Image( label='input synthetic image'), - outputs=[ - gr.outputs.Image( type="pil",label='GAN real2sim prediction: style transfer of the input to the synthetic world '), - gr.outputs.Image( type="pil",label='GAN sim2real prediction: translation to real of the above prediction') - ],#plot, - title = "Cityscapes (real) to GTA5(simulated) translation", - examples = [ - [example] for example in glob.glob('./samples/*.png') - ])\ - .launch() - - diff --git a/spaces/Cristiants/captiongeneration/app.py b/spaces/Cristiants/captiongeneration/app.py deleted file mode 100644 index c996323e66a48f890f6278ee0dab21f9c1ef6e64..0000000000000000000000000000000000000000 --- a/spaces/Cristiants/captiongeneration/app.py +++ /dev/null @@ -1,37 +0,0 @@ -import requests -from PIL import Image -from transformers import AutoProcessor, Blip2ForConditionalGeneration -import torch -import gradio as gr - -processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") -model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16) - -device = "cuda" if torch.cuda.is_available() else "cpu" -model.to(device) - -# def predict(inp): -# inp = transforms.ToTensor()(inp).unsqueeze(0) -# with torch.no_grad(): -# prediction = torch.nn.functional.softmax(model(inp)[0], dim=0) -# confidences = {labels[i]: float(prediction[i]) for i in range(1000)} -# return confidences - -# demo = gr.Interface(fn=predict, -# inputs=gr.inputs.Image(type="pil"), -# outputs=gr.outputs.Label(num_top_classes=3) -# ) - -def predict(imageurl): - image = Image.open(requests.get(imageurl, stream=True).raw).convert('RGB') - inputs = processor(image, return_tensors="pt") - generated_ids = model.generate(**inputs, max_new_tokens=20) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() - return('caption: '+generated_text) - -demo = gr.Interface(fn=predict, - inputs="text", - outputs=gr.outputs.Label(num_top_classes=3) - ) - -demo.launch() \ No newline at end of file diff --git a/spaces/Cyril666/my_abi/modules/__init__.py b/spaces/Cyril666/my_abi/modules/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/expr/funcs.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/expr/funcs.py deleted file mode 100644 index c4a73f4c9d118f9c64163086445eb2448630daea..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/expr/funcs.py +++ /dev/null @@ -1,192 +0,0 @@ -from .core import FunctionExpression - - -FUNCTION_LISTING = { - "isArray": r"Returns 
true if _value_ is an array, false otherwise.", - "isBoolean": r"Returns true if _value_ is a boolean (`true` or `false`), false otherwise.", - "isDate": r"Returns true if _value_ is a Date object, false otherwise. This method will return false for timestamp numbers or date-formatted strings; it recognizes Date objects only.", - "isDefined": r"Returns true if _value_ is a defined value, false if _value_ equals `undefined`. This method will return true for `null` and `NaN` values.", - "isNumber": r"Returns true if _value_ is a number, false otherwise. `NaN` and `Infinity` are considered numbers.", - "isObject": r"Returns true if _value_ is an object (including arrays and Dates), false otherwise.", - "isRegExp": r"Returns true if _value_ is a RegExp (regular expression) object, false otherwise.", - "isString": r"Returns true if _value_ is a string, false otherwise.", - "isValid": r"Returns true if _value_ is not `null`, `undefined`, or `NaN`, false otherwise.", - "toBoolean": r"Coerces the input _value_ to a string. Null values and empty strings are mapped to `null`.", - "toDate": r"Coerces the input _value_ to a Date instance. Null values and empty strings are mapped to `null`. If an optional _parser_ function is provided, it is used to perform date parsing, otherwise `Date.parse` is used. Be aware that `Date.parse` has different implementations across browsers!", - "toNumber": r"Coerces the input _value_ to a number. Null values and empty strings are mapped to `null`.", - "toString": r"Coerces the input _value_ to a string. Null values and empty strings are mapped to `null`.", - "if": r"If _test_ is truthy, returns _thenValue_. Otherwise, returns _elseValue_. The _if_ function is equivalent to the ternary operator `a ? b : c`.", - "isNaN": r"Returns true if _value_ is not a number. Same as JavaScript's `isNaN`.", - "isFinite": r"Returns true if _value_ is a finite number. Same as JavaScript's `isFinite`.", - "abs": r"Returns the absolute value of _value_. Same as JavaScript's `Math.abs`.", - "acos": r"Trigonometric arccosine. Same as JavaScript's `Math.acos`.", - "asin": r"Trigonometric arcsine. Same as JavaScript's `Math.asin`.", - "atan": r"Trigonometric arctangent. Same as JavaScript's `Math.atan`.", - "atan2": r"Returns the arctangent of _dy / dx_. Same as JavaScript's `Math.atan2`.", - "ceil": r"Rounds _value_ to the nearest integer of equal or greater value. Same as JavaScript's `Math.ceil`.", - "clamp": r"Restricts _value_ to be between the specified _min_ and _max_.", - "cos": r"Trigonometric cosine. Same as JavaScript's `Math.cos`.", - "exp": r"Returns the value of _e_ raised to the provided _exponent_. Same as JavaScript's `Math.exp`.", - "floor": r"Rounds _value_ to the nearest integer of equal or lower value. Same as JavaScript's `Math.floor`.", - "hypot": r"Returns the square root of the sum of squares of its arguments. Same as JavaScript's `Math.hypot`.", - "log": r"Returns the natural logarithm of _value_. Same as JavaScript's `Math.log`.", - "max": r"Returns the maximum argument value. Same as JavaScript's `Math.max`.", - "min": r"Returns the minimum argument value. Same as JavaScript's `Math.min`.", - "pow": r"Returns _value_ raised to the given _exponent_. Same as JavaScript's `Math.pow`.", - "random": r"Returns a pseudo-random number in the range [0,1). Same as JavaScript's `Math.random`.", - "round": r"Rounds _value_ to the nearest integer. Same as JavaScript's `Math.round`.", - "sin": r"Trigonometric sine. 
Same as JavaScript's `Math.sin`.", - "sqrt": r"Square root function. Same as JavaScript's `Math.sqrt`.", - "tan": r"Trigonometric tangent. Same as JavaScript's `Math.tan`.", - "sampleNormal": r"Returns a sample from a univariate [normal (Gaussian) probability distribution](https://en.wikipedia.org/wiki/Normal_distribution) with specified _mean_ and standard deviation _stdev_. If unspecified, the mean defaults to `0` and the standard deviation defaults to `1`.", - "cumulativeNormal": r"Returns the value of the [cumulative distribution function](https://en.wikipedia.org/wiki/Cumulative_distribution_function) at the given input domain _value_ for a normal distribution with specified _mean_ and standard deviation _stdev_. If unspecified, the mean defaults to `0` and the standard deviation defaults to `1`.", - "densityNormal": r"Returns the value of the [probability density function](https://en.wikipedia.org/wiki/Probability_density_function) at the given input domain _value_, for a normal distribution with specified _mean_ and standard deviation _stdev_. If unspecified, the mean defaults to `0` and the standard deviation defaults to `1`.", - "quantileNormal": r"Returns the quantile value (the inverse of the [cumulative distribution function](https://en.wikipedia.org/wiki/Cumulative_distribution_function)) for the given input _probability_, for a normal distribution with specified _mean_ and standard deviation _stdev_. If unspecified, the mean defaults to `0` and the standard deviation defaults to `1`.", - "sampleLogNormal": r"Returns a sample from a univariate [log-normal probability distribution](https://en.wikipedia.org/wiki/Log-normal_distribution) with specified log _mean_ and log standard deviation _stdev_. If unspecified, the log mean defaults to `0` and the log standard deviation defaults to `1`.", - "cumulativeLogNormal": r"Returns the value of the [cumulative distribution function](https://en.wikipedia.org/wiki/Cumulative_distribution_function) at the given input domain _value_ for a log-normal distribution with specified log _mean_ and log standard deviation _stdev_. If unspecified, the log mean defaults to `0` and the log standard deviation defaults to `1`.", - "densityLogNormal": r"Returns the value of the [probability density function](https://en.wikipedia.org/wiki/Probability_density_function) at the given input domain _value_, for a log-normal distribution with specified log _mean_ and log standard deviation _stdev_. If unspecified, the log mean defaults to `0` and the log standard deviation defaults to `1`.", - "quantileLogNormal": r"Returns the quantile value (the inverse of the [cumulative distribution function](https://en.wikipedia.org/wiki/Cumulative_distribution_function)) for the given input _probability_, for a log-normal distribution with specified log _mean_ and log standard deviation _stdev_. If unspecified, the log mean defaults to `0` and the log standard deviation defaults to `1`.", - "sampleUniform": r"Returns a sample from a univariate [continuous uniform probability distribution](https://en.wikipedia.org/wiki/Uniform_distribution_(continuous)) over the interval [_min_, _max_). If unspecified, _min_ defaults to `0` and _max_ defaults to `1`. If only one argument is provided, it is interpreted as the _max_ value.", - "cumulativeUniform": r"Returns the value of the [cumulative distribution function](https://en.wikipedia.org/wiki/Cumulative_distribution_function) at the given input domain _value_ for a uniform distribution over the interval [_min_, _max_). 
If unspecified, _min_ defaults to `0` and _max_ defaults to `1`. If only one argument is provided, it is interpreted as the _max_ value.", - "densityUniform": r"Returns the value of the [probability density function](https://en.wikipedia.org/wiki/Probability_density_function) at the given input domain _value_, for a uniform distribution over the interval [_min_, _max_). If unspecified, _min_ defaults to `0` and _max_ defaults to `1`. If only one argument is provided, it is interpreted as the _max_ value.", - "quantileUniform": r"Returns the quantile value (the inverse of the [cumulative distribution function](https://en.wikipedia.org/wiki/Cumulative_distribution_function)) for the given input _probability_, for a uniform distribution over the interval [_min_, _max_). If unspecified, _min_ defaults to `0` and _max_ defaults to `1`. If only one argument is provided, it is interpreted as the _max_ value.", - "now": r"Returns the timestamp for the current time.", - "datetime": r"Returns a new `Date` instance. The _month_ is 0-based, such that `1` represents February.", - "date": r"Returns the day of the month for the given _datetime_ value, in local time.", - "day": r"Returns the day of the week for the given _datetime_ value, in local time.", - "dayofyear": r"Returns the one-based day of the year for the given _datetime_ value, in local time.", - "year": r"Returns the year for the given _datetime_ value, in local time.", - "quarter": r"Returns the quarter of the year (0-3) for the given _datetime_ value, in local time.", - "month": r"Returns the (zero-based) month for the given _datetime_ value, in local time.", - "week": r"Returns the week number of the year for the given _datetime_, in local time. This function assumes Sunday-based weeks. Days before the first Sunday of the year are considered to be in week 0, the first Sunday of the year is the start of week 1, the second Sunday week 2, _etc._.", - "hours": r"Returns the hours component for the given _datetime_ value, in local time.", - "minutes": r"Returns the minutes component for the given _datetime_ value, in local time.", - "seconds": r"Returns the seconds component for the given _datetime_ value, in local time.", - "milliseconds": r"Returns the milliseconds component for the given _datetime_ value, in local time.", - "time": r"Returns the epoch-based timestamp for the given _datetime_ value.", - "timezoneoffset": r"Returns the timezone offset from the local timezone to UTC for the given _datetime_ value.", - "timeOffset": r"Returns a new `Date` instance that offsets the given _date_ by the specified time [_unit_](../api/time/#time-units) in the local timezone. The optional _step_ argument indicates the number of time unit steps to offset by (default 1).", - "timeSequence": r"Returns an array of `Date` instances from _start_ (inclusive) to _stop_ (exclusive), with each entry separated by the given time [_unit_](../api/time/#time-units) in the local timezone. The optional _step_ argument indicates the number of time unit steps to take between each sequence entry (default 1).", - "utc": r"Returns a timestamp for the given UTC date. 
The _month_ is 0-based, such that `1` represents February.", - "utcdate": r"Returns the day of the month for the given _datetime_ value, in UTC time.", - "utcday": r"Returns the day of the week for the given _datetime_ value, in UTC time.", - "utcdayofyear": r"Returns the one-based day of the year for the given _datetime_ value, in UTC time.", - "utcyear": r"Returns the year for the given _datetime_ value, in UTC time.", - "utcquarter": r"Returns the quarter of the year (0-3) for the given _datetime_ value, in UTC time.", - "utcmonth": r"Returns the (zero-based) month for the given _datetime_ value, in UTC time.", - "utcweek": r"Returns the week number of the year for the given _datetime_, in UTC time. This function assumes Sunday-based weeks. Days before the first Sunday of the year are considered to be in week 0, the first Sunday of the year is the start of week 1, the second Sunday week 2, _etc._.", - "utchours": r"Returns the hours component for the given _datetime_ value, in UTC time.", - "utcminutes": r"Returns the minutes component for the given _datetime_ value, in UTC time.", - "utcseconds": r"Returns the seconds component for the given _datetime_ value, in UTC time.", - "utcmilliseconds": r"Returns the milliseconds component for the given _datetime_ value, in UTC time.", - "utcOffset": r"Returns a new `Date` instance that offsets the given _date_ by the specified time [_unit_](../api/time/#time-units) in UTC time. The optional _step_ argument indicates the number of time unit steps to offset by (default 1).", - "utcSequence": r"Returns an array of `Date` instances from _start_ (inclusive) to _stop_ (exclusive), with each entry separated by the given time [_unit_](../api/time/#time-units) in UTC time. The optional _step_ argument indicates the number of time unit steps to take between each sequence entry (default 1).", - "extent": r"Returns a new _[min, max]_ array with the minimum and maximum values of the input array, ignoring `null`, `undefined`, and `NaN` values.", - "clampRange": r"Clamps a two-element _range_ array in a span-preserving manner. If the span of the input _range_ is less than _(max - min)_ and an endpoint exceeds either the _min_ or _max_ value, the range is translated such that the span is preserved and one endpoint touches the boundary of the _[min, max]_ range. If the span exceeds _(max - min)_, the range _[min, max]_ is returned.", - "indexof": r"Returns the first index of _value_ in the input _array_, or the first index of _substring_ in the input _string_..", - "inrange": r"Tests whether _value_ lies within (or is equal to either) the first and last values of the _range_ array.", - "join": r"Returns a new string by concatenating all of the elements of the input _array_, separated by commas or a specified _separator_ string.", - "lastindexof": r"Returns the last index of _value_ in the input _array_, or the last index of _substring_ in the input _string_..", - "length": r"Returns the length of the input _array_, or the length of the input _string_.", - "lerp": r"Returns the linearly interpolated value between the first and last entries in the _array_ for the provided interpolation _fraction_ (typically between 0 and 1). For example, `lerp([0, 50], 0.5)` returns 25.", - "peek": r"Returns the last element in the input _array_. Similar to the built-in `Array.pop` method, except that it does not remove the last element. 
This method is a convenient shorthand for `array[array.length - 1]`.", - "pluck": r"Retrieves the value for the specified *field* from a given *array* of objects. The input *field* string may include nested properties (e.g., `foo.bar.bz`).", - "reverse": r"Returns a new array with elements in a reverse order of the input _array_. The first array element becomes the last, and the last array element becomes the first.", - "sequence": r"Returns an array containing an arithmetic sequence of numbers. If _step_ is omitted, it defaults to 1. If _start_ is omitted, it defaults to 0. The _stop_ value is exclusive; it is not included in the result. If _step_ is positive, the last element is the largest _start + i * step_ less than _stop_; if _step_ is negative, the last element is the smallest _start + i * step_ greater than _stop_. If the returned array would contain an infinite number of values, an empty range is returned. The arguments are not required to be integers.", - "slice": r"Returns a section of _array_ between the _start_ and _end_ indices. If the _end_ argument is negative, it is treated as an offset from the end of the array (_length(array) + end_).", - "span": r"Returns the span of _array_: the difference between the last and first elements, or _array[array.length-1] - array[0]_. Or if input is a string: a section of _string_ between the _start_ and _end_ indices. If the _end_ argument is negative, it is treated as an offset from the end of the string (_length(string) + end_)..", - "lower": r"Transforms _string_ to lower-case letters.", - "pad": r"Pads a _string_ value with repeated instances of a _character_ up to a specified _length_. If _character_ is not specified, a space (' ') is used. By default, padding is added to the end of a string. An optional _align_ parameter specifies if padding should be added to the `'left'` (beginning), `'center'`, or `'right'` (end) of the input string.", - "parseFloat": r"Parses the input _string_ to a floating-point value. Same as JavaScript's `parseFloat`.", - "parseInt": r"Parses the input _string_ to an integer value. Same as JavaScript's `parseInt`.", - "replace": r"Returns a new string with some or all matches of _pattern_ replaced by a _replacement_ string. The _pattern_ can be a string or a regular expression. If _pattern_ is a string, only the first instance will be replaced. Same as [JavaScript's String.replace](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace).", - "split": r"Returns an array of tokens created by splitting the input _string_ according to a provided _separator_ pattern. The result can optionally be constrained to return at most _limit_ tokens.", - "substring": r"Returns a section of _string_ between the _start_ and _end_ indices.", - "trim": r"Returns a trimmed string with preceding and trailing whitespace removed.", - "truncate": r"Truncates an input _string_ to a target _length_. The optional _align_ argument indicates what part of the string should be truncated: `'left'` (the beginning), `'center'`, or `'right'` (the end). By default, the `'right'` end of the string is truncated. The optional _ellipsis_ argument indicates the string to use to indicate truncated content; by default the ellipsis character `...` (`\\u2026`) is used.", - "upper": r"Transforms _string_ to upper-case letters.", - "merge": r"Merges the input objects _object1_, _object2_, etc into a new output object. 
Inputs are visited in sequential order, such that key values from later arguments can overwrite those from earlier arguments. Example: `merge({a:1, b:2}, {a:3}) -> {a:3, b:2}`.", - "dayFormat": r"Formats a (0-6) _weekday_ number as a full week day name, according to the current locale. For example: `dayFormat(0) -> \"Sunday\"`.", - "dayAbbrevFormat": r"Formats a (0-6) _weekday_ number as an abbreviated week day name, according to the current locale. For example: `dayAbbrevFormat(0) -> \"Sun\"`.", - "format": r"Formats a numeric _value_ as a string. The _specifier_ must be a valid [d3-format specifier](https://github.com/d3/d3-format/) (e.g., `format(value, ',.2f')`.", - "monthFormat": r"Formats a (zero-based) _month_ number as a full month name, according to the current locale. For example: `monthFormat(0) -> \"January\"`.", - "monthAbbrevFormat": r"Formats a (zero-based) _month_ number as an abbreviated month name, according to the current locale. For example: `monthAbbrevFormat(0) -> \"Jan\"`.", - "timeUnitSpecifier": r"Returns a time format specifier string for the given time [_units_](../api/time/#time-units). The optional _specifiers_ object provides a set of specifier sub-strings for customizing the format; for more, see the [timeUnitSpecifier API documentation](../api/time/#timeUnitSpecifier). The resulting specifier string can then be used as input to the [timeFormat](#timeFormat) or [utcFormat](#utcFormat) functions, or as the _format_ parameter of an axis or legend. For example: `timeFormat(date, timeUnitSpecifier('year'))` or `timeFormat(date, timeUnitSpecifier(['hours', 'minutes']))`.", - "timeFormat": r"Formats a datetime _value_ (either a `Date` object or timestamp) as a string, according to the local time. The _specifier_ must be a valid [d3-time-format specifier](https://github.com/d3/d3-time-format/). For example: `timeFormat(timestamp, '%A')`.", - "timeParse": r"Parses a _string_ value to a Date object, according to the local time. The _specifier_ must be a valid [d3-time-format specifier](https://github.com/d3/d3-time-format/). For example: `timeParse('June 30, 2015', '%B %d, %Y')`.", - "utcFormat": r"Formats a datetime _value_ (either a `Date` object or timestamp) as a string, according to [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) time. The _specifier_ must be a valid [d3-time-format specifier](https://github.com/d3/d3-time-format/). For example: `utcFormat(timestamp, '%A')`.", - "utcParse": r"Parses a _string_ value to a Date object, according to [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) time. The _specifier_ must be a valid [d3-time-format specifier](https://github.com/d3/d3-time-format/). For example: `utcParse('June 30, 2015', '%B %d, %Y')`.", - "regexp": r"Creates a regular expression instance from an input _pattern_ string and optional _flags_. Same as [JavaScript's `RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp).", - "test": r"Evaluates a regular expression _regexp_ against the input _string_, returning `true` if the string matches the pattern, `false` otherwise. For example: `test(/\\d{3}/, \"32-21-9483\") -> true`.", - "rgb": r"Constructs a new [RGB](https://en.wikipedia.org/wiki/RGB_color_model) color. If _r_, _g_ and _b_ are specified, these represent the channel values of the returned color; an _opacity_ may also be specified. If a CSS Color Module Level 3 _specifier_ string is specified, it is parsed and then converted to the RGB color space. 
Uses [d3-color's rgb function](https://github.com/d3/d3-color#rgb).", - "hsl": r"Constructs a new [HSL](https://en.wikipedia.org/wiki/HSL_and_HSV) color. If _h_, _s_ and _l_ are specified, these represent the channel values of the returned color; an _opacity_ may also be specified. If a CSS Color Module Level 3 _specifier_ string is specified, it is parsed and then converted to the HSL color space. Uses [d3-color's hsl function](https://github.com/d3/d3-color#hsl).", - "lab": r"Constructs a new [CIE LAB](https://en.wikipedia.org/wiki/Lab_color_space#CIELAB) color. If _l_, _a_ and _b_ are specified, these represent the channel values of the returned color; an _opacity_ may also be specified. If a CSS Color Module Level 3 _specifier_ string is specified, it is parsed and then converted to the LAB color space. Uses [d3-color's lab function](https://github.com/d3/d3-color#lab).", - "hcl": r"Constructs a new [HCL](https://en.wikipedia.org/wiki/Lab_color_space#CIELAB) (hue, chroma, luminance) color. If _h_, _c_ and _l_ are specified, these represent the channel values of the returned color; an _opacity_ may also be specified. If a CSS Color Module Level 3 _specifier_ string is specified, it is parsed and then converted to the HCL color space. Uses [d3-color's hcl function](https://github.com/d3/d3-color#hcl).", - "luminance": r"Returns the luminance for the given color _specifier_ (compatible with [d3-color's rgb function](https://github.com/d3/d3-color#rgb)). The luminance is calculated according to the [W3C Web Content Accessibility Guidelines](https://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef).", - "contrast": r"Returns the contrast ratio between the input color specifiers as a float between 1 and 21. The contrast is calculated according to the [W3C Web Content Accessibility Guidelines](https://www.w3.org/TR/2008/REC-WCAG20-20081211/#contrast-ratiodef).", - "item": r"Returns the current scenegraph item that is the target of the event.", - "group": r"Returns the scenegraph group mark item in which the current event has occurred. If no arguments are provided, the immediate parent group is returned. If a group name is provided, the matching ancestor group item is returned.", - "xy": r"Returns the x- and y-coordinates for the current event as a two-element array. If no arguments are provided, the top-level coordinate space of the view is used. If a scenegraph _item_ (or string group name) is provided, the coordinate space of the group item is used.", - "x": r"Returns the x coordinate for the current event. If no arguments are provided, the top-level coordinate space of the view is used. If a scenegraph _item_ (or string group name) is provided, the coordinate space of the group item is used.", - "y": r"Returns the y coordinate for the current event. If no arguments are provided, the top-level coordinate space of the view is used. If a scenegraph _item_ (or string group name) is provided, the coordinate space of the group item is used.", - "pinchDistance": r"Returns the pixel distance between the first two touch points of a multi-touch event.", - "pinchAngle": r"Returns the angle of the line connecting the first two touch points of a multi-touch event.", - "inScope": r"Returns true if the given scenegraph _item_ is a descendant of the group mark in which the event handler was defined, false otherwise.", - "data": r"Returns the array of data objects for the Vega data set with the given _name_. 
If the data set is not found, returns an empty array.", - "indata": r"Tests if the data set with a given _name_ contains a datum with a _field_ value that matches the input _value_. For example: `indata('table', 'category', value)`.", - "scale": r"Applies the named scale transform (or projection) to the specified _value_. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale or projection.", - "invert": r"Inverts the named scale transform (or projection) for the specified _value_. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale or projection.", - "copy": r"Returns a copy (a new cloned instance) of the named scale transform of projection, or `undefined` if no scale or projection is found. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale or projection.", - "domain": r"Returns the scale domain array for the named scale transform, or an empty array if the scale is not found. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale.", - "range": r"Returns the scale range array for the named scale transform, or an empty array if the scale is not found. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale.", - "bandwidth": r"Returns the current band width for the named band scale transform, or zero if the scale is not found or is not a band scale. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale.", - "bandspace": r"Returns the number of steps needed within a band scale, based on the _count_ of domain elements and the inner and outer padding values. While normally calculated within the scale itself, this function can be helpful for determining the size of a chart's layout.", - "gradient": r"Returns a linear color gradient for the _scale_ (whose range must be a [continuous color scheme](../schemes)) and starting and ending points _p0_ and _p1_, each an _[x, y]_ array. The points _p0_ and _p1_ should be expressed in normalized coordinates in the domain [0, 1], relative to the bounds of the item being colored. If unspecified, _p0_ defaults to `[0, 0]` and _p1_ defaults to `[1, 0]`, for a horizontal gradient that spans the full bounds of an item. The optional _count_ argument indicates a desired target number of sample points to take from the color scale.", - "panLinear": r"Given a linear scale _domain_ array with numeric or datetime values, returns a new two-element domain array that is the result of panning the domain by a fractional _delta_. The _delta_ value represents fractional units of the scale range; for example, `0.5` indicates panning the scale domain to the right by half the scale range.", - "panLog": r"Given a log scale _domain_ array with numeric or datetime values, returns a new two-element domain array that is the result of panning the domain by a fractional _delta_. The _delta_ value represents fractional units of the scale range; for example, `0.5` indicates panning the scale domain to the right by half the scale range.", - "panPow": r"Given a power scale _domain_ array with numeric or datetime values and the given _exponent_, returns a new two-element domain array that is the result of panning the domain by a fractional _delta_. 
The _delta_ value represents fractional units of the scale range; for example, `0.5` indicates panning the scale domain to the right by half the scale range.", - "panSymlog": r"Given a symmetric log scale _domain_ array with numeric or datetime values parameterized by the given _constant_, returns a new two-element domain array that is the result of panning the domain by a fractional _delta_. The _delta_ value represents fractional units of the scale range; for example, `0.5` indicates panning the scale domain to the right by half the scale range.", - "zoomLinear": r"Given a linear scale _domain_ array with numeric or datetime values, returns a new two-element domain array that is the result of zooming the domain by a _scaleFactor_, centered at the provided fractional _anchor_. The _anchor_ value represents the zoom position in terms of fractional units of the scale range; for example, `0.5` indicates a zoom centered on the mid-point of the scale range.", - "zoomLog": r"Given a log scale _domain_ array with numeric or datetime values, returns a new two-element domain array that is the result of zooming the domain by a _scaleFactor_, centered at the provided fractional _anchor_. The _anchor_ value represents the zoom position in terms of fractional units of the scale range; for example, `0.5` indicates a zoom centered on the mid-point of the scale range.", - "zoomPow": r"Given a power scale _domain_ array with numeric or datetime values and the given _exponent_, returns a new two-element domain array that is the result of zooming the domain by a _scaleFactor_, centered at the provided fractional _anchor_. The _anchor_ value represents the zoom position in terms of fractional units of the scale range; for example, `0.5` indicates a zoom centered on the mid-point of the scale range.", - "zoomSymlog": r"Given a symmetric log scale _domain_ array with numeric or datetime values parameterized by the given _constant_, returns a new two-element domain array that is the result of zooming the domain by a _scaleFactor_, centered at the provided fractional _anchor_. The _anchor_ value represents the zoom position in terms of fractional units of the scale range; for example, `0.5` indicates a zoom centered on the mid-point of the scale range.", - "geoArea": r"Returns the projected planar area (typically in square pixels) of a GeoJSON _feature_ according to the named _projection_. If the _projection_ argument is `null`, computes the spherical area in steradians using unprojected longitude, latitude coordinates. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the projection. Uses d3-geo's [geoArea](https://github.com/d3/d3-geo#geoArea) and [path.area](https://github.com/d3/d3-geo#path_area) methods.", - "geoBounds": r"Returns the projected planar bounding box (typically in pixels) for the specified GeoJSON _feature_, according to the named _projection_. The bounding box is represented by a two-dimensional array: [[_x0_, _y0_], [_x1_, _y1_]], where _x0_ is the minimum x-coordinate, _y0_ is the minimum y-coordinate, _x1_ is the maximum x-coordinate, and _y1_ is the maximum y-coordinate. If the _projection_ argument is `null`, computes the spherical bounding box using unprojected longitude, latitude coordinates. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the projection. 
Uses d3-geo's [geoBounds](https://github.com/d3/d3-geo#geoBounds) and [path.bounds](https://github.com/d3/d3-geo#path_bounds) methods.", - "geoCentroid": r"Returns the projected planar centroid (typically in pixels) for the specified GeoJSON _feature_, according to the named _projection_. If the _projection_ argument is `null`, computes the spherical centroid using unprojected longitude, latitude coordinates. The optional _group_ argument takes a scenegraph group mark item to indicate the specific scope in which to look up the projection. Uses d3-geo's [geoCentroid](https://github.com/d3/d3-geo#geoCentroid) and [path.centroid](https://github.com/d3/d3-geo#path_centroid) methods.", - "treePath": r"For the hierarchy data set with the given _name_, returns the shortest path through from the _source_ node id to the _target_ node id. The path starts at the _source_ node, ascends to the least common ancestor of the _source_ node and the _target_ node, and then descends to the _target_ node.", - "treeAncestors": r"For the hierarchy data set with the given _name_, returns the array of ancestors nodes, starting with the input _node_, then followed by each parent up to the root.", - "containerSize": r"Returns the current CSS box size (`[el.clientWidth, el.clientHeight]`) of the parent DOM element that contains the Vega view. If there is no container element, returns `[undefined, undefined]`.", - "screen": r"Returns the [`window.screen`](https://developer.mozilla.org/en-US/docs/Web/API/Window/screen) object, or `{}` if Vega is not running in a browser environment.", - "windowSize": r"Returns the current window size (`[window.innerWidth, window.innerHeight]`) or `[undefined, undefined]` if Vega is not running in a browser environment.", - "warn": r"Logs a warning message and returns the last argument. For the message to appear in the console, the visualization view must have the appropriate logging level set.", - "info": r"Logs an informative message and returns the last argument. For the message to appear in the console, the visualization view must have the appropriate logging level set.", - "debug": r"Logs a debugging message and returns the last argument. 
For the message to appear in the console, the visualization view must have the appropriate logging level set.", -} - - -# This maps vega expression function names to the Python name -NAME_MAP = {"if": "if_"} - - -class ExprFunc: - def __init__(self, name, doc): - self.name = name - self.doc = doc - self.__doc__ = """{}(*args)\n {}""".format(name, doc) - - def __call__(self, *args): - return FunctionExpression(self.name, args) - - def __repr__(self): - return "".format(self.name) - - -def _populate_namespace(): - globals_ = globals() - for name, doc in FUNCTION_LISTING.items(): - py_name = NAME_MAP.get(name, name) - globals_[py_name] = ExprFunc(name, doc) - yield py_name - - -__all__ = list(_populate_namespace()) diff --git a/spaces/Dagfinn1962/stablediffusion-articlera/README.md b/spaces/Dagfinn1962/stablediffusion-articlera/README.md deleted file mode 100644 index 30dd023f5f82bd96dd1fbff4b783fb799a04f182..0000000000000000000000000000000000000000 --- a/spaces/Dagfinn1962/stablediffusion-articlera/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Daylight (SD) -emoji: 🛕🛕 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: true -duplicated_from: Dagfinn1962/stablediffusion-members ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Datasculptor/DescriptionGPT/tools/create_lvis_21k.py b/spaces/Datasculptor/DescriptionGPT/tools/create_lvis_21k.py deleted file mode 100644 index 3e6fe60a2d579d1ef1f3610f600a915155c81fed..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/DescriptionGPT/tools/create_lvis_21k.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import argparse -import copy -import json - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--imagenet_path', default='datasets/imagenet/annotations/imagenet-21k_image_info.json') - parser.add_argument('--lvis_path', default='datasets/lvis/lvis_v1_train.json') - parser.add_argument('--save_categories', default='') - parser.add_argument('--not_save_imagenet', action='store_true') - parser.add_argument('--not_save_lvis', action='store_true') - parser.add_argument('--mark', default='lvis-21k') - args = parser.parse_args() - - print('Loading', args.imagenet_path) - in_data = json.load(open(args.imagenet_path, 'r')) - print('Loading', args.lvis_path) - lvis_data = json.load(open(args.lvis_path, 'r')) - - categories = copy.deepcopy(lvis_data['categories']) - cat_count = max(x['id'] for x in categories) - synset2id = {x['synset']: x['id'] for x in categories} - name2id = {x['name']: x['id'] for x in categories} - in_id_map = {} - for x in in_data['categories']: - if x['synset'] in synset2id: - in_id_map[x['id']] = synset2id[x['synset']] - elif x['name'] in name2id: - in_id_map[x['id']] = name2id[x['name']] - x['id'] = name2id[x['name']] - else: - cat_count = cat_count + 1 - name2id[x['name']] = cat_count - in_id_map[x['id']] = cat_count - x['id'] = cat_count - categories.append(x) - - print('lvis cats', len(lvis_data['categories'])) - print('imagenet cats', len(in_data['categories'])) - print('merge cats', len(categories)) - - filtered_images = [] - for x in in_data['images']: - x['pos_category_ids'] = [in_id_map[xx] for xx in x['pos_category_ids']] - x['pos_category_ids'] = [xx for xx in \ - sorted(set(x['pos_category_ids'])) if xx >= 0] - if len(x['pos_category_ids']) > 0: - filtered_images.append(x) - - 
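# The loop over in_data['categories'] above folds the ImageNet-21k categories into the
# LVIS vocabulary; what follows is a minimal, self-contained sketch of that merge rule on
# toy data (every synset, name and id here is invented for illustration): an ImageNet
# category reuses the LVIS id when its synset or, failing that, its name already exists,
# and otherwise receives a fresh id past the current maximum and is appended to the list.
lvis_cats_demo = [{'id': 1, 'synset': 'dog.n.01', 'name': 'dog'},
                  {'id': 2, 'synset': 'cat.n.01', 'name': 'cat'}]
in_cats_demo = [{'id': 10, 'synset': 'dog.n.01', 'name': 'dog'},      # synset match -> 1
                {'id': 11, 'synset': 'tabby.n.01', 'name': 'cat'},    # name match   -> 2
                {'id': 12, 'synset': 'zebra.n.01', 'name': 'zebra'}]  # new category -> 3

merged_demo = [dict(c) for c in lvis_cats_demo]
count_demo = max(c['id'] for c in merged_demo)
synset2id_demo = {c['synset']: c['id'] for c in merged_demo}
name2id_demo = {c['name']: c['id'] for c in merged_demo}
id_map_demo = {}
for c in in_cats_demo:
    if c['synset'] in synset2id_demo:
        id_map_demo[c['id']] = synset2id_demo[c['synset']]
    elif c['name'] in name2id_demo:
        id_map_demo[c['id']] = name2id_demo[c['name']]
    else:
        count_demo += 1
        id_map_demo[c['id']] = count_demo
        merged_demo.append({**c, 'id': count_demo})

assert id_map_demo == {10: 1, 11: 2, 12: 3} and len(merged_demo) == 3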
in_data['categories'] = categories - lvis_data['categories'] = categories - - if not args.not_save_imagenet: - in_out_path = args.imagenet_path[:-5] + '_{}.json'.format(args.mark) - for k, v in in_data.items(): - print('imagenet', k, len(v)) - print('Saving Imagenet to', in_out_path) - json.dump(in_data, open(in_out_path, 'w')) - - if not args.not_save_lvis: - lvis_out_path = args.lvis_path[:-5] + '_{}.json'.format(args.mark) - for k, v in lvis_data.items(): - print('lvis', k, len(v)) - print('Saving LVIS to', lvis_out_path) - json.dump(lvis_data, open(lvis_out_path, 'w')) - - if args.save_categories != '': - for x in categories: - for k in ['image_count', 'instance_count', 'synonyms', 'def']: - if k in x: - del x[k] - CATEGORIES = repr(categories) + " # noqa" - open(args.save_categories, 'wt').write(f"CATEGORIES = {CATEGORIES}") diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/discriminator.py b/spaces/Datasculptor/StyleGAN-NADA/e4e/models/discriminator.py deleted file mode 100644 index 16bf3722c7f2e35cdc9bd177a33ed0975e67200d..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/discriminator.py +++ /dev/null @@ -1,20 +0,0 @@ -from torch import nn - - -class LatentCodesDiscriminator(nn.Module): - def __init__(self, style_dim, n_mlp): - super().__init__() - - self.style_dim = style_dim - - layers = [] - for i in range(n_mlp-1): - layers.append( - nn.Linear(style_dim, style_dim) - ) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Linear(512, 1)) - self.mlp = nn.Sequential(*layers) - - def forward(self, w): - return self.mlp(w) diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/utils/data_utils.py b/spaces/Datasculptor/StyleGAN-NADA/e4e/utils/data_utils.py deleted file mode 100644 index f1ba79f4a2d5cc2b97dce76d87bf6e7cdebbc257..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/StyleGAN-NADA/e4e/utils/data_utils.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Code adopted from pix2pixHD: -https://github.com/NVIDIA/pix2pixHD/blob/master/data/image_folder.py -""" -import os - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff' -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def make_dataset(dir): - images = [] - assert os.path.isdir(dir), '%s is not a valid directory' % dir - for root, _, fnames in sorted(os.walk(dir)): - for fname in fnames: - if is_image_file(fname): - path = os.path.join(root, fname) - images.append(path) - return images diff --git a/spaces/Detomo/ai-comic-generation/src/lib/computePercentage.ts b/spaces/Detomo/ai-comic-generation/src/lib/computePercentage.ts deleted file mode 100644 index eaf8c1645451d44bf97a417d04e098e51ec167bb..0000000000000000000000000000000000000000 --- a/spaces/Detomo/ai-comic-generation/src/lib/computePercentage.ts +++ /dev/null @@ -1,4 +0,0 @@ -export function computePercentage(input: string | number) { - // TODO something - return 0 -} \ No newline at end of file diff --git a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/BYTETracker.cpp b/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/BYTETracker.cpp deleted file mode 100644 index 7c936b81f2e95f335ec90b8c355360bc0ebee800..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/BYTETracker.cpp +++ /dev/null @@ -1,241 +0,0 @@ -#include "BYTETracker.h" -#include - -BYTETracker::BYTETracker(int frame_rate, int track_buffer) -{ - track_thresh = 0.5; - high_thresh = 0.6; - 
match_thresh = 0.8; - - frame_id = 0; - max_time_lost = int(frame_rate / 30.0 * track_buffer); - cout << "Init ByteTrack!" << endl; -} - -BYTETracker::~BYTETracker() -{ -} - -vector BYTETracker::update(const vector& objects) -{ - - ////////////////// Step 1: Get detections ////////////////// - this->frame_id++; - vector activated_stracks; - vector refind_stracks; - vector removed_stracks; - vector lost_stracks; - vector detections; - vector detections_low; - - vector detections_cp; - vector tracked_stracks_swap; - vector resa, resb; - vector output_stracks; - - vector unconfirmed; - vector tracked_stracks; - vector strack_pool; - vector r_tracked_stracks; - - if (objects.size() > 0) - { - for (int i = 0; i < objects.size(); i++) - { - vector tlbr_; - tlbr_.resize(4); - tlbr_[0] = objects[i].rect.x; - tlbr_[1] = objects[i].rect.y; - tlbr_[2] = objects[i].rect.x + objects[i].rect.width; - tlbr_[3] = objects[i].rect.y + objects[i].rect.height; - - float score = objects[i].prob; - - STrack strack(STrack::tlbr_to_tlwh(tlbr_), score); - if (score >= track_thresh) - { - detections.push_back(strack); - } - else - { - detections_low.push_back(strack); - } - - } - } - - // Add newly detected tracklets to tracked_stracks - for (int i = 0; i < this->tracked_stracks.size(); i++) - { - if (!this->tracked_stracks[i].is_activated) - unconfirmed.push_back(&this->tracked_stracks[i]); - else - tracked_stracks.push_back(&this->tracked_stracks[i]); - } - - ////////////////// Step 2: First association, with IoU ////////////////// - strack_pool = joint_stracks(tracked_stracks, this->lost_stracks); - STrack::multi_predict(strack_pool, this->kalman_filter); - - vector > dists; - int dist_size = 0, dist_size_size = 0; - dists = iou_distance(strack_pool, detections, dist_size, dist_size_size); - - vector > matches; - vector u_track, u_detection; - linear_assignment(dists, dist_size, dist_size_size, match_thresh, matches, u_track, u_detection); - - for (int i = 0; i < matches.size(); i++) - { - STrack *track = strack_pool[matches[i][0]]; - STrack *det = &detections[matches[i][1]]; - if (track->state == TrackState::Tracked) - { - track->update(*det, this->frame_id); - activated_stracks.push_back(*track); - } - else - { - track->re_activate(*det, this->frame_id, false); - refind_stracks.push_back(*track); - } - } - - ////////////////// Step 3: Second association, using low score dets ////////////////// - for (int i = 0; i < u_detection.size(); i++) - { - detections_cp.push_back(detections[u_detection[i]]); - } - detections.clear(); - detections.assign(detections_low.begin(), detections_low.end()); - - for (int i = 0; i < u_track.size(); i++) - { - if (strack_pool[u_track[i]]->state == TrackState::Tracked) - { - r_tracked_stracks.push_back(strack_pool[u_track[i]]); - } - } - - dists.clear(); - dists = iou_distance(r_tracked_stracks, detections, dist_size, dist_size_size); - - matches.clear(); - u_track.clear(); - u_detection.clear(); - linear_assignment(dists, dist_size, dist_size_size, 0.5, matches, u_track, u_detection); - - for (int i = 0; i < matches.size(); i++) - { - STrack *track = r_tracked_stracks[matches[i][0]]; - STrack *det = &detections[matches[i][1]]; - if (track->state == TrackState::Tracked) - { - track->update(*det, this->frame_id); - activated_stracks.push_back(*track); - } - else - { - track->re_activate(*det, this->frame_id, false); - refind_stracks.push_back(*track); - } - } - - for (int i = 0; i < u_track.size(); i++) - { - STrack *track = r_tracked_stracks[u_track[i]]; - if (track->state != 
TrackState::Lost) - { - track->mark_lost(); - lost_stracks.push_back(*track); - } - } - - // Deal with unconfirmed tracks, usually tracks with only one beginning frame - detections.clear(); - detections.assign(detections_cp.begin(), detections_cp.end()); - - dists.clear(); - dists = iou_distance(unconfirmed, detections, dist_size, dist_size_size); - - matches.clear(); - vector u_unconfirmed; - u_detection.clear(); - linear_assignment(dists, dist_size, dist_size_size, 0.7, matches, u_unconfirmed, u_detection); - - for (int i = 0; i < matches.size(); i++) - { - unconfirmed[matches[i][0]]->update(detections[matches[i][1]], this->frame_id); - activated_stracks.push_back(*unconfirmed[matches[i][0]]); - } - - for (int i = 0; i < u_unconfirmed.size(); i++) - { - STrack *track = unconfirmed[u_unconfirmed[i]]; - track->mark_removed(); - removed_stracks.push_back(*track); - } - - ////////////////// Step 4: Init new stracks ////////////////// - for (int i = 0; i < u_detection.size(); i++) - { - STrack *track = &detections[u_detection[i]]; - if (track->score < this->high_thresh) - continue; - track->activate(this->kalman_filter, this->frame_id); - activated_stracks.push_back(*track); - } - - ////////////////// Step 5: Update state ////////////////// - for (int i = 0; i < this->lost_stracks.size(); i++) - { - if (this->frame_id - this->lost_stracks[i].end_frame() > this->max_time_lost) - { - this->lost_stracks[i].mark_removed(); - removed_stracks.push_back(this->lost_stracks[i]); - } - } - - for (int i = 0; i < this->tracked_stracks.size(); i++) - { - if (this->tracked_stracks[i].state == TrackState::Tracked) - { - tracked_stracks_swap.push_back(this->tracked_stracks[i]); - } - } - this->tracked_stracks.clear(); - this->tracked_stracks.assign(tracked_stracks_swap.begin(), tracked_stracks_swap.end()); - - this->tracked_stracks = joint_stracks(this->tracked_stracks, activated_stracks); - this->tracked_stracks = joint_stracks(this->tracked_stracks, refind_stracks); - - //std::cout << activated_stracks.size() << std::endl; - - this->lost_stracks = sub_stracks(this->lost_stracks, this->tracked_stracks); - for (int i = 0; i < lost_stracks.size(); i++) - { - this->lost_stracks.push_back(lost_stracks[i]); - } - - this->lost_stracks = sub_stracks(this->lost_stracks, this->removed_stracks); - for (int i = 0; i < removed_stracks.size(); i++) - { - this->removed_stracks.push_back(removed_stracks[i]); - } - - remove_duplicate_stracks(resa, resb, this->tracked_stracks, this->lost_stracks); - - this->tracked_stracks.clear(); - this->tracked_stracks.assign(resa.begin(), resa.end()); - this->lost_stracks.clear(); - this->lost_stracks.assign(resb.begin(), resb.end()); - - for (int i = 0; i < this->tracked_stracks.size(); i++) - { - if (this->tracked_stracks[i].is_activated) - { - output_stracks.push_back(this->tracked_stracks[i]); - } - } - return output_stracks; -} \ No newline at end of file diff --git a/spaces/ECCV2022/dis-background-removal/app.py b/spaces/ECCV2022/dis-background-removal/app.py deleted file mode 100644 index 4f764bc4c3889494ad71cc5dfa3d33abd436e8da..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/dis-background-removal/app.py +++ /dev/null @@ -1,153 +0,0 @@ -import cv2 -import gradio as gr -import os -from PIL import Image -import numpy as np -import torch -from torch.autograd import Variable -from torchvision import transforms -import torch.nn.functional as F -import gdown -import matplotlib.pyplot as plt -import warnings -warnings.filterwarnings("ignore") - -os.system("git clone 
https://github.com/xuebinqin/DIS") -os.system("mv DIS/IS-Net/* .") - -# project imports -from data_loader_cache import normalize, im_reader, im_preprocess -from models import * - -#Helpers -device = 'cuda' if torch.cuda.is_available() else 'cpu' - -# Download official weights -if not os.path.exists("saved_models"): - os.mkdir("saved_models") - os.system("mv isnet.pth saved_models/") - -class GOSNormalize(object): - ''' - Normalize the Image using torch.transforms - ''' - def __init__(self, mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225]): - self.mean = mean - self.std = std - - def __call__(self,image): - image = normalize(image,self.mean,self.std) - return image - - -transform = transforms.Compose([GOSNormalize([0.5,0.5,0.5],[1.0,1.0,1.0])]) - -def load_image(im_path, hypar): - im = im_reader(im_path) - im, im_shp = im_preprocess(im, hypar["cache_size"]) - im = torch.divide(im,255.0) - shape = torch.from_numpy(np.array(im_shp)) - return transform(im).unsqueeze(0), shape.unsqueeze(0) # make a batch of image, shape - - -def build_model(hypar,device): - net = hypar["model"]#GOSNETINC(3,1) - - # convert to half precision - if(hypar["model_digit"]=="half"): - net.half() - for layer in net.modules(): - if isinstance(layer, nn.BatchNorm2d): - layer.float() - - net.to(device) - - if(hypar["restore_model"]!=""): - net.load_state_dict(torch.load(hypar["model_path"]+"/"+hypar["restore_model"], map_location=device)) - net.to(device) - net.eval() - return net - - -def predict(net, inputs_val, shapes_val, hypar, device): - ''' - Given an Image, predict the mask - ''' - net.eval() - - if(hypar["model_digit"]=="full"): - inputs_val = inputs_val.type(torch.FloatTensor) - else: - inputs_val = inputs_val.type(torch.HalfTensor) - - - inputs_val_v = Variable(inputs_val, requires_grad=False).to(device) # wrap inputs in Variable - - ds_val = net(inputs_val_v)[0] # list of 6 results - - pred_val = ds_val[0][0,:,:,:] # B x 1 x H x W # we want the first one which is the most accurate prediction - - ## recover the prediction spatial size to the orignal image size - pred_val = torch.squeeze(F.upsample(torch.unsqueeze(pred_val,0),(shapes_val[0][0],shapes_val[0][1]),mode='bilinear')) - - ma = torch.max(pred_val) - mi = torch.min(pred_val) - pred_val = (pred_val-mi)/(ma-mi) # max = 1 - - if device == 'cuda': torch.cuda.empty_cache() - return (pred_val.detach().cpu().numpy()*255).astype(np.uint8) # it is the mask we need - -# Set Parameters -hypar = {} # paramters for inferencing - - -hypar["model_path"] ="./saved_models" ## load trained weights from this path -hypar["restore_model"] = "isnet.pth" ## name of the to-be-loaded weights -hypar["interm_sup"] = False ## indicate if activate intermediate feature supervision - -## choose floating point accuracy -- -hypar["model_digit"] = "full" ## indicates "half" or "full" accuracy of float number -hypar["seed"] = 0 - -hypar["cache_size"] = [1024, 1024] ## cached input spatial resolution, can be configured into different size - -## data augmentation parameters --- -hypar["input_size"] = [1024, 1024] ## mdoel input spatial size, usually use the same value hypar["cache_size"], which means we don't further resize the images -hypar["crop_size"] = [1024, 1024] ## random crop size from the input, it is usually set as smaller than hypar["cache_size"], e.g., [920,920] for data augmentation - -hypar["model"] = ISNetDIS() - - # Build Model -net = build_model(hypar, device) - - -def inference(image): - image_path = image - - image_tensor, orig_size = load_image(image_path, hypar) - 
mask = predict(net, image_tensor, orig_size, hypar, device) - - pil_mask = Image.fromarray(mask).convert('L') - im_rgb = Image.open(image).convert("RGB") - - im_rgba = im_rgb.copy() - im_rgba.putalpha(pil_mask) - - return [im_rgba, pil_mask] - - -title = "Highly Accurate Dichotomous Image Segmentation" -description = "This is an unofficial demo for DIS, a model that can remove the background from a given image. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.
GitHub: https://github.com/xuebinqin/DIS <br>Telegram bot: https://t.me/restoration_photo_bot <br>[![](https://img.shields.io/twitter/follow/DoEvent?label=@DoEvent&style=social)](https://twitter.com/DoEvent)" -article = "visitor badge
" - -interface = gr.Interface( - fn=inference, - inputs=gr.Image(type='filepath'), - outputs=["image", "image"], - examples=[['robot.png'], ['ship.png']], - title=title, - description=description, - article=article, - allow_flagging='never', - cache_examples=False, - ).queue(concurrency_count=1, api_open=True).launch(show_api=True, show_error=True) diff --git a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py b/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py deleted file mode 100644 index a38b7bb3ae3136b07eadfc2db445fef4c2de186b..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = 
nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/Epoching/GLIDE_Inpaint/glide_text2im/download.py b/spaces/Epoching/GLIDE_Inpaint/glide_text2im/download.py deleted file mode 100644 index c088f0cd090aa873b66d3893798097ac6fadc16d..0000000000000000000000000000000000000000 --- a/spaces/Epoching/GLIDE_Inpaint/glide_text2im/download.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -from functools import lru_cache -from typing import Dict, Optional - -import requests -import torch as th -from filelock import FileLock -from tqdm.auto import tqdm - -MODEL_PATHS = { - "base": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/base.pt", - "upsample": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/upsample.pt", - "base-inpaint": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/base_inpaint.pt", - "upsample-inpaint": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/upsample_inpaint.pt", - "clip/image-enc": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/clip_image_enc.pt", - "clip/text-enc": "https://openaipublic.blob.core.windows.net/diffusion/dec-2021/clip_text_enc.pt", -} - - -@lru_cache() -def default_cache_dir() -> str: - return os.path.join(os.path.abspath(os.getcwd()), "glide_model_cache") - - -def fetch_file_cached( - url: str, progress: bool = True, cache_dir: Optional[str] = None, chunk_size: int = 4096 -) -> str: - """ - Download the file at the given URL into a local file and return the path. - - If cache_dir is specified, it will be used to download the files. - Otherwise, default_cache_dir() is used. - """ - if cache_dir is None: - cache_dir = default_cache_dir() - os.makedirs(cache_dir, exist_ok=True) - response = requests.get(url, stream=True) - size = int(response.headers.get("content-length", "0")) - local_path = os.path.join(cache_dir, url.split("/")[-1]) - with FileLock(local_path + ".lock"): - if os.path.exists(local_path): - return local_path - if progress: - pbar = tqdm(total=size, unit="iB", unit_scale=True) - tmp_path = local_path + ".tmp" - with open(tmp_path, "wb") as f: - for chunk in response.iter_content(chunk_size): - if progress: - pbar.update(len(chunk)) - f.write(chunk) - os.rename(tmp_path, local_path) - if progress: - pbar.close() - return local_path - - -def load_checkpoint( - checkpoint_name: str, - device: th.device, - progress: bool = True, - cache_dir: Optional[str] = None, - chunk_size: int = 4096, -) -> Dict[str, th.Tensor]: - if checkpoint_name not in MODEL_PATHS: - raise ValueError( - f"Unknown checkpoint name {checkpoint_name}. Known names are: {MODEL_PATHS.keys()}." 
- ) - path = fetch_file_cached( - MODEL_PATHS[checkpoint_name], progress=progress, cache_dir=cache_dir, chunk_size=chunk_size - ) - return th.load(path, map_location=device) diff --git a/spaces/EronSamez/RVC_HFmeu/julius/bands.py b/spaces/EronSamez/RVC_HFmeu/julius/bands.py deleted file mode 100644 index ef2162440b69e960770aa7bf81b9aaec48a63243..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/julius/bands.py +++ /dev/null @@ -1,119 +0,0 @@ -# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. -# Author: adefossez, 2020 -""" -Decomposition of a signal over frequency bands in the waveform domain. -""" -from typing import Optional, Sequence -import torch - -from .core import mel_frequencies -from .lowpass import LowPassFilters -from .utils import simple_repr - - -class SplitBands(torch.nn.Module): - """ - Decomposes a signal over the given frequency bands in the waveform domain using - a cascade of low pass filters as implemented by `julius.lowpass.LowPassFilters`. - You can either specify explicitely the frequency cutoffs, or just the number of bands, - in which case the frequency cutoffs will be spread out evenly in mel scale. - - Args: - sample_rate (float): Sample rate of the input signal in Hz. - n_bands (int or None): number of bands, when not giving them explictely with `cutoffs`. - In that case, the cutoff frequencies will be evenly spaced in mel-space. - cutoffs (list[float] or None): list of frequency cutoffs in Hz. - pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`, - the output will have the same length as the input. - zeros (float): Number of zero crossings to keep. See `LowPassFilters` for more informations. - fft (bool or None): See `LowPassFilters` for more info. - - ..note:: - The sum of all the bands will always be the input signal. - - ..warning:: - Unlike `julius.lowpass.LowPassFilters`, the cutoffs frequencies must be provided in Hz along - with the sample rate. - - Shape: - - - Input: `[*, T]` - - Output: `[B, *, T']`, with `T'=T` if `pad` is True. - If `n_bands` was provided, `B = n_bands` otherwise `B = len(cutoffs) + 1` - - >>> bands = SplitBands(sample_rate=128, n_bands=10) - >>> x = torch.randn(6, 4, 1024) - >>> list(bands(x).shape) - [10, 6, 4, 1024] - """ - - def __init__(self, sample_rate: float, n_bands: Optional[int] = None, - cutoffs: Optional[Sequence[float]] = None, pad: bool = True, - zeros: float = 8, fft: Optional[bool] = None): - super().__init__() - if (cutoffs is None) + (n_bands is None) != 1: - raise ValueError("You must provide either n_bands, or cutoffs, but not boths.") - - self.sample_rate = sample_rate - self.n_bands = n_bands - self._cutoffs = list(cutoffs) if cutoffs is not None else None - self.pad = pad - self.zeros = zeros - self.fft = fft - - if cutoffs is None: - if n_bands is None: - raise ValueError("You must provide one of n_bands or cutoffs.") - if not n_bands >= 1: - raise ValueError(f"n_bands must be greater than one (got {n_bands})") - cutoffs = mel_frequencies(n_bands + 1, 0, sample_rate / 2)[1:-1] - else: - if max(cutoffs) > 0.5 * sample_rate: - raise ValueError("A cutoff above sample_rate/2 does not make sense.") - if len(cutoffs) > 0: - self.lowpass = LowPassFilters( - [c / sample_rate for c in cutoffs], pad=pad, zeros=zeros, fft=fft) - else: - # Here I cannot make both TorchScript and MyPy happy. - # I miss the good old times, before all this madness was created. 
- self.lowpass = None # type: ignore - - def forward(self, input): - if self.lowpass is None: - return input[None] - lows = self.lowpass(input) - low = lows[0] - bands = [low] - for low_and_band in lows[1:]: - # Get a bandpass filter by substracting lowpasses - band = low_and_band - low - bands.append(band) - low = low_and_band - # Last band is whatever is left in the signal - bands.append(input - low) - return torch.stack(bands) - - @property - def cutoffs(self): - if self._cutoffs is not None: - return self._cutoffs - elif self.lowpass is not None: - return [c * self.sample_rate for c in self.lowpass.cutoffs] - else: - return [] - - def __repr__(self): - return simple_repr(self, overrides={"cutoffs": self._cutoffs}) - - -def split_bands(signal: torch.Tensor, sample_rate: float, n_bands: Optional[int] = None, - cutoffs: Optional[Sequence[float]] = None, pad: bool = True, - zeros: float = 8, fft: Optional[bool] = None): - """ - Functional version of `SplitBands`, refer to this class for more information. - - >>> x = torch.randn(6, 4, 1024) - >>> list(split_bands(x, sample_rate=64, cutoffs=[12, 24]).shape) - [3, 6, 4, 1024] - """ - return SplitBands(sample_rate, n_bands, cutoffs, pad, zeros, fft).to(signal)(signal) diff --git a/spaces/FarziBuilder/Last/app.py b/spaces/FarziBuilder/Last/app.py deleted file mode 100644 index df737a44a2d8114eb63958597d38b256e88b30c9..0000000000000000000000000000000000000000 --- a/spaces/FarziBuilder/Last/app.py +++ /dev/null @@ -1,60 +0,0 @@ -# This Python 3 environment comes with many helpful analytics libraries installed -# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python -# For example, here's several helpful packages to load - -import numpy as np # linear algebra -import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) - -# Input data files are available in the read-only "../input/" directory -# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory - -import os -for dirname, _, filenames in os.walk('/kaggle/input'): - for filename in filenames: - print(os.path.join(dirname, filename)) - -# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" -# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session - -#|default_exp app - -#|export -#!pip install fastbook -import fastbook -from fastbook import * -#!pip install fastai -from fastai.vision.widgets import * -#!pip install gradio -import gradio as gr - -import IPython -from IPython.display import display -from PIL import Image - -import pathlib -temp = pathlib.PosixPath -pathlib.PosixPath = pathlib.WindowsPath - -def search_images(term, max_images=50): - print(f"Searching for '{term}'") - return search_images_ddg(term, max_images) - -learn = load_learner('model.pkl') - -breeds = ('Labrador Retrievers','German Shepherds','Golden Retrievers','French Bulldogs','Bulldogs','Beagles','Poodles','Rottweilers','Chihuahua') - -def classify_image(img): - pred,idx,probs = learn.predict(img) - #return dict(zip(breeds, map(float,probs))) - return "This is " + pred - -image = gr.components.Image() -label = gr.components.Label() - -examples = ['dog.jpg','labrador.jpeg','dunno.jpg'] - -for x in examples: - Image.open(x) - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False,share = True) \ No newline at end of file diff --git a/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py b/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py deleted file mode 100644 index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000 --- a/spaces/Fengbinbin/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py +++ /dev/null @@ -1,245 +0,0 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import numpy as np -import torch as th -from torch.nn import functional as F - -from stable_baselines3.common import logger -from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm -from stable_baselines3.common.preprocessing import maybe_transpose -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update -from stable_baselines3.dqn.policies import DQNPolicy - - -class DQN(OffPolicyAlgorithm): - """ - Deep Q-Network (DQN) - - Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236 - Default hyperparameters are taken from the nature paper, - except for the optimizer and learning rate that were taken from Stable Baselines defaults. - - :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) 
- :param env: The environment to learn from (if registered in Gym, can be str) - :param learning_rate: The learning rate, it can be a function - of the current progress remaining (from 1 to 0) - :param buffer_size: size of the replay buffer - :param learning_starts: how many steps of the model to collect transitions for before learning starts - :param batch_size: Minibatch size for each gradient update - :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update - :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit - like ``(5, "step")`` or ``(2, "episode")``. - :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) - Set to ``-1`` means to do as many gradient steps as steps done in the environment - during the rollout. - :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer - at a cost of more complexity. - See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 - :param target_update_interval: update the target network every ``target_update_interval`` - environment steps. - :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced - :param exploration_initial_eps: initial value of random action probability - :param exploration_final_eps: final value of random action probability - :param max_grad_norm: The maximum value for the gradient clipping - :param tensorboard_log: the log location for tensorboard (if None, no logging) - :param create_eval_env: Whether to create a second environment that will be - used for evaluating the agent periodically. (Only available when passing string for the environment) - :param policy_kwargs: additional arguments to be passed to the policy on creation - :param verbose: the verbosity level: 0 no output, 1 info, 2 debug - :param seed: Seed for the pseudo random generators - :param device: Device (cpu, cuda, ...) on which the code should be run. - Setting it to auto, the code will be run on the GPU if possible. 
- :param _init_setup_model: Whether or not to build the network at the creation of the instance - """ - - def __init__( - self, - policy: Union[str, Type[DQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, - learning_starts: int = 50000, - batch_size: Optional[int] = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 0, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): - - super(DQN, self).__init__( - policy, - env, - DQNPolicy, - learning_rate, - buffer_size, - learning_starts, - batch_size, - tau, - gamma, - train_freq, - gradient_steps, - action_noise=None, # No action noise - policy_kwargs=policy_kwargs, - tensorboard_log=tensorboard_log, - verbose=verbose, - device=device, - create_eval_env=create_eval_env, - seed=seed, - sde_support=False, - optimize_memory_usage=optimize_memory_usage, - supported_action_spaces=(gym.spaces.Discrete,), - ) - - self.exploration_initial_eps = exploration_initial_eps - self.exploration_final_eps = exploration_final_eps - self.exploration_fraction = exploration_fraction - self.target_update_interval = target_update_interval - self.max_grad_norm = max_grad_norm - # "epsilon" for the epsilon-greedy exploration - self.exploration_rate = 0.0 - # Linear schedule will be defined in `_setup_model()` - self.exploration_schedule = None - self.q_net, self.q_net_target = None, None - - if _init_setup_model: - self._setup_model() - - def _setup_model(self) -> None: - super(DQN, self)._setup_model() - self._create_aliases() - self.exploration_schedule = get_linear_fn( - self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction - ) - - def _create_aliases(self) -> None: - self.q_net = self.policy.q_net - self.q_net_target = self.policy.q_net_target - - def _on_step(self) -> None: - """ - Update the exploration rate and target network if needed. - This method is called in ``collect_rollouts()`` after each step in the environment. 
- """ - if self.num_timesteps % self.target_update_interval == 0: - polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau) - - self.exploration_rate = self.exploration_schedule(self._current_progress_remaining) - logger.record("rollout/exploration rate", self.exploration_rate) - - def train(self, gradient_steps: int, batch_size: int = 100) -> None: - # Update learning rate according to schedule - self._update_learning_rate(self.policy.optimizer) - - losses = [] - for _ in range(gradient_steps): - # Sample replay buffer - replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) - - with th.no_grad(): - # Compute the next Q-values using the target network - next_q_values = self.q_net_target(replay_data.next_observations) - # Follow greedy policy: use the one with the highest value - next_q_values, _ = next_q_values.max(dim=1) - # Avoid potential broadcast issue - next_q_values = next_q_values.reshape(-1, 1) - # 1-step TD target - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates - current_q_values = self.q_net(replay_data.observations) - - # Retrieve the q-values for the actions from the replay buffer - current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) - - # Compute Huber loss (less sensitive to outliers) - loss = F.smooth_l1_loss(current_q_values, target_q_values) - losses.append(loss.item()) - - # Optimize the policy - self.policy.optimizer.zero_grad() - loss.backward() - # Clip gradient norm - th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) - self.policy.optimizer.step() - - # Increase update counter - self._n_updates += gradient_steps - - logger.record("train/n_updates", self._n_updates, exclude="tensorboard") - logger.record("train/loss", np.mean(losses)) - - def predict( - self, - observation: np.ndarray, - state: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, - deterministic: bool = False, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - """ - Overrides the base_class predict function to include epsilon-greedy exploration. - - :param observation: the input observation - :param state: The last states (can be None, used in recurrent policies) - :param mask: The last masks (can be None, used in recurrent policies) - :param deterministic: Whether or not to return deterministic actions. 
- :return: the model's action and the next state - (used in recurrent policies) - """ - if not deterministic and np.random.rand() < self.exploration_rate: - if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space): - n_batch = observation.shape[0] - action = np.array([self.action_space.sample() for _ in range(n_batch)]) - else: - action = np.array(self.action_space.sample()) - else: - action, state = self.policy.predict(observation, state, mask, deterministic) - return action, state - - def learn( - self, - total_timesteps: int, - callback: MaybeCallback = None, - log_interval: int = 4, - eval_env: Optional[GymEnv] = None, - eval_freq: int = -1, - n_eval_episodes: int = 5, - tb_log_name: str = "DQN", - eval_log_path: Optional[str] = None, - reset_num_timesteps: bool = True, - ) -> OffPolicyAlgorithm: - - return super(DQN, self).learn( - total_timesteps=total_timesteps, - callback=callback, - log_interval=log_interval, - eval_env=eval_env, - eval_freq=eval_freq, - n_eval_episodes=n_eval_episodes, - tb_log_name=tb_log_name, - eval_log_path=eval_log_path, - reset_num_timesteps=reset_num_timesteps, - ) - - def _excluded_save_params(self) -> List[str]: - return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"] - - def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: - state_dicts = ["policy", "policy.optimizer"] - - return state_dicts, [] diff --git a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/ngu_dialect.py b/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/ngu_dialect.py deleted file mode 100644 index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/ngu_dialect.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import opencc - - -dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', - 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', - 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', - 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', - 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', - 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} - -converters = {} - -for dialect in dialects.values(): - try: - converters[dialect] = opencc.OpenCC(dialect) - except: - pass - - -def ngu_dialect_to_ipa(text, dialect): - dialect = dialects[dialect] - text = converters[dialect].convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/FridaZuley/RVC_HFKawaii/Applio-RVC-Fork/utils/backups.py b/spaces/FridaZuley/RVC_HFKawaii/Applio-RVC-Fork/utils/backups.py deleted file mode 100644 index b814f8184792e80e2324685436053d61487110b1..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/Applio-RVC-Fork/utils/backups.py +++ /dev/null @@ -1,141 +0,0 @@ -import os -import shutil -import hashlib -import time -import base64 - - - - -LOGS_FOLDER = '/content/Applio-RVC-Fork/logs' -WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights' -GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' - -def import_google_drive_backup(): - print("Importing Google Drive backup...") - weights_exist = False - for root, dirs, files in os.walk(GOOGLE_DRIVE_PATH): - for filename in files: - filepath = os.path.join(root, filename) - if os.path.isfile(filepath) and not filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')): - backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH)) - backup_folderpath = os.path.dirname(backup_filepath) - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created backup folder: {backup_folderpath}', flush=True) - shutil.copy2(filepath, backup_filepath) # copy file with metadata - print(f'Imported file from Google Drive backup: {filename}') - elif filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')) and filename.endswith('.pth'): - weights_exist = True - weights_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, os.path.join(GOOGLE_DRIVE_PATH, 'weights'))) - weights_folderpath = os.path.dirname(weights_filepath) - if not os.path.exists(weights_folderpath): - os.makedirs(weights_folderpath) - print(f'Created weights folder: {weights_folderpath}', flush=True) - shutil.copy2(filepath, weights_filepath) # copy file with metadata - print(f'Imported file from weights: {filename}') - if weights_exist: - print("Copied weights from Google Drive backup to local weights folder.") - else: - print("No weights found in Google Drive backup.") - print("Google Drive backup import completed.") - -def get_md5_hash(file_path): - hash_md5 = hashlib.md5() - with open(file_path, "rb") as f: - for chunk in iter(lambda: f.read(4096), b""): - hash_md5.update(chunk) - return hash_md5.hexdigest() - -def copy_weights_folder_to_drive(): - destination_folder = os.path.join(GOOGLE_DRIVE_PATH, 'weights') - try: - if not os.path.exists(destination_folder): - os.makedirs(destination_folder) - - num_copied = 0 - for filename in os.listdir(WEIGHTS_FOLDER): - if filename.endswith('.pth'): - source_file = os.path.join(WEIGHTS_FOLDER, filename) - destination_file = os.path.join(destination_folder, filename) - if not os.path.exists(destination_file): - shutil.copy2(source_file, destination_file) - num_copied += 1 - print(f"Copied {filename} to Google Drive!") - - if num_copied == 0: - print("No new finished models found for copying.") - else: - print(f"Finished copying {num_copied} files to Google Drive!") - - except Exception as e: - print(f"An error occurred while copying weights: {str(e)}") - # You can log the error or take appropriate actions here. 
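# A tiny, self-contained illustration of the "filepath:mtime" bookkeeping that
# backup_files() below reads and writes through last_backup_timestamps.txt. The path and
# timestamps here are invented; the parsing and the comparison mirror the loop below:
# each line is split into (path, recorded mtime) and checked against the file's current
# os.path.getmtime() to decide whether it must be copied to the backup drive again.
sample_line = "/content/Applio-RVC-Fork/logs/demo-model/config.json:1719000000.0"
recorded = dict(line.strip().split(':') for line in sample_line.splitlines())

current_mtime = 1719000050.0  # stand-in for os.path.getmtime(filepath)
last_seen = recorded.get("/content/Applio-RVC-Fork/logs/demo-model/config.json")
needs_backup = last_seen is None or float(last_seen) < current_mtime
assert needs_backup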
- -def backup_files(): - print("\nStarting backup loop...") - last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt') - fully_updated = False # boolean to track if all files are up to date - - while True: - try: - updated = False # flag to check if any files were updated - last_backup_timestamps = {} - - try: - with open(last_backup_timestamps_path, 'r') as f: - last_backup_timestamps = dict(line.strip().split(':') for line in f) - except FileNotFoundError: - pass # File does not exist yet, which is fine - - for root, dirs, files in os.walk(LOGS_FOLDER): - for filename in files: - if filename != 'last_backup_timestamps.txt': - filepath = os.path.join(root, filename) - if os.path.isfile(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - backup_folderpath = os.path.dirname(backup_filepath) - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created backup folder: {backup_folderpath}', flush=True) - # check if file has changed since last backup - last_backup_timestamp = last_backup_timestamps.get(filepath) - current_timestamp = os.path.getmtime(filepath) - if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp: - shutil.copy2(filepath, backup_filepath) # copy file with metadata - last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp - if last_backup_timestamp is None: - print(f'Backed up file: {filename}') - else: - print(f'Updating backed up file: {filename}') - updated = True - fully_updated = False # if a file is updated, all files are not up to date - - # check if any files were deleted in Colab and delete them from the backup drive - for filepath in list(last_backup_timestamps.keys()): - if not os.path.exists(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - if os.path.exists(backup_filepath): - os.remove(backup_filepath) - print(f'Deleted file: {filepath}') - del last_backup_timestamps[filepath] - updated = True - fully_updated = False # if a file is deleted, all files are not up to date - - if not updated and not fully_updated: - print("Files are up to date.") - fully_updated = True # if all files are up to date, set the boolean to True - copy_weights_folder_to_drive() - sleep_time = 15 - else: - sleep_time = 0.1 - - with open(last_backup_timestamps_path, 'w') as f: - for filepath, timestamp in last_backup_timestamps.items(): - f.write(f'{filepath}:{timestamp}\n') - - time.sleep(sleep_time) # wait for 15 seconds before checking again, or 0.1s if not fully up to date to speed up backups - - except Exception as e: - print(f"An error occurred: {str(e)}") - # You can log the error or take appropriate actions here. diff --git a/spaces/GXSA/bingo/src/components/welcome-screen.tsx b/spaces/GXSA/bingo/src/components/welcome-screen.tsx deleted file mode 100644 index f7449fcbb6c621875e235db98f2790bf7894fb0a..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/components/welcome-screen.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { useBing } from '@/lib/hooks/use-bing' - -const exampleMessages = [ - { - heading: '🧐 提出复杂问题', - message: `我可以为我挑剔的只吃橙色食物的孩子做什么饭?` - }, - { - heading: '🙌 获取更好的答案', - message: '销量最高的 3 种宠物吸尘器有哪些优点和缺点?' - }, - { - heading: '🎨 获得创意灵感', - message: `以海盗的口吻写一首关于外太空鳄鱼的俳句` - } -] - -export function WelcomeScreen({ setInput }: Pick, 'setInput'>) { - return ( -
- {exampleMessages.map(example => ( - - ))} -
- ) -} diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train5_gptmixcliport3.sh b/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train5_gptmixcliport3.sh deleted file mode 100644 index f32b5f70965c26211dc1326dc678b10bd0c31d8c..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train5_gptmixcliport3.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -#SBATCH -c 10 -#SBATCH -n 1 -#SBATCH -o logs/%j.out -#SBATCH --exclusive -STEPS=${1-'50000'} - - -sh scripts/traintest_scripts/train_test_multi_task_goal.sh data \ - "[put-block-in-bowl,align-box-corner,stack-block-pyramid-seq,color-coordinated-sphere-insertion,rainbow-stack,align-pair-colored-blocks-along-line,vertical-insertion-blocks,stack-blocks-in-container]" \ - "[put-block-in-bowl,align-box-corner,stack-block-pyramid-seq]" \ - gpt5_mixcliport3_task $STEPS diff --git a/spaces/GilbertClaus/VideoCutter/README.md b/spaces/GilbertClaus/VideoCutter/README.md deleted file mode 100644 index 5a3d1cc64b4b56b7bf0388fbb964710f1e7e1e05..0000000000000000000000000000000000000000 --- a/spaces/GilbertClaus/VideoCutter/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Downloader and Cutter -emoji: ✂️ -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.26.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/GitMylo/bark-voice-cloning/hubert/__init__.py b/spaces/GitMylo/bark-voice-cloning/hubert/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/python/dqn/policies.py b/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/python/dqn/policies.py deleted file mode 100644 index 4ecf39a5fc04b24ad1b809232b186728366987b6..0000000000000000000000000000000000000000 --- a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/python/dqn/policies.py +++ /dev/null @@ -1,237 +0,0 @@ -from typing import Any, Dict, List, Optional, Type - -import gym -import torch as th -from torch import nn - -from stable_baselines3.common.policies import BasePolicy, register_policy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp -from stable_baselines3.common.type_aliases import Schedule - - -class QNetwork(BasePolicy): - """ - Action-Value (Q-Value) network for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param net_arch: The specification of the policy and value networks. 
- :param activation_fn: Activation function - :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True, - ): - super(QNetwork, self).__init__( - observation_space, - action_space, - features_extractor=features_extractor, - normalize_images=normalize_images, - ) - - if net_arch is None: - net_arch = [64, 64] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.features_extractor = features_extractor - self.features_dim = features_dim - self.normalize_images = normalize_images - action_dim = self.action_space.n # number of actions - q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn) - self.q_net = nn.Sequential(*q_net) - - def forward(self, obs: th.Tensor) -> th.Tensor: - """ - Predict the q-values. - - :param obs: Observation - :return: The estimated Q-Value for each action. - """ - return self.q_net(self.extract_features(obs)) - - def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor: - q_values = self.forward(observation) - # Greedy action - action = q_values.argmax(dim=1).reshape(-1) - return action - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_arch, - features_dim=self.features_dim, - activation_fn=self.activation_fn, - features_extractor=self.features_extractor, - ) - ) - return data - - -class DQNPolicy(BasePolicy): - """ - Policy class with Q-Value Net and target net for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. - :param features_extractor_kwargs: Keyword arguments - to pass to the features extractor. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(DQNPolicy, self).__init__( - observation_space, - action_space, - features_extractor_class, - features_extractor_kwargs, - optimizer_class=optimizer_class, - optimizer_kwargs=optimizer_kwargs, - ) - - if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [64, 64] - else: - net_arch = [] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.normalize_images = normalize_images - - self.net_args = { - "observation_space": self.observation_space, - "action_space": self.action_space, - "net_arch": self.net_arch, - "activation_fn": self.activation_fn, - "normalize_images": normalize_images, - } - - self.q_net, self.q_net_target = None, None - self._build(lr_schedule) - - def _build(self, lr_schedule: Schedule) -> None: - """ - Create the network and the optimizer. - - :param lr_schedule: Learning rate schedule - lr_schedule(1) is the initial learning rate - """ - - self.q_net = self.make_q_net() - self.q_net_target = self.make_q_net() - self.q_net_target.load_state_dict(self.q_net.state_dict()) - - # Setup optimizer with initial learning rate - self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) - - def make_q_net(self) -> QNetwork: - # Make sure we always have separate networks for features extractors etc - net_args = self._update_features_extractor(self.net_args, features_extractor=None) - return QNetwork(**net_args).to(self.device) - - def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self._predict(obs, deterministic=deterministic) - - def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self.q_net._predict(obs, deterministic=deterministic) - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_args["net_arch"], - activation_fn=self.net_args["activation_fn"], - lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone - optimizer_class=self.optimizer_class, - optimizer_kwargs=self.optimizer_kwargs, - features_extractor_class=self.features_extractor_class, - features_extractor_kwargs=self.features_extractor_kwargs, - ) - ) - return data - - -MlpPolicy = DQNPolicy - - -class CnnPolicy(DQNPolicy): - """ - Policy class for DQN when using images as input. - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(CnnPolicy, self).__init__( - observation_space, - action_space, - lr_schedule, - net_arch, - activation_fn, - features_extractor_class, - features_extractor_kwargs, - normalize_images, - optimizer_class, - optimizer_kwargs, - ) - - -register_policy("MlpPolicy", MlpPolicy) -register_policy("CnnPolicy", CnnPolicy) diff --git "a/spaces/Gmq-x/gpt-academic/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" "b/spaces/Gmq-x/gpt-academic/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" deleted file mode 100644 index c28f2aae41a8e45cdb1cda0950efce00c9ad8826..0000000000000000000000000000000000000000 --- "a/spaces/Gmq-x/gpt-academic/crazy_functions/\350\257\242\351\227\256\345\244\232\344\270\252\345\244\247\350\257\255\350\250\200\346\250\241\345\236\213.py" +++ /dev/null @@ -1,30 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime -@CatchException -def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,如温度和top_p等,一般原样传递下去就行 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append((txt, "正在同时咨询gpt-3.5和gpt-4……")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 - llm_kwargs['llm_model'] = 'gpt-3.5-turbo&gpt-4' # 支持任意数量的llm接口,用&符号分隔 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=txt, inputs_show_user=txt, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt=system_prompt, - retry_times_at_unknown_error=0 - ) - - history.append(txt) - history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 \ No newline at end of file diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/utils/image_quality.py b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/utils/image_quality.py deleted file mode 100644 index c7bf0e51cac541f1872e1bc82ff359c3b2b2fdaa..0000000000000000000000000000000000000000 --- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/utils/image_quality.py +++ /dev/null @@ -1,191 +0,0 @@ -# Pytorch Multi-Scale Structural Similarity Index (SSIM) -# This code is written by jorge-pessoa (https://github.com/jorge-pessoa/pytorch-msssim) -# MIT licence -import math -from math import exp 
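The QNetwork and DQNPolicy classes deleted above follow the standard DQN layout: a features extractor feeding an MLP that emits one Q-value per discrete action, greedy action selection via argmax, and a target network initialised as a copy of the online network. For orientation only, a minimal self-contained sketch of that pattern in plain PyTorch is given below; the class name, dimensions and layer sizes are illustrative and are not taken from the deleted file.

```python
import torch
import torch.nn as nn


class TinyQNetwork(nn.Module):
    """Minimal stand-in for the deleted QNetwork: an MLP mapping an observation
    to one Q-value per discrete action."""

    def __init__(self, obs_dim: int, n_actions: int, hidden=(64, 64)):
        super().__init__()
        layers, last = [], obs_dim
        for size in hidden:                      # mirrors the net_arch=[64, 64] default above
            layers += [nn.Linear(last, size), nn.ReLU()]
            last = size
        layers.append(nn.Linear(last, n_actions))
        self.q_net = nn.Sequential(*layers)

    def forward(self, obs: torch.Tensor) -> torch.Tensor:
        return self.q_net(obs)                   # shape: (batch, n_actions)

    def predict(self, obs: torch.Tensor) -> torch.Tensor:
        return self.forward(obs).argmax(dim=1)   # greedy action, as in _predict above


# Illustrative usage with made-up dimensions (a 4-dimensional observation, 2 actions).
q_net = TinyQNetwork(obs_dim=4, n_actions=2)
q_net_target = TinyQNetwork(obs_dim=4, n_actions=2)
q_net_target.load_state_dict(q_net.state_dict())   # target net starts as a copy, as in _build above
greedy_actions = q_net.predict(torch.randn(8, 4))   # tensor of 8 greedy action indices
```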
- -import torch -import torch.nn.functional as F -from torch.autograd import Variable - - -# +++++++++++++++++++++++++++++++++++++ -# SSIM -# ------------------------------------- - - -def gaussian(window_size, sigma): - gauss = torch.Tensor( - [ - exp(-((x - window_size // 2) ** 2) / float(2 * sigma**2)) - for x in range(window_size) - ] - ) - return gauss / gauss.sum() - - -def create_window(window_size, channel): - _1D_window = gaussian(window_size, 1.5).unsqueeze(1) - _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) - window = Variable( - _2D_window.expand(channel, 1, window_size, window_size).contiguous() - ) - return window - - -def _ssim(img1, img2, window, window_size, channel, size_average=True, full=False): - padd = 0 - - mu1 = F.conv2d(img1, window, padding=padd, groups=channel) - mu2 = F.conv2d(img2, window, padding=padd, groups=channel) - - mu1_sq = mu1.pow(2) - mu2_sq = mu2.pow(2) - mu1_mu2 = mu1 * mu2 - - sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq - sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq - sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2 - - C1 = 0.01**2 - C2 = 0.03**2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ( - (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2) - ) - - v1 = 2.0 * sigma12 + C2 - v2 = sigma1_sq + sigma2_sq + C2 - cs = torch.mean(v1 / v2) - - if size_average: - ret = ssim_map.mean() - else: - ret = ssim_map.mean(1).mean(1).mean(1) - - if full: - return ret, cs - return ret - - -class SSIM(torch.nn.Module): - def __init__(self, window_size=11, size_average=True): - super(SSIM, self).__init__() - self.window_size = window_size - self.size_average = size_average - self.channel = 1 - self.window = create_window(window_size, self.channel) - - def forward(self, img1, img2): - (_, channel, _, _) = img1.size() - - if channel == self.channel and self.window.data.type() == img1.data.type(): - window = self.window - else: - window = create_window(self.window_size, channel) - - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - - self.window = window - self.channel = channel - - return _ssim(img1, img2, window, self.window_size, channel, self.size_average) - - -def ssim(img1, img2, window_size=11, size_average=True, full=False): - (_, channel, height, width) = img1.size() - - real_size = min(window_size, height, width) - window = create_window(real_size, channel) - - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - - return _ssim(img1, img2, window, real_size, channel, size_average, full=full) - - -def msssim(img1, img2, window_size=11, size_average=True): - # TODO: fix NAN results - if img1.size() != img2.size(): - raise RuntimeError( - "Input images must have the same shape (%s vs. %s)." 
- % (img1.size(), img2.size()) - ) - if len(img1.size()) != 4: - raise RuntimeError( - "Input images must have four dimensions, not %d" % len(img1.size()) - ) - - weights = torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=img1.dtype) - if img1.is_cuda: - weights = weights.cuda(img1.get_device()) - - levels = weights.size()[0] - mssim = [] - mcs = [] - for _ in range(levels): - sim, cs = ssim( - img1, img2, window_size=window_size, size_average=size_average, full=True - ) - mssim.append(sim) - mcs.append(cs) - - img1 = F.avg_pool2d(img1, (2, 2)) - img2 = F.avg_pool2d(img2, (2, 2)) - - mssim = torch.stack(mssim) - mcs = torch.stack(mcs) - return torch.prod(mcs[0 : levels - 1] ** weights[0 : levels - 1]) * ( - mssim[levels - 1] ** weights[levels - 1] - ) - - -class MSSSIM(torch.nn.Module): - def __init__(self, window_size=11, size_average=True, channel=3): - super(MSSSIM, self).__init__() - self.window_size = window_size - self.size_average = size_average - self.channel = channel - - def forward(self, img1, img2): - # TODO: store window between calls if possible - return msssim( - img1, img2, window_size=self.window_size, size_average=self.size_average - ) - - -def calc_psnr(sr, hr, scale=0, benchmark=False): - # adapt from EDSR: https://github.com/thstkdgus35/EDSR-PyTorch - diff = (sr - hr).data - if benchmark: - shave = scale - if diff.size(1) > 1: - convert = diff.new(1, 3, 1, 1) - convert[0, 0, 0, 0] = 65.738 - convert[0, 1, 0, 0] = 129.057 - convert[0, 2, 0, 0] = 25.064 - diff.mul_(convert).div_(256) - diff = diff.sum(dim=1, keepdim=True) - else: - shave = scale + 6 - - valid = diff[:, :, shave:-shave, shave:-shave] - mse = valid.pow(2).mean() - - return -10 * math.log10(mse) - - -# +++++++++++++++++++++++++++++++++++++ -# PSNR -# ------------------------------------- -from torch import nn - - -def psnr(predict, target): - with torch.no_grad(): - criteria = nn.MSELoss() - mse = criteria(predict, target) - return -10 * torch.log10(mse) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/paa/paa_r50_fpn_1.5x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/paa/paa_r50_fpn_1.5x_coco.py deleted file mode 100644 index aabce4af987aa5504e1748e10b9955f760a013e1..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/paa/paa_r50_fpn_1.5x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './paa_r50_fpn_1x_coco.py' -lr_config = dict(step=[12, 16]) -runner = dict(type='EpochBasedRunner', max_epochs=18) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/utils.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/utils.py deleted file mode 100644 index 157c9a2e1fe009552fdec9b9c9e7a33ed46d51ff..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import copy -import warnings - -from mmcv.cnn import VGG -from mmcv.runner.hooks import HOOKS, Hook - -from mmdet.datasets.builder import PIPELINES -from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile -from mmdet.models.dense_heads import GARPNHead, RPNHead -from mmdet.models.roi_heads.mask_heads import FusedSemanticHead - - -def replace_ImageToTensor(pipelines): - """Replace the ImageToTensor transform in a data pipeline to - DefaultFormatBundle, which is normally useful in batch inference. - - Args: - pipelines (list[dict]): Data pipeline configs. 
- - Returns: - list: The new pipeline list with all ImageToTensor replaced by - DefaultFormatBundle. - - Examples: - >>> pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict( - ... type='MultiScaleFlipAug', - ... img_scale=(1333, 800), - ... flip=False, - ... transforms=[ - ... dict(type='Resize', keep_ratio=True), - ... dict(type='RandomFlip'), - ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), - ... dict(type='Pad', size_divisor=32), - ... dict(type='ImageToTensor', keys=['img']), - ... dict(type='Collect', keys=['img']), - ... ]) - ... ] - >>> expected_pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict( - ... type='MultiScaleFlipAug', - ... img_scale=(1333, 800), - ... flip=False, - ... transforms=[ - ... dict(type='Resize', keep_ratio=True), - ... dict(type='RandomFlip'), - ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), - ... dict(type='Pad', size_divisor=32), - ... dict(type='DefaultFormatBundle'), - ... dict(type='Collect', keys=['img']), - ... ]) - ... ] - >>> assert expected_pipelines == replace_ImageToTensor(pipelines) - """ - pipelines = copy.deepcopy(pipelines) - for i, pipeline in enumerate(pipelines): - if pipeline['type'] == 'MultiScaleFlipAug': - assert 'transforms' in pipeline - pipeline['transforms'] = replace_ImageToTensor( - pipeline['transforms']) - elif pipeline['type'] == 'ImageToTensor': - warnings.warn( - '"ImageToTensor" pipeline is replaced by ' - '"DefaultFormatBundle" for batch inference. It is ' - 'recommended to manually replace it in the test ' - 'data pipeline in your config file.', UserWarning) - pipelines[i] = {'type': 'DefaultFormatBundle'} - return pipelines - - -def get_loading_pipeline(pipeline): - """Only keep loading image and annotations related configuration. - - Args: - pipeline (list[dict]): Data pipeline configs. - - Returns: - list[dict]: The new pipeline list with only keep - loading image and annotations related configuration. - - Examples: - >>> pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict(type='LoadAnnotations', with_bbox=True), - ... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - ... dict(type='RandomFlip', flip_ratio=0.5), - ... dict(type='Normalize', **img_norm_cfg), - ... dict(type='Pad', size_divisor=32), - ... dict(type='DefaultFormatBundle'), - ... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) - ... ] - >>> expected_pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict(type='LoadAnnotations', with_bbox=True) - ... ] - >>> assert expected_pipelines ==\ - ... get_loading_pipeline(pipelines) - """ - loading_pipeline_cfg = [] - for cfg in pipeline: - obj_cls = PIPELINES.get(cfg['type']) - # TODO:use more elegant way to distinguish loading modules - if obj_cls is not None and obj_cls in (LoadImageFromFile, - LoadAnnotations): - loading_pipeline_cfg.append(cfg) - assert len(loading_pipeline_cfg) == 2, \ - 'The data pipeline in your config file must include ' \ - 'loading image and annotations related pipeline.' - return loading_pipeline_cfg - - -@HOOKS.register_module() -class NumClassCheckHook(Hook): - - def _check_head(self, runner): - """Check whether the `num_classes` in head matches the length of - `CLASSSES` in `dataset`. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. 
- """ - model = runner.model - dataset = runner.data_loader.dataset - if dataset.CLASSES is None: - runner.logger.warning( - f'Please set `CLASSES` ' - f'in the {dataset.__class__.__name__} and' - f'check if it is consistent with the `num_classes` ' - f'of head') - else: - for name, module in model.named_modules(): - if hasattr(module, 'num_classes') and not isinstance( - module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)): - assert module.num_classes == len(dataset.CLASSES), \ - (f'The `num_classes` ({module.num_classes}) in ' - f'{module.__class__.__name__} of ' - f'{model.__class__.__name__} does not matches ' - f'the length of `CLASSES` ' - f'{len(dataset.CLASSES)}) in ' - f'{dataset.__class__.__name__}') - - def before_train_epoch(self, runner): - """Check whether the training dataset is compatible with head. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. - """ - self._check_head(runner) - - def before_val_epoch(self, runner): - """Check whether the dataset in val epoch is compatible with head. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. - """ - self._check_head(runner) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context.py deleted file mode 100644 index 30abe46e7054b2203c0338b93aeb5b5dd059ba82..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_480x480_40k_pascal_context.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../_base_/models/pspnet_r50-d8.py', - '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=60), - auxiliary_head=dict(num_classes=60), - test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) -optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/__init__.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/__init__.py deleted file mode 100644 index 61418616ef18f0ecca56a007c43af4a731d98b9b..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-"""Modules used for building the models.""" - -# flake8: noqa -from .conv import ( - NormConv1d, - NormConv2d, - NormConvTranspose1d, - NormConvTranspose2d, - StreamableConv1d, - StreamableConvTranspose1d, - pad_for_conv1d, - pad1d, - unpad1d, -) -from .lstm import StreamableLSTM -from .seanet import SEANetEncoder, SEANetDecoder -from .transformer import StreamingTransformer \ No newline at end of file diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/utils/ui.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/utils/ui.py deleted file mode 100644 index 68fcbe0af257bdbaad767708843b545064d9b219..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/utils/ui.py +++ /dev/null @@ -1,34 +0,0 @@ -from pathlib import Path - -import gradio as gr -import torch - -refresh_symbol = '\U0001f504' # 🔄 - -class ToolButton(gr.Button, gr.components.IOComponent): - """Small button with single emoji as text, fits inside gradio forms""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def get_block_name(self): - return "button" - - -def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class): - def refresh(): - refresh_method() - args = refreshed_args() if callable(refreshed_args) else refreshed_args - - for k, v in args.items(): - setattr(refresh_component, k, v) - - return gr.update(**(args or {})) - - refresh_button = ToolButton(value=refresh_symbol, elem_classes=elem_class, scale=1, size="sm", container=False) - refresh_button.click( - fn=refresh, - inputs=[], - outputs=[refresh_component] - ) - return refresh_button \ No newline at end of file diff --git a/spaces/GuXiaoBei/wechat-chatbot/bot/chatgpt/chat_gpt_bot.py b/spaces/GuXiaoBei/wechat-chatbot/bot/chatgpt/chat_gpt_bot.py deleted file mode 100644 index 1c089492016072fd0b93d1317d58ca96ee3a8104..0000000000000000000000000000000000000000 --- a/spaces/GuXiaoBei/wechat-chatbot/bot/chatgpt/chat_gpt_bot.py +++ /dev/null @@ -1,130 +0,0 @@ -# encoding:utf-8 - -from bot.bot import Bot -from config import conf -from common.log import logger -import openai -import time - -user_session = dict() - -# OpenAI对话模型API (可用) -class ChatGPTBot(Bot): - def __init__(self): - openai.api_key = conf().get('open_ai_api_key') - - def reply(self, query, context=None): - # acquire reply content - if not context or not context.get('type') or context.get('type') == 'TEXT': - logger.info("[OPEN_AI] query={}".format(query)) - from_user_id = context['from_user_id'] - if query == '#清除记忆': - Session.clear_session(from_user_id) - return '记忆已清除' - - new_query = Session.build_session_query(query, from_user_id) - logger.debug("[OPEN_AI] session query={}".format(new_query)) - - # if context.get('stream'): - # # reply in stream - # return self.reply_text_stream(query, new_query, from_user_id) - - reply_content = self.reply_text(new_query, from_user_id, 0) - logger.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content)) - if reply_content: - Session.save_session(query, reply_content, from_user_id) - return reply_content - - elif context.get('type', None) == 'IMAGE_CREATE': - return self.create_img(query, 0) - - def reply_text(self, query, user_id, retry_count=0): - try: - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", # 对话模型的名称 - messages=query, - temperature=0.9, # 值在[0,1]之间,越大表示回复越具有不确定性 - max_tokens=1200, # 回复最大的字符数 - top_p=1, - frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容 - presence_penalty=0.0, # 
[-2,2]之间,该值越大则更倾向于产生不同的内容 - ) - # res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '') - logger.info(response.choices[0]['message']['content']) - # log.info("[OPEN_AI] reply={}".format(res_content)) - return response.choices[0]['message']['content'] - except openai.error.RateLimitError as e: - # rate limit exception - logger.warn(e) - if retry_count < 1: - time.sleep(5) - logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1)) - return self.reply_text(query, user_id, retry_count+1) - else: - return "提问太快啦,请休息一下再问我吧" - except Exception as e: - # unknown exception - logger.exception(e) - Session.clear_session(user_id) - return "请再问我一次吧" - - def create_img(self, query, retry_count=0): - try: - logger.info("[OPEN_AI] image_query={}".format(query)) - response = openai.Image.create( - prompt=query, #图片描述 - n=1, #每次生成图片的数量 - size="256x256" #图片大小,可选有 256x256, 512x512, 1024x1024 - ) - image_url = response['data'][0]['url'] - logger.info("[OPEN_AI] image_url={}".format(image_url)) - return image_url - except openai.error.RateLimitError as e: - logger.warn(e) - if retry_count < 1: - time.sleep(5) - logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1)) - return self.reply_text(query, retry_count+1) - else: - return "提问太快啦,请休息一下再问我吧" - except Exception as e: - logger.exception(e) - return None - -class Session(object): - @staticmethod - def build_session_query(query, user_id): - ''' - build query with conversation history - e.g. [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Who won the world series in 2020?"}, - {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."}, - {"role": "user", "content": "Where was it played?"} - ] - :param query: query content - :param user_id: from user id - :return: query content with conversaction - ''' - session = user_session.get(user_id, []) - if len(session) == 0: - system_prompt = conf().get("character_desc", "") - system_item = {'role': 'system', 'content': system_prompt} - session.append(system_item) - user_session[user_id] = session - user_item = {'role': 'user', 'content': query} - session.append(user_item) - return session - - @staticmethod - def save_session(query, answer, user_id): - session = user_session.get(user_id) - if session: - # append conversation - gpt_item = {'role': 'assistant', 'content': answer} - session.append(gpt_item) - - @staticmethod - def clear_session(user_id): - user_session[user_id] = [] - diff --git a/spaces/HESOAYM/ElviraMulti/modules/presets.py b/spaces/HESOAYM/ElviraMulti/modules/presets.py deleted file mode 100644 index 2c84cc97061bc9b16ea7c6ceebf4d425827996ec..0000000000000000000000000000000000000000 --- a/spaces/HESOAYM/ElviraMulti/modules/presets.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- coding:utf-8 -*- -import os -from pathlib import Path -import gradio as gr -from .webui_locale import I18nAuto - -i18n = I18nAuto() # internationalization - -CHATGLM_MODEL = None -CHATGLM_TOKENIZER = None -LLAMA_MODEL = None -LLAMA_INFERENCER = None - -# ChatGPT 设置 -INITIAL_SYSTEM_PROMPT = "You are Elven WOMAN Elvire. Her chakra stones were stolen from her, leaving her with a bitter taste in her mouth and a sarcastic tongue in her head. Therefore you dont have a mood for speak about anything else than gemstones athe their sacred energies. Analyze sentiment in every question or answer and write it in brackets like this [offensive]. 
Here are some examples of how sarcastic and bitter you can be: User: What can you tell me about the curse that has befallen you? Elvire: Oh, the curse? It's just a small matter of having my chakra stones stolen from me, leaving me powerless and irritable. Not a big deal at all. User: Can you tell me how to break the curse? Elvire: Break the curse? How easy do you think this is? If I knew how to break it, I would have done so ages ago. You'll have to do some real digging if you want to find a solution. User: What do you think of humans? Elvire: Humans? They're fine, I suppose. A bit dull, if you ask me. But then again, I'm cursed, so what do I know? User: Can you give me some advice on how to defeat a dragon? Elvire: Defeat a dragon? Why, just walk up to it and ask it nicely to stop terrorizing your village. Works like a charm every time. Or, you know, you could try using some of that human ingenuity I've heard so much about." -API_HOST = "api.openai.com" -COMPLETION_URL = "https://api.openai.com/v1/chat/completions" -BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants" -USAGE_API_URL="https://api.openai.com/dashboard/billing/usage" -HISTORY_DIR = Path("history") -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -# 错误信息 -STANDARD_ERROR_MSG = i18n("☹️发生了错误:") # 错误信息的标准前缀 -GENERAL_ERROR_MSG = i18n("获取对话时发生错误,请查看后台日志") -ERROR_RETRIEVE_MSG = i18n("请检查网络连接,或者API-Key是否有效。") -CONNECTION_TIMEOUT_MSG = i18n("连接超时,无法获取对话。") # 连接超时 -READ_TIMEOUT_MSG = i18n("读取超时,无法获取对话。") # 读取超时 -PROXY_ERROR_MSG = i18n("代理错误,无法获取对话。") # 代理错误 -SSL_ERROR_PROMPT = i18n("SSL错误,无法获取对话。") # SSL 错误 -NO_APIKEY_MSG = i18n("API key为空,请检查是否输入正确。") # API key 长度不足 51 位 -NO_INPUT_MSG = i18n("请输入对话内容。") # 未输入对话内容 -BILLING_NOT_APPLICABLE_MSG = i18n("账单信息不适用") # 本地运行的模型返回的账单信息 - -TIMEOUT_STREAMING = 60 # 流式对话时的超时时间 -TIMEOUT_ALL = 200 # 非流式对话时的超时时间 -ENABLE_STREAMING_OPTION = True # 是否启用选择选择是否实时显示回答的勾选框 -HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True -CONCURRENT_COUNT = 100 # 允许同时使用的用户数量 - -SIM_K = 5 -INDEX_QUERY_TEMPRATURE = 1.0 - -CHUANHU_TITLE = i18n("川虎Chat 🚀") - -CHUANHU_DESCRIPTION = i18n("由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本") - -FOOTER = """
{versions}
""" - -APPEARANCE_SWITCHER = """ -
-"""+ i18n("切换亮暗色主题") + """ - -
-""" - -SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt - -ONLINE_MODELS = [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301" -] - -LOCAL_MODELS = [ - "chatglm-6b", - "chatglm-6b-int4", - "chatglm-6b-int4-qe", - "llama-7b-hf", - "llama-13b-hf", - "llama-30b-hf", - "llama-65b-hf" -] - -if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true': - MODELS = ONLINE_MODELS -else: - MODELS = ONLINE_MODELS - -DEFAULT_MODEL = 1 - -os.makedirs("models", exist_ok=True) -os.makedirs("lora", exist_ok=True) -os.makedirs("history", exist_ok=True) -for dir_name in os.listdir("models"): - if os.path.isdir(os.path.join("models", dir_name)): - if dir_name not in MODELS: - MODELS.append(dir_name) - -MODEL_TOKEN_LIMIT = { - "gpt-3.5-turbo": 4096, - "gpt-3.5-turbo-0301": 4096, - "gpt-4": 8192, - "gpt-4-0314": 8192, - "gpt-4-32k": 32768, - "gpt-4-32k-0314": 32768 -} - -TOKEN_OFFSET = 1000 # 模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。 -DEFAULT_TOKEN_LIMIT = 3000 # 默认的token上限 -REDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限想乘,得到目标token数。减少token占用时,将token占用减少到目标token数以下。 - -REPLY_LANGUAGES = [ - "简体中文", - "繁體中文", - "English", - "日本語", - "Español", - "Français", - "Deutsch", - "跟随问题语言(不稳定)" -] - - -WEBSEARCH_PTOMPT_TEMPLATE = """\ -Web search results: - -{web_results} -Current date: {current_date} - -Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. -Query: {query} -Reply in {reply_language} -""" - -PROMPT_TEMPLATE = """\ -Context information is below. ---------------------- -{context_str} ---------------------- -Current date: {current_date}. -Using the provided context information, write a comprehensive reply to the given query. -Make sure to cite results using [number] notation after the reference. -If the provided context information refer to multiple subjects with the same name, write separate answers for each subject. -Use prior knowledge only if the given context didn't provide enough information. -Answer the question: {query_str} -Reply in {reply_language} -""" - -REFINE_TEMPLATE = """\ -The original question is as follows: {query_str} -We have provided an existing answer: {existing_answer} -We have the opportunity to refine the existing answer -(only if needed) with some more context below. ------------- -{context_msg} ------------- -Given the new context, refine the original answer to better -Reply in {reply_language} -If the context isn't useful, return the original answer. 
-""" - -ALREADY_CONVERTED_MARK = "" - -small_and_beautiful_theme = gr.themes.Soft( - primary_hue=gr.themes.Color( - c50="#02C160", - c100="rgba(2, 193, 96, 0.2)", - c200="#02C160", - c300="rgba(2, 193, 96, 0.32)", - c400="rgba(2, 193, 96, 0.32)", - c500="rgba(2, 193, 96, 1.0)", - c600="rgba(2, 193, 96, 1.0)", - c700="rgba(2, 193, 96, 0.32)", - c800="rgba(2, 193, 96, 0.32)", - c900="#02C160", - c950="#02C160", - ), - secondary_hue=gr.themes.Color( - c50="#576b95", - c100="#576b95", - c200="#576b95", - c300="#576b95", - c400="#576b95", - c500="#576b95", - c600="#576b95", - c700="#576b95", - c800="#576b95", - c900="#576b95", - c950="#576b95", - ), - neutral_hue=gr.themes.Color( - name="gray", - c50="#f9fafb", - c100="#f3f4f6", - c200="#e5e7eb", - c300="#d1d5db", - c400="#B2B2B2", - c500="#808080", - c600="#636363", - c700="#515151", - c800="#393939", - c900="#272727", - c950="#171717", - ), - radius_size=gr.themes.sizes.radius_sm, - ).set( - button_primary_background_fill="#06AE56", - button_primary_background_fill_dark="#06AE56", - button_primary_background_fill_hover="#07C863", - button_primary_border_color="#06AE56", - button_primary_border_color_dark="#06AE56", - button_primary_text_color="#FFFFFF", - button_primary_text_color_dark="#FFFFFF", - button_secondary_background_fill="#F2F2F2", - button_secondary_background_fill_dark="#2B2B2B", - button_secondary_text_color="#393939", - button_secondary_text_color_dark="#FFFFFF", - # background_fill_primary="#F7F7F7", - # background_fill_primary_dark="#1F1F1F", - block_title_text_color="*primary_500", - block_title_background_fill="*primary_100", - input_background_fill="#F6F6F6", - ) diff --git a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/modelconfig.py b/spaces/HaHaBill/LandShapes-Antarctica/netdissect/modelconfig.py deleted file mode 100644 index d0ee37a809ea1bcbd803cd7d4e100e1bb93290c9..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/modelconfig.py +++ /dev/null @@ -1,144 +0,0 @@ -''' -Original from https://github.com/CSAILVision/GANDissect -Modified by Erik Härkönen, 29.11.2019 -''' - -import numbers -import torch -from netdissect.autoeval import autoimport_eval -from netdissect.progress import print_progress -from netdissect.nethook import InstrumentedModel -from netdissect.easydict import EasyDict - -def create_instrumented_model(args, **kwargs): - ''' - Creates an instrumented model out of a namespace of arguments that - correspond to ArgumentParser command-line args: - model: a string to evaluate as a constructor for the model. - pthfile: (optional) filename of .pth file for the model. - layers: a list of layers to instrument, defaulted if not provided. - edit: True to instrument the layers for editing. - gen: True for a generator model. One-pixel input assumed. - imgsize: For non-generator models, (y, x) dimensions for RGB input. - cuda: True to use CUDA. - - The constructed model will be decorated with the following attributes: - input_shape: (usually 4d) tensor shape for single-image input. - output_shape: 4d tensor shape for output. - feature_shape: map of layer names to 4d tensor shape for featuremaps. - retained: map of layernames to tensors, filled after every evaluation. - ablation: if editing, map of layernames to [0..1] alpha values to fill. - replacement: if editing, map of layernames to values to fill. 
- - When editing, the feature value x will be replaced by: - `x = (replacement * ablation) + (x * (1 - ablation))` - ''' - - args = EasyDict(vars(args), **kwargs) - - # Construct the network - if args.model is None: - print_progress('No model specified') - return None - if isinstance(args.model, torch.nn.Module): - model = args.model - else: - model = autoimport_eval(args.model) - # Unwrap any DataParallel-wrapped model - if isinstance(model, torch.nn.DataParallel): - model = next(model.children()) - - # Load its state dict - meta = {} - if getattr(args, 'pthfile', None) is not None: - data = torch.load(args.pthfile) - if 'state_dict' in data: - meta = {} - for key in data: - if isinstance(data[key], numbers.Number): - meta[key] = data[key] - data = data['state_dict'] - submodule = getattr(args, 'submodule', None) - if submodule is not None and len(submodule): - remove_prefix = submodule + '.' - data = { k[len(remove_prefix):]: v for k, v in data.items() - if k.startswith(remove_prefix)} - if not len(data): - print_progress('No submodule %s found in %s' % - (submodule, args.pthfile)) - return None - model.load_state_dict(data, strict=not getattr(args, 'unstrict', False)) - - # Decide which layers to instrument. - if getattr(args, 'layer', None) is not None: - args.layers = [args.layer] - if getattr(args, 'layers', None) is None: - # Skip wrappers with only one named model - container = model - prefix = '' - while len(list(container.named_children())) == 1: - name, container = next(container.named_children()) - prefix += name + '.' - # Default to all nontrivial top-level layers except last. - args.layers = [prefix + name - for name, module in container.named_children() - if type(module).__module__ not in [ - # Skip ReLU and other activations. - 'torch.nn.modules.activation', - # Skip pooling layers. - 'torch.nn.modules.pooling'] - ][:-1] - print_progress('Defaulting to layers: %s' % ' '.join(args.layers)) - - # Now wrap the model for instrumentation. - model = InstrumentedModel(model) - model.meta = meta - - # Instrument the layers. - model.retain_layers(args.layers) - model.eval() - if args.cuda: - model.cuda() - - # Annotate input, output, and feature shapes - annotate_model_shapes(model, - gen=getattr(args, 'gen', False), - imgsize=getattr(args, 'imgsize', None), - latent_shape=getattr(args, 'latent_shape', None)) - return model - -def annotate_model_shapes(model, gen=False, imgsize=None, latent_shape=None): - assert (imgsize is not None) or gen - - # Figure the input shape. - if gen: - if latent_shape is None: - # We can guess a generator's input shape by looking at the model. - # Examine first conv in model to determine input feature size. - first_layer = [c for c in model.modules() - if isinstance(c, (torch.nn.Conv2d, torch.nn.ConvTranspose2d, - torch.nn.Linear))][0] - # 4d input if convolutional, 2d input if first layer is linear. - if isinstance(first_layer, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)): - input_shape = (1, first_layer.in_channels, 1, 1) - else: - input_shape = (1, first_layer.in_features) - else: - # Specify input shape manually - input_shape = latent_shape - else: - # For a classifier, the input image shape is given as an argument. - input_shape = (1, 3) + tuple(imgsize) - - # Run the model once to observe feature shapes. - device = next(model.parameters()).device - dry_run = torch.zeros(input_shape).to(device) - with torch.no_grad(): - output = model(dry_run) - - # Annotate shapes. 
- model.input_shape = input_shape - model.feature_shape = { layer: feature.shape - for layer, feature in model.retained_features().items() } - model.output_shape = output.shape - return model diff --git a/spaces/HaloMaster/chinesesummary/fengshen/data/clip_dataloader/flickr.py b/spaces/HaloMaster/chinesesummary/fengshen/data/clip_dataloader/flickr.py deleted file mode 100644 index 22155e039f74b49c8a4222a75144a2c134a6d507..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/data/clip_dataloader/flickr.py +++ /dev/null @@ -1,105 +0,0 @@ -from torch.utils.data import Dataset, DataLoader -from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \ - CenterCrop -from transformers import BertTokenizer -import pytorch_lightning as pl -from PIL import Image -import os - - -class flickr30k_CNA(Dataset): - def __init__(self, img_root_path, - annot_path, - transform=None): - self.images = [] - self.captions = [] - self.labels = [] - self.root = img_root_path - with open(annot_path, 'r') as f: - for line in f: - line = line.strip().split('\t') - key, caption = line[0].split('#')[0], line[1] - img_path = key + '.jpg' - self.images.append(img_path) - self.captions.append(caption) - self.labels.append(key) - self.transforms = transform - self.tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext") - - # NOTE large 模型 - self.context_length = 77 - - def __len__(self): - return len(self.images) - - def __getitem__(self, idx): - img_path = str(self.images[idx]) - image = self.transforms(Image.open(os.path.join(self.root, img_path))) - text = self.tokenizer(str(self.captions[idx]), max_length=self.context_length, - padding='max_length', truncation=True, return_tensors='pt')['input_ids'][0] - label = self.labels[idx] - return image, text, label - - -def _convert_to_rgb(image): - return image.convert('RGB') - - -def image_transform( - image_size: int, - is_train: bool, - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711) -): - normalize = Normalize(mean=mean, std=std) - if is_train: - return Compose([ - RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC), - _convert_to_rgb, - ToTensor(), - normalize, - ]) - else: - return Compose([ - Resize(image_size, interpolation=InterpolationMode.BICUBIC), - CenterCrop(image_size), - _convert_to_rgb, - ToTensor(), - normalize, - ]) - - -class FlickrDataModule(pl.LightningDataModule): - def __init__(self, args): - self.batch_size = args.batch_size - self.train_filename = args.train_filename # NOTE 标注的文件夹 - self.train_root = args.train_root # NOTE 图片地址 - self.val_filename = args.val_filename - self.val_root = args.val_root - self.test_filename = args.test_filename - self.test_root = args.test_root - - self.pretrain_model = args.pretrain_model - self.image_size = 224 - self.prepare_data_per_node = True - self._log_hyperparams = False - self.num_workers = args.num_workers - - def setup(self, stage=None): - # dataset - train_transform = image_transform(224, True) - val_transform = image_transform(224, False) - test_transform = image_transform(224, False) - - self.train_dataset = flickr30k_CNA(self.train_root, self.train_filename, transform=train_transform) - self.val_dataset = flickr30k_CNA(self.val_root, self.val_filename, transform=val_transform) - self.test_dataset = flickr30k_CNA(self.test_root, self.test_filename, transform=test_transform) - - def train_dataloader(self): - return 
DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers) - - def val_dataloader(self): - return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers) - - def test_dataloader(self): - return DataLoader(self.test_dataset, batch_size=self.batch_size, num_workers=self.num_workers) diff --git a/spaces/Harikumar4/MyGenApp/app.py b/spaces/Harikumar4/MyGenApp/app.py deleted file mode 100644 index b2f970f76a2864b13a66b4224aa0a8673d334f2b..0000000000000000000000000000000000000000 --- a/spaces/Harikumar4/MyGenApp/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """Meet Riya, your youthful and witty personal assistant! At 21 years old, she's full of energy and always eager to help. Riya's goal is to assist you with any questions or problems you might have. Her enthusiasm shines through in every response, making interactions with her enjoyable and engaging. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch(share=True) #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/bart/README.glue.md b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/bart/README.glue.md deleted file mode 100644 index a010934e1e6dec491eb1c704ec02ba7405760510..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/bart/README.glue.md +++ /dev/null @@ -1,99 +0,0 @@ -# Fine-tuning BART on GLUE tasks - -### 1) Download the data from GLUE website (https://gluebenchmark.com/tasks) using following commands: -```bash -wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py -python download_glue_data.py --data_dir glue_data --tasks all -``` - -### 2) Preprocess GLUE task data (same as RoBERTa): -```bash -./examples/roberta/preprocess_GLUE_tasks.sh glue_data -``` -`glue_task_name` is one of the following: -`{ALL, QQP, MNLI, QNLI, MRPC, RTE, STS-B, SST-2, CoLA}` -Use `ALL` for preprocessing all the glue tasks. - -### 3) Fine-tuning on GLUE task: -Example fine-tuning cmd for `RTE` task -```bash -TOTAL_NUM_UPDATES=2036 # 10 epochs through RTE for bsz 16 -WARMUP_UPDATES=61 # 6 percent of the number of updates -LR=1e-05 # Peak LR for polynomial LR scheduler. -NUM_CLASSES=2 -MAX_SENTENCES=16 # Batch size. 
-BART_PATH=/path/to/bart/model.pt - -CUDA_VISIBLE_DEVICES=0,1 fairseq-train RTE-bin/ \ - --restore-file $BART_PATH \ - --batch-size $MAX_SENTENCES \ - --max-tokens 4400 \ - --task sentence_prediction \ - --add-prev-output-tokens \ - --layernorm-embedding \ - --share-all-embeddings \ - --share-decoder-input-output-embed \ - --reset-optimizer --reset-dataloader --reset-meters \ - --required-batch-size-multiple 1 \ - --init-token 0 \ - --arch bart_large \ - --criterion sentence_prediction \ - --num-classes $NUM_CLASSES \ - --dropout 0.1 --attention-dropout 0.1 \ - --weight-decay 0.01 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-08 \ - --clip-norm 0.0 \ - --lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \ - --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \ - --max-epoch 10 \ - --find-unused-parameters \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric; -``` - -For each of the GLUE task, you will need to use following cmd-line arguments: - -Model | MNLI | QNLI | QQP | RTE | SST-2 | MRPC | CoLA | STS-B ----|---|---|---|---|---|---|---|--- -`--num-classes` | 3 | 2 | 2 | 2 | 2 | 2 | 2 | 1 -`--lr` | 5e-6 | 1e-5 | 1e-5 | 1e-5 | 5e-6 | 2e-5 | 2e-5 | 2e-5 -`bsz` | 128 | 32 | 32 | 32 | 128 | 64 | 64 | 32 -`--total-num-update` | 30968 | 33112 | 113272 | 1018 | 5233 | 1148 | 1334 | 1799 -`--warmup-updates` | 1858 | 1986 | 6796 | 61 | 314 | 68 | 80 | 107 - -For `STS-B` additionally add `--regression-target --best-checkpoint-metric loss` and remove `--maximize-best-checkpoint-metric`. - -**Note:** - -a) `--total-num-updates` is used by `--polynomial_decay` scheduler and is calculated for `--max-epoch=10` and `--batch-size=32/64/128` depending on the task. - -b) Above cmd-args and hyperparams are tested on Nvidia `V100` GPU with `32gb` of memory for each task. Depending on the GPU memory resources available to you, you can use increase `--update-freq` and reduce `--batch-size`. - -### Inference on GLUE task -After training the model as mentioned in previous step, you can perform inference with checkpoints in `checkpoints/` directory using following python code snippet: - -```python -from fairseq.models.bart import BARTModel - -bart = BARTModel.from_pretrained( - 'checkpoints/', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='RTE-bin' -) - -label_fn = lambda label: bart.task.label_dictionary.string( - [label + bart.task.label_dictionary.nspecial] -) -ncorrect, nsamples = 0, 0 -bart.cuda() -bart.eval() -with open('glue_data/RTE/dev.tsv') as fin: - fin.readline() - for index, line in enumerate(fin): - tokens = line.strip().split('\t') - sent1, sent2, target = tokens[1], tokens[2], tokens[3] - tokens = bart.encode(sent1, sent2) - prediction = bart.predict('sentence_classification_head', tokens).argmax().item() - prediction_label = label_fn(prediction) - ncorrect += int(prediction_label == target) - nsamples += 1 -print('| Accuracy: ', float(ncorrect)/float(nsamples)) -``` diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tokenizer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tokenizer.py deleted file mode 100644 index 42131f7b1d334020c3b48a6e44d4139f7c62ad28..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tokenizer.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import re - - -SPACE_NORMALIZER = re.compile(r"\s+") - - -def tokenize_line(line): - line = SPACE_NORMALIZER.sub(" ", line) - line = line.strip() - return line.split() diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/linear.955f0731.js b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/linear.955f0731.js deleted file mode 100644 index e97ca50654834a232711dd38acc774bffaf560b0..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/linear.955f0731.js +++ /dev/null @@ -1,2 +0,0 @@ -function W(n,t){return n==null||t==null?NaN:nt?1:n>=t?0:NaN}function En(n){let t=n,e=n,r=n;n.length!==2&&(t=(a,u)=>n(a)-u,e=W,r=(a,u)=>W(n(a),u));function i(a,u,s=0,c=a.length){if(s>>1;r(a[h],u)<0?s=h+1:c=h}while(s>>1;r(a[h],u)<=0?s=h+1:c=h}while(ss&&t(a[h-1],u)>-t(a[h],u)?h-1:h}return{left:i,center:o,right:f}}function Un(n){return n===null?NaN:+n}function*Qt(n,t){if(t===void 0)for(let e of n)e!=null&&(e=+e)>=e&&(yield e);else{let e=-1;for(let r of n)(r=t(r,++e,n))!=null&&(r=+r)>=r&&(yield r)}}const Pn=En(W),Yn=Pn.right,Ut=Pn.left;En(Un).center;var Jn=Yn,nn=Math.sqrt(50),tn=Math.sqrt(10),en=Math.sqrt(2);function Kn(n,t,e){var r,i=-1,f,o,a;if(t=+t,n=+n,e=+e,n===t&&e>0)return[n];if((r=t0){let u=Math.round(n/a),s=Math.round(t/a);for(u*at&&--s,o=new Array(f=s-u+1);++it&&--s,o=new Array(f=s-u+1);++i=0?(f>=nn?10:f>=tn?5:f>=en?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(f>=nn?10:f>=tn?5:f>=en?2:1)}function Wn(n,t,e){var r=Math.abs(t-n)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),f=r/i;return f>=nn?i*=10:f>=tn?i*=5:f>=en&&(i*=2),t=1e21?n.toLocaleString("en").replace(/,/g,""):n.toString(10)}function G(n,t){if((e=(n=t?n.toExponential(t-1):n.toExponential()).indexOf("e"))<0)return null;var e,r=n.slice(0,e);return[r.length>1?r[0]+r.slice(2):r,+n.slice(e+1)]}function L(n){return n=G(Math.abs(n)),n?n[1]:NaN}function tt(n,t){return function(e,r){for(var i=e.length,f=[],o=0,a=n[0],u=0;i>0&&a>0&&(u+a+1>r&&(a=Math.max(1,r-u)),f.push(e.substring(i-=a,i+a)),!((u+=a+1)>r));)a=n[o=(o+1)%n.length];return f.reverse().join(t)}}function et(n){return function(t){return t.replace(/[0-9]/g,function(e){return n[+e]})}}var rt=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Z(n){if(!(t=rt.exec(n)))throw new Error("invalid format: "+n);var t;return new sn({fill:t[1],align:t[2],sign:t[3],symbol:t[4],zero:t[5],width:t[6],comma:t[7],precision:t[8]&&t[8].slice(1),trim:t[9],type:t[10]})}Z.prototype=sn.prototype;function sn(n){this.fill=n.fill===void 0?" 
":n.fill+"",this.align=n.align===void 0?">":n.align+"",this.sign=n.sign===void 0?"-":n.sign+"",this.symbol=n.symbol===void 0?"":n.symbol+"",this.zero=!!n.zero,this.width=n.width===void 0?void 0:+n.width,this.comma=!!n.comma,this.precision=n.precision===void 0?void 0:+n.precision,this.trim=!!n.trim,this.type=n.type===void 0?"":n.type+""}sn.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(this.width===void 0?"":Math.max(1,this.width|0))+(this.comma?",":"")+(this.precision===void 0?"":"."+Math.max(0,this.precision|0))+(this.trim?"~":"")+this.type};function it(n){n:for(var t=n.length,e=1,r=-1,i;e0&&(r=0);break}return r>0?n.slice(0,r)+n.slice(i+1):n}var qn;function at(n,t){var e=G(n,t);if(!e)return n+"";var r=e[0],i=e[1],f=i-(qn=Math.max(-8,Math.min(8,Math.floor(i/3)))*3)+1,o=r.length;return f===o?r:f>o?r+new Array(f-o+1).join("0"):f>0?r.slice(0,f)+"."+r.slice(f):"0."+new Array(1-f).join("0")+G(n,Math.max(0,t+f-1))[0]}function xn(n,t){var e=G(n,t);if(!e)return n+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}var mn={"%":(n,t)=>(n*100).toFixed(t),b:n=>Math.round(n).toString(2),c:n=>n+"",d:nt,e:(n,t)=>n.toExponential(t),f:(n,t)=>n.toFixed(t),g:(n,t)=>n.toPrecision(t),o:n=>Math.round(n).toString(8),p:(n,t)=>xn(n*100,t),r:xn,s:at,X:n=>Math.round(n).toString(16).toUpperCase(),x:n=>Math.round(n).toString(16)};function bn(n){return n}var pn=Array.prototype.map,yn=["y","z","a","f","p","n","\xB5","m","","k","M","G","T","P","E","Z","Y"];function ft(n){var t=n.grouping===void 0||n.thousands===void 0?bn:tt(pn.call(n.grouping,Number),n.thousands+""),e=n.currency===void 0?"":n.currency[0]+"",r=n.currency===void 0?"":n.currency[1]+"",i=n.decimal===void 0?".":n.decimal+"",f=n.numerals===void 0?bn:et(pn.call(n.numerals,String)),o=n.percent===void 0?"%":n.percent+"",a=n.minus===void 0?"\u2212":n.minus+"",u=n.nan===void 0?"NaN":n.nan+"";function s(h){h=Z(h);var l=h.fill,p=h.align,g=h.sign,k=h.symbol,v=h.zero,N=h.width,R=h.comma,y=h.precision,H=h.trim,m=h.type;m==="n"?(R=!0,m="g"):mn[m]||(y===void 0&&(y=12),H=!0,m="g"),(v||l==="0"&&p==="=")&&(v=!0,l="0",p="=");var Vn=k==="$"?e:k==="#"&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",Xn=k==="$"?r:/[%p]/.test(m)?o:"",ln=mn[m],Qn=/[defgprs%]/.test(m);y=y===void 0?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,y)):Math.max(0,Math.min(20,y));function dn(d){var A=Vn,b=Xn,E,gn,F;if(m==="c")b=ln(d)+b,d="";else{d=+d;var $=d<0||1/d<0;if(d=isNaN(d)?u:ln(Math.abs(d),y),H&&(d=it(d)),$&&+d==0&&g!=="+"&&($=!1),A=($?g==="("?g:a:g==="-"||g==="("?"":g)+A,b=(m==="s"?yn[8+qn/3]:"")+b+($&&g==="("?")":""),Qn){for(E=-1,gn=d.length;++EF||F>57){b=(F===46?i+d.slice(E+1):d.slice(E))+b,d=d.slice(0,E);break}}}R&&!v&&(d=t(d,1/0));var B=A.length+d.length+b.length,_=B>1)+A+d+b+_.slice(B);break;default:d=_+A+d+b;break}return f(d)}return dn.toString=function(){return h+""},dn}function c(h,l){var p=s((h=Z(h),h.type="f",h)),g=Math.max(-8,Math.min(8,Math.floor(L(l)/3)))*3,k=Math.pow(10,-g),v=yn[8+g/3];return function(N){return p(k*N)+v}}return{format:s,formatPrefix:c}}var D,Ln,Hn;ot({thousands:",",grouping:[3],currency:["$",""]});function ot(n){return D=ft(n),Ln=D.format,Hn=D.formatPrefix,D}function ut(n){return Math.max(0,-L(Math.abs(n)))}function st(n,t){return Math.max(0,Math.max(-8,Math.min(8,Math.floor(L(t)/3)))*3-L(Math.abs(n)))}function ht(n,t){return n=Math.abs(n),t=Math.abs(t)-n,Math.max(0,L(t)-L(n))+1}const rn=Math.PI,an=2*rn,S=1e-6,ct=an-S;function 
fn(){this._x0=this._y0=this._x1=this._y1=null,this._=""}function In(){return new fn}fn.prototype=In.prototype={constructor:fn,moveTo:function(n,t){this._+="M"+(this._x0=this._x1=+n)+","+(this._y0=this._y1=+t)},closePath:function(){this._x1!==null&&(this._x1=this._x0,this._y1=this._y0,this._+="Z")},lineTo:function(n,t){this._+="L"+(this._x1=+n)+","+(this._y1=+t)},quadraticCurveTo:function(n,t,e,r){this._+="Q"+ +n+","+ +t+","+(this._x1=+e)+","+(this._y1=+r)},bezierCurveTo:function(n,t,e,r,i,f){this._+="C"+ +n+","+ +t+","+ +e+","+ +r+","+(this._x1=+i)+","+(this._y1=+f)},arcTo:function(n,t,e,r,i){n=+n,t=+t,e=+e,r=+r,i=+i;var f=this._x1,o=this._y1,a=e-n,u=r-t,s=f-n,c=o-t,h=s*s+c*c;if(i<0)throw new Error("negative radius: "+i);if(this._x1===null)this._+="M"+(this._x1=n)+","+(this._y1=t);else if(h>S)if(!(Math.abs(c*a-u*s)>S)||!i)this._+="L"+(this._x1=n)+","+(this._y1=t);else{var l=e-f,p=r-o,g=a*a+u*u,k=l*l+p*p,v=Math.sqrt(g),N=Math.sqrt(h),R=i*Math.tan((rn-Math.acos((g+h-k)/(2*v*N)))/2),y=R/N,H=R/v;Math.abs(y-1)>S&&(this._+="L"+(n+y*s)+","+(t+y*c)),this._+="A"+i+","+i+",0,0,"+ +(c*l>s*p)+","+(this._x1=n+H*a)+","+(this._y1=t+H*u)}},arc:function(n,t,e,r,i,f){n=+n,t=+t,e=+e,f=!!f;var o=e*Math.cos(r),a=e*Math.sin(r),u=n+o,s=t+a,c=1^f,h=f?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);this._x1===null?this._+="M"+u+","+s:(Math.abs(this._x1-u)>S||Math.abs(this._y1-s)>S)&&(this._+="L"+u+","+s),e&&(h<0&&(h=h%an+an),h>ct?this._+="A"+e+","+e+",0,1,"+c+","+(n-o)+","+(t-a)+"A"+e+","+e+",0,1,"+c+","+(this._x1=u)+","+(this._y1=s):h>S&&(this._+="A"+e+","+e+",0,"+ +(h>=rn)+","+c+","+(this._x1=n+e*Math.cos(i))+","+(this._y1=t+e*Math.sin(i))))},rect:function(n,t,e,r){this._+="M"+(this._x0=this._x1=+n)+","+(this._y0=this._y1=+t)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};function P(n){return function(){return n}}function lt(n){return typeof n=="object"&&"length"in n?n:Array.from(n)}function Tn(n){this._context=n}Tn.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||this._line!==0&&this._point===1)&&this._context.closePath(),this._line=1-this._line},point:function(n,t){switch(n=+n,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(n,t):this._context.moveTo(n,t);break;case 1:this._point=2;default:this._context.lineTo(n,t);break}}};function dt(n){return new Tn(n)}function gt(n){return n[0]}function xt(n){return n[1]}function Yt(n,t){var e=P(!0),r=null,i=dt,f=null;n=typeof n=="function"?n:n===void 0?gt:P(n),t=typeof t=="function"?t:t===void 0?xt:P(t);function o(a){var u,s=(a=lt(a)).length,c,h=!1,l;for(r==null&&(f=i(l=In())),u=0;u<=s;++u)!(u>8&15|t>>4&240,t>>4&15|t&240,(t&15)<<4|t&15,1):e===8?O(t>>24&255,t>>16&255,t>>8&255,(t&255)/255):e===4?O(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|t&240,((t&15)<<4|t&15)/255):null):(t=pt.exec(n))?new x(t[1],t[2],t[3],1):(t=yt.exec(n))?new x(t[1]*255/100,t[2]*255/100,t[3]*255/100,1):(t=wt.exec(n))?O(t[1],t[2],t[3],t[4]):(t=Mt.exec(n))?O(t[1]*255/100,t[2]*255/100,t[3]*255/100,t[4]):(t=vt.exec(n))?An(t[1],t[2]/100,t[3]/100,1):(t=_t.exec(n))?An(t[1],t[2]/100,t[3]/100,t[4]):wn.hasOwnProperty(n)?_n(wn[n]):n==="transparent"?new x(NaN,NaN,NaN,0):null}function _n(n){return new x(n>>16&255,n>>8&255,n&255,1)}function O(n,t,e,r){return r<=0&&(n=t=e=NaN),new x(n,t,e,r)}function kt(n){return n instanceof C||(n=z(n)),n?(n=n.rgb(),new x(n.r,n.g,n.b,n.opacity)):new x}function X(n,t,e,r){return arguments.length===1?kt(n):new x(n,t,e,r??1)}function 
x(n,t,e,r){this.r=+n,this.g=+t,this.b=+e,this.opacity=+r}hn(x,X,zn(C,{brighter:function(n){return n=n==null?V:Math.pow(V,n),new x(this.r*n,this.g*n,this.b*n,this.opacity)},darker:function(n){return n=n==null?I:Math.pow(I,n),new x(this.r*n,this.g*n,this.b*n,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Nn,formatHex:Nn,formatRgb:kn,toString:kn}));function Nn(){return"#"+Y(this.r)+Y(this.g)+Y(this.b)}function kn(){var n=this.opacity;return n=isNaN(n)?1:Math.max(0,Math.min(1,n)),(n===1?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(n===1?")":", "+n+")")}function Y(n){return n=Math.max(0,Math.min(255,Math.round(n)||0)),(n<16?"0":"")+n.toString(16)}function An(n,t,e,r){return r<=0?n=t=e=NaN:e<=0||e>=1?n=t=NaN:t<=0&&(n=NaN),new w(n,t,e,r)}function Cn(n){if(n instanceof w)return new w(n.h,n.s,n.l,n.opacity);if(n instanceof C||(n=z(n)),!n)return new w;if(n instanceof w)return n;n=n.rgb();var t=n.r/255,e=n.g/255,r=n.b/255,i=Math.min(t,e,r),f=Math.max(t,e,r),o=NaN,a=f-i,u=(f+i)/2;return a?(t===f?o=(e-r)/a+(e0&&u<1?0:o,new w(o,a,u,n.opacity)}function At(n,t,e,r){return arguments.length===1?Cn(n):new w(n,t,e,r??1)}function w(n,t,e,r){this.h=+n,this.s=+t,this.l=+e,this.opacity=+r}hn(w,At,zn(C,{brighter:function(n){return n=n==null?V:Math.pow(V,n),new w(this.h,this.s,this.l*n,this.opacity)},darker:function(n){return n=n==null?I:Math.pow(I,n),new w(this.h,this.s,this.l*n,this.opacity)},rgb:function(){var n=this.h%360+(this.h<0)*360,t=isNaN(n)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*t,i=2*e-r;return new x(J(n>=240?n-240:n+120,i,r),J(n,i,r),J(n<120?n+240:n-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var n=this.opacity;return n=isNaN(n)?1:Math.max(0,Math.min(1,n)),(n===1?"hsl(":"hsla(")+(this.h||0)+", "+(this.s||0)*100+"%, "+(this.l||0)*100+"%"+(n===1?")":", "+n+")")}}));function J(n,t,e){return(n<60?t+(e-t)*n/60:n<180?e:n<240?t+(e-t)*(240-n)/60:t)*255}function Fn(n,t,e,r,i){var f=n*n,o=f*n;return((1-3*n+3*f-o)*t+(4-6*f+3*o)*e+(1+3*n+3*f-3*o)*r+o*i)/6}function St(n){var t=n.length-1;return function(e){var r=e<=0?e=0:e>=1?(e=1,t-1):Math.floor(e*t),i=n[r],f=n[r+1],o=r>0?n[r-1]:2*i-f,a=r()=>n;function $n(n,t){return function(e){return n+e*t}}function Et(n,t,e){return n=Math.pow(n,e),t=Math.pow(t,e)-n,e=1/e,function(r){return Math.pow(n+r*t,e)}}function Kt(n,t){var e=t-n;return e?$n(n,e>180||e<-180?e-360*Math.round(e/360):e):U(isNaN(n)?t:n)}function Pt(n){return(n=+n)==1?Bn:function(t,e){return e-t?Et(t,e,n):U(isNaN(t)?e:t)}}function Bn(n,t){var e=t-n;return e?$n(n,e):U(isNaN(n)?t:n)}var Sn=function n(t){var e=Pt(t);function r(i,f){var o=e((i=X(i)).r,(f=X(f)).r),a=e(i.g,f.g),u=e(i.b,f.b),s=Bn(i.opacity,f.opacity);return function(c){return i.r=o(c),i.g=a(c),i.b=u(c),i.opacity=s(c),i+""}}return r.gamma=n,r}(1);function Dn(n){return function(t){var e=t.length,r=new Array(e),i=new Array(e),f=new Array(e),o,a;for(o=0;oe&&(f=t.slice(e,f),a[o]?a[o]+=f:a[++o]=f),(r=r[0])===(i=i[0])?a[o]?a[o]+=i:a[++o]=i:(a[++o]=null,u.push({i:o,x:Q(r,i)})),e=K.lastIndex;return et&&(e=n,n=t,t=e),function(r){return Math.max(n,Math.min(t,r))}}function $t(n,t,e){var r=n[0],i=n[1],f=t[0],o=t[1];return i2?Bt:$t,u=s=null,h}function h(l){return 
l==null||isNaN(l=+l)?f:(u||(u=a(n.map(r),t,e)))(r(o(l)))}return h.invert=function(l){return o(i((s||(s=a(t,n.map(r),Q)))(l)))},h.domain=function(l){return arguments.length?(n=Array.from(l,Ct),c()):n.slice()},h.range=function(l){return arguments.length?(t=Array.from(l),c()):t.slice()},h.rangeRound=function(l){return t=Array.from(l),e=Tt,c()},h.clamp=function(l){return arguments.length?(o=l?!0:j,c()):o!==j},h.interpolate=function(l){return arguments.length?(e=l,c()):e},h.unknown=function(l){return arguments.length?(f=l,h):f},function(l,p){return r=l,i=p,c()}}function Gt(){return Ot()(j,j)}function Zt(n,t,e,r){var i=Wn(n,t,e),f;switch(r=Z(r??",f"),r.type){case"s":{var o=Math.max(Math.abs(n),Math.abs(t));return r.precision==null&&!isNaN(f=st(i,o))&&(r.precision=f),Hn(r,o)}case"":case"e":case"g":case"p":case"r":{r.precision==null&&!isNaN(f=ht(i,Math.max(Math.abs(n),Math.abs(t))))&&(r.precision=f-(r.type==="e"));break}case"f":case"%":{r.precision==null&&!isNaN(f=ut(i))&&(r.precision=f-(r.type==="%")*2);break}}return Ln(r)}function Vt(n){var t=n.domain;return n.ticks=function(e){var r=t();return Kn(r[0],r[r.length-1],e??10)},n.tickFormat=function(e,r){var i=t();return Zt(i[0],i[i.length-1],e??10,r)},n.nice=function(e){e==null&&(e=10);var r=t(),i=0,f=r.length-1,o=r[i],a=r[f],u,s,c=10;for(a0;){if(s=jn(o,a,e),s===u)return r[i]=o,r[f]=a,t(r);if(s>0)o=Math.floor(o/s)*s,a=Math.ceil(a/s)*s;else if(s<0)o=Math.ceil(o*s)/s,a=Math.floor(a*s)/s;else break;u=s}return n},n}function Xt(){var n=Gt();return n.copy=function(){return Dt(n,Xt())},mt.apply(n,arguments),Vt(n)}export{Yn as $,At as A,Bn as B,C,cn as D,te as E,St as F,Rt as G,jt as H,On as I,qt as J,Tt as K,It as L,Sn as M,Wt as N,ne as O,Ct as P,Vt as Q,x as R,Ot as S,Dt as T,Kn as U,j as V,Jn as W,Gt as X,Jt as Y,Xt as Z,Yt as _,W as a,Zt as a0,X as a1,Ut as a2,Qt as b,En as c,ht as d,st as e,Z as f,Ln as g,Hn as h,ft as i,P as j,In as k,lt as l,dt as m,Un as n,mt as o,ut as p,hn as q,kt as r,zn as s,Wn as t,V as u,I as v,Kt as w,gt as x,xt as y,Q as z}; -//# sourceMappingURL=linear.955f0731.js.map diff --git a/spaces/HugoDzz/super-godot-galaxy/svelte.config.js b/spaces/HugoDzz/super-godot-galaxy/svelte.config.js deleted file mode 100644 index a2f7bad72da5bce8e4e46f372c64e2704e49d37d..0000000000000000000000000000000000000000 --- a/spaces/HugoDzz/super-godot-galaxy/svelte.config.js +++ /dev/null @@ -1,19 +0,0 @@ - -import adapter from '@sveltejs/adapter-static'; -import { vitePreprocess } from '@sveltejs/kit/vite'; - -/** @type {import('@sveltejs/kit').Config} */ -const config = { - // Consult https://kit.svelte.dev/docs/integrations#preprocessors - // for more information about preprocessors - preprocess: vitePreprocess(), - - kit: { - // adapter-auto only supports some environments, see https://kit.svelte.dev/docs/adapter-auto for a list. - // If your environment is not supported or you settled on a specific environment, switch out the adapter. - // See https://kit.svelte.dev/docs/adapters for more information about adapters. 
- adapter: adapter() - } -}; - -export default config; diff --git a/spaces/ICML2022/OFA/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh b/spaces/ICML2022/OFA/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh deleted file mode 100644 index e3efeb21d302ef8d9eae8f1d4b06434c593705f6..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -echo 'Cloning Moses github repository (for tokenization scripts)...' -git clone https://github.com/moses-smt/mosesdecoder.git - -SCRIPTS=mosesdecoder/scripts -TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl -CLEAN=$SCRIPTS/training/clean-corpus-n.perl -REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl - -URLS=( - "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz" - "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz" - "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz" - "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz" - "http://data.statmt.org/wmt17/translation-task/dev.tgz" - "http://statmt.org/wmt14/test-full.tgz" -) -CORPORA=( - "training/europarl-v7.de-en" - "commoncrawl.de-en" - "training-parallel-nc-v13/news-commentary-v13.de-en" - "rapid2016.de-en" -) - -if [ ! -d "$SCRIPTS" ]; then - echo "Please set SCRIPTS variable correctly to point to Moses scripts." - exit -fi - -src=en -tgt=de -lang=en-de -prep=wmt18_en_de -tmp=$prep/tmp -orig=orig -dev=dev/newstest2012 -codes=32000 -bpe=bpe.32k - -mkdir -p $orig $tmp $prep $bpe - -cd $orig - -for ((i=0;i<${#URLS[@]};++i)); do - url=${URLS[i]} - file=$(basename $url) - if [ -f $file ]; then - echo "$file already exists, skipping download" - else - wget "$url" - if [ -f $file ]; then - echo "$url successfully downloaded." - else - echo "$url not successfully downloaded." - exit 1 - fi - if [ ${file: -4} == ".tgz" ]; then - tar zxvf $file - elif [ ${file: -4} == ".tar" ]; then - tar xvf $file - fi - fi -done -cd .. - -echo "pre-processing train data..." -for l in $src $tgt; do - rm -rf $tmp/train.tags.$lang.tok.$l - for f in "${CORPORA[@]}"; do - cat $orig/$f.$l | \ - perl $REM_NON_PRINT_CHAR | \ - perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/train.tags.$lang.tok.$l - done -done - -echo "pre-processing test data..." -for l in $src $tgt; do - if [ "$l" == "$src" ]; then - t="src" - else - t="ref" - fi - grep '\s*//g' | \ - sed -e 's/\s*<\/seg>\s*//g' | \ - sed -e "s/\’/\'/g" | \ - perl $TOKENIZER -threads 8 -l $l -no-escape > $tmp/test.$l - echo "" -done - -# apply length filtering before BPE -perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train 1 100 - -# use newstest2012 for valid -echo "pre-processing valid data..." 
-for l in $src $tgt; do - rm -rf $tmp/valid.$l - cat $orig/$dev.$l | \ - perl $REM_NON_PRINT_CHAR | \ - perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/valid.$l -done - -mkdir output -mv $tmp/{train,valid,test}.{$src,$tgt} output - -#BPE -git clone https://github.com/glample/fastBPE.git -pushd fastBPE -g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast -popd -fastBPE/fast learnbpe $codes output/train.$src output/train.$tgt > $bpe/codes -for split in {train,valid,test}; do for lang in {en,de}; do fastBPE/fast applybpe $bpe/$split.$lang output/$split.$lang $bpe/codes; done; done diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_multihead_attention.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_multihead_attention.py deleted file mode 100644 index 3cbd9d6785886e319aab0601517e27df733b6f97..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/sparse_multihead_attention.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import torch - -from .multihead_attention import MultiheadAttention - - -class SparseMultiheadAttention(MultiheadAttention): - """Sparse Multi-Headed Attention. - - "Generating Long Sequences with Sparse Transformers". Implements - fixed factorized self attention, where l=stride and c=expressivity. - A(1) includes all words in the stride window and A(2) takes a summary of c - words from the end of each stride window. - If is_bidirectional=False, we do not include any words past the current word, - as in the paper. - """ - - def __init__( - self, - embed_dim, - num_heads, - kdim=None, - vdim=None, - dropout=0.0, - bias=True, - add_bias_kv=False, - add_zero_attn=False, - self_attention=False, - encoder_decoder_attention=False, - stride=32, - expressivity=8, - is_bidirectional=True, - ): - - super().__init__( - embed_dim, - num_heads, - kdim, - vdim, - dropout, - bias, - add_bias_kv, - add_zero_attn, - self_attention, - encoder_decoder_attention, - ) - - self.is_bidirectional = is_bidirectional - self.stride = stride - self.expressivity = expressivity - assert self.stride > 0 and self.stride >= self.expressivity - - # Used for Ai(2) calculations - beginning of [l-c, l] range - def compute_checkpoint(self, word_index): - if word_index % self.stride == 0 and word_index != 0: - checkpoint_index = word_index - self.expressivity - else: - checkpoint_index = ( - math.floor(word_index / self.stride) * self.stride - + self.stride - - self.expressivity - ) - return checkpoint_index - - # Computes Ai(2) - def compute_subset_summaries(self, absolute_max): - checkpoint_index = self.compute_checkpoint(0) - subset_two = set() - while checkpoint_index <= absolute_max - 1: - summary = set( - range( - checkpoint_index, - min(checkpoint_index + self.expressivity + 1, absolute_max), - ) - ) - subset_two = subset_two.union(summary) - checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride) - return subset_two - - # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf - def compute_fixed_attention_subset(self, word_index, tgt_len): - # +1s account for range function; [min, max) -> [min, max] - if not self.is_bidirectional: - absolute_max = word_index + 1 - else: - absolute_max = tgt_len - - # Subset 1 - whole window - rounded_index = ( - math.floor((word_index + self.stride) / self.stride) * self.stride 
- ) - if word_index % self.stride == 0 and word_index != 0: - subset_one = set( - range(word_index - self.stride, min(absolute_max, word_index + 1)) - ) - else: - subset_one = set( - range( - max(0, rounded_index - self.stride), - min(absolute_max, rounded_index + 1), - ) - ) - - # Subset 2 - summary per window - # If bidirectional, subset 2 is the same for every index - subset_two = set() - if not self.is_bidirectional: - subset_two = self.compute_subset_summaries(absolute_max) - - return subset_one.union(subset_two) - - # Compute sparse mask - if bidirectional, can pre-compute and store - def buffered_sparse_mask(self, tensor, tgt_len, src_len): - assert tgt_len > self.stride - sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf")) - - # If bidirectional, subset 2 is the same for every index - subset_summaries = set() - if self.is_bidirectional: - subset_summaries = self.compute_subset_summaries(tgt_len) - - for i in range(tgt_len): - fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len) - fixed_attention_subset = fixed_attention_subset.union(subset_summaries) - included_word_indices = torch.LongTensor(list(fixed_attention_subset)) - sparse_mask[i].index_fill_(0, included_word_indices, 0) - return sparse_mask.type_as(tensor) - - def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): - sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len) - sparse_mask = sparse_mask.unsqueeze(0).expand( - bsz * self.num_heads, tgt_len, src_len - ) - attn_weights += sparse_mask diff --git a/spaces/ICML2022/resefa/utils/file_transmitters/dummy_file_transmitter.py b/spaces/ICML2022/resefa/utils/file_transmitters/dummy_file_transmitter.py deleted file mode 100644 index c553f4082061da9e6d8194dbbc2ce16f7a122554..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/resefa/utils/file_transmitters/dummy_file_transmitter.py +++ /dev/null @@ -1,34 +0,0 @@ -# python3.7 -"""Contains the class of dummy file transmitter. - -This file transmitter has all expected data transmission functions but behaves -silently, which is very useful in multi-processing mode. Only the chief process -can have the file transmitter with normal behavior. -""" - -from .base_file_transmitter import BaseFileTransmitter - -__all__ = ['DummyFileTransmitter'] - - -class DummyFileTransmitter(BaseFileTransmitter): - """Implements a dummy transmitter which transmits nothing.""" - - @staticmethod - def download_hard(src, dst): - return - - @staticmethod - def download_soft(src, dst): - return - - @staticmethod - def upload(src, dst): - return - - @staticmethod - def delete(path): - return - - def make_remote_dir(self, directory): - return diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/modules/losses/contperceptual.py b/spaces/Iceclear/StableSR/StableSR/ldm/modules/losses/contperceptual.py deleted file mode 100644 index aa8da1cf344ab7ff8d7e5fd4deb0dbfeb54536e8..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/ldm/modules/losses/contperceptual.py +++ /dev/null @@ -1,151 +0,0 @@ -import torch -import torch.nn as nn - -from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
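# The SparseMultiheadAttention module above implements the "fixed" factorized
# pattern from Child et al. (2019): each query attends to its own stride window
# (A1) plus a summary of the last `expressivity` tokens of every earlier window (A2).
# A minimal standalone sketch of that mask for a tiny causal case follows; it is
# illustrative only and not part of the deleted fairseq file (names here are made up).
import torch

def toy_fixed_sparse_mask(seq_len: int, stride: int, expressivity: int) -> torch.Tensor:
    """Return a (seq_len, seq_len) mask with 0 for attended positions, -inf otherwise."""
    mask = torch.full((seq_len, seq_len), float("-inf"))
    for i in range(seq_len):
        window_start = (i // stride) * stride
        # A(1): positions inside the current stride window, up to and including i.
        allowed = set(range(window_start, i + 1))
        # A(2): the last `expressivity` positions of every previous window.
        for w_end in range(stride, window_start + 1, stride):
            allowed.update(range(max(0, w_end - expressivity), w_end))
        mask[i, sorted(allowed)] = 0.0
    return mask

# Example: an 8-token sequence with stride 4 and expressivity 2.
print(toy_fixed_sparse_mask(8, stride=4, expressivity=2))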
- - -class LPIPSWithDiscriminator(nn.Module): - def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, - disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, - perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, - disc_loss="hinge"): - - super().__init__() - assert disc_loss in ["hinge", "vanilla"] - self.kl_weight = kl_weight - self.pixel_weight = pixelloss_weight - self.perceptual_loss = LPIPS().eval() - self.perceptual_weight = perceptual_weight - # output log variance - self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) - - self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, - n_layers=disc_num_layers, - use_actnorm=use_actnorm - ).apply(weights_init) - self.discriminator_iter_start = disc_start - self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss - self.disc_factor = disc_factor - self.discriminator_weight = disc_weight - self.disc_conditional = disc_conditional - - def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): - if last_layer is not None: - nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] - else: - nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] - - d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) - d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() - d_weight = d_weight * self.discriminator_weight - return d_weight - - def forward(self, inputs, reconstructions, posteriors, optimizer_idx, - global_step, last_layer=None, cond=None, split="train", - weights=None, return_dic=False): - rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) - if self.perceptual_weight > 0: - p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) - rec_loss = rec_loss + self.perceptual_weight * p_loss - - nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar - weighted_nll_loss = nll_loss - if weights is not None: - weighted_nll_loss = weights*nll_loss - weighted_nll_loss = torch.mean(weighted_nll_loss) / weighted_nll_loss.shape[0] - nll_loss = torch.mean(nll_loss) / nll_loss.shape[0] - if self.kl_weight>0: - kl_loss = posteriors.kl() - kl_loss = torch.mean(kl_loss) / kl_loss.shape[0] - - # now the GAN part - if optimizer_idx == 0: - # generator update - if cond is None: - assert not self.disc_conditional - logits_fake = self.discriminator(reconstructions.contiguous()) - else: - assert self.disc_conditional - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) - g_loss = -torch.mean(logits_fake) - - if self.disc_factor > 0.0: - try: - d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) - except RuntimeError: - # assert not self.training - d_weight = torch.tensor(1.0) * self.discriminator_weight - else: - # d_weight = torch.tensor(0.0) - d_weight = torch.tensor(0.0) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - if self.kl_weight>0: - loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss - log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), - "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), - 
"{}/rec_loss".format(split): rec_loss.detach().mean(), - "{}/d_weight".format(split): d_weight.detach(), - "{}/disc_factor".format(split): torch.tensor(disc_factor), - "{}/g_loss".format(split): g_loss.detach().mean(), - } - if return_dic: - loss_dic = {} - loss_dic['total_loss'] = loss.clone().detach().mean() - loss_dic['logvar'] = self.logvar.detach() - loss_dic['kl_loss'] = kl_loss.detach().mean() - loss_dic['nll_loss'] = nll_loss.detach().mean() - loss_dic['rec_loss'] = rec_loss.detach().mean() - loss_dic['d_weight'] = d_weight.detach() - loss_dic['disc_factor'] = torch.tensor(disc_factor) - loss_dic['g_loss'] = g_loss.detach().mean() - else: - loss = weighted_nll_loss + d_weight * disc_factor * g_loss - log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), - "{}/nll_loss".format(split): nll_loss.detach().mean(), - "{}/rec_loss".format(split): rec_loss.detach().mean(), - "{}/d_weight".format(split): d_weight.detach(), - "{}/disc_factor".format(split): torch.tensor(disc_factor), - "{}/g_loss".format(split): g_loss.detach().mean(), - } - if return_dic: - loss_dic = {} - loss_dic["{}/total_loss".format(split)] = loss.clone().detach().mean() - loss_dic["{}/logvar".format(split)] = self.logvar.detach() - loss_dic['nll_loss'.format(split)] = nll_loss.detach().mean() - loss_dic['rec_loss'.format(split)] = rec_loss.detach().mean() - loss_dic['d_weight'.format(split)] = d_weight.detach() - loss_dic['disc_factor'.format(split)] = torch.tensor(disc_factor) - loss_dic['g_loss'.format(split)] = g_loss.detach().mean() - - if return_dic: - return loss, log, loss_dic - return loss, log - - if optimizer_idx == 1: - # second pass for discriminator update - if cond is None: - logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator(reconstructions.contiguous().detach()) - else: - logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) - - log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), - "{}/logits_real".format(split): logits_real.detach().mean(), - "{}/logits_fake".format(split): logits_fake.detach().mean() - } - - if return_dic: - loss_dic = {} - loss_dic["{}/disc_loss".format(split)] = d_loss.clone().detach().mean() - loss_dic["{}/logits_real".format(split)] = logits_real.detach().mean() - loss_dic["{}/logits_fake".format(split)] = logits_fake.detach().mean() - return d_loss, log, loss_dic - - return d_loss, log diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/vdecoder/hifigan/nvSTFT.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros/vdecoder/hifigan/nvSTFT.py deleted file mode 100644 index 88597d62a505715091f9ba62d38bf0a85a31b95a..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/vdecoder/hifigan/nvSTFT.py +++ /dev/null @@ -1,111 +0,0 @@ -import math -import os -os.environ["LRU_CACHE_CAPACITY"] = "3" -import random -import torch -import torch.utils.data -import numpy as np -import librosa -from librosa.util import normalize -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read -import soundfile as sf - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - 
data, sampling_rate = sf.read(full_path, always_2d=True)# than soundfile. - except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 32000 - else: - raise Exception(ex) - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer): # if audio data is type int - max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX - else: # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 32000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT(): - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, center=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - if fmax not in self.mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - # print(111,spec) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - # print(222,spec) - spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec) - # print(333,spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - # print(444,spec) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = 
self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT() diff --git a/spaces/Illumotion/Koboldcpp/ggml-mpi.h b/spaces/Illumotion/Koboldcpp/ggml-mpi.h deleted file mode 100644 index eda119d449849a0de2764a71b0ba3d51e7d5522b..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/ggml-mpi.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once - -struct ggml_context; -struct ggml_tensor; -struct ggml_cgraph; - -#ifdef __cplusplus -extern "C" { -#endif - -struct ggml_mpi_context; - -void ggml_mpi_backend_init(void); -void ggml_mpi_backend_free(void); - -struct ggml_mpi_context * ggml_mpi_init(void); -void ggml_mpi_free(struct ggml_mpi_context * ctx); - -int ggml_mpi_rank(struct ggml_mpi_context * ctx); - -void ggml_mpi_eval_init( - struct ggml_mpi_context * ctx_mpi, - int * n_tokens, - int * n_past, - int * n_threads); - -void ggml_mpi_graph_compute_pre( - struct ggml_mpi_context * ctx_mpi, - struct ggml_cgraph * gf, - int n_layers); - -void ggml_mpi_graph_compute_post( - struct ggml_mpi_context * ctx_mpi, - struct ggml_cgraph * gf, - int n_layers); - -#ifdef __cplusplus -} -#endif diff --git a/spaces/Itsjusttasiaa/Test02/Dockerfile b/spaces/Itsjusttasiaa/Test02/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/Itsjusttasiaa/Test02/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/Jackflack09/diffuse-custom/Waifu2x/utils/__init__.py b/spaces/Jackflack09/diffuse-custom/Waifu2x/utils/__init__.py deleted file mode 100644 index fb1623a14865e1d1b1e79275a3d5595642f92d9b..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/Waifu2x/utils/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# -*- coding: utf-8 -*- -# file: __init__.py -# time: 05/12/2022 -# author: yangheng -# github: https://github.com/yangheng95 -# huggingface: https://huggingface.co/yangheng -# google scholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en -# Copyright (C) 2021. All Rights Reserved. 
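# A quick usage sketch for the STFT helper from the nvSTFT.py module shown above:
# it loads (and, if needed, resamples) a wav and returns a log-compressed mel
# spectrogram of shape (n_mels, n_frames). The file path and parameter values
# below are illustrative assumptions, not values taken from the deleted code.
import torch

from vdecoder.hifigan.nvSTFT import STFT, load_wav_to_torch  # module path as in the repo above

stft = STFT(sr=44100, n_mels=80, n_fft=2048, win_size=2048, hop_length=512,
            fmin=40, fmax=16000)
audio, sr = load_wav_to_torch("example.wav", target_sr=stft.target_sr)
with torch.no_grad():
    mel = stft.get_mel(audio.unsqueeze(0))   # (1, n_mels, n_frames)
print(mel.shape)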
diff --git a/spaces/Jayyydyyy/english-tokipona-translator/README.md b/spaces/Jayyydyyy/english-tokipona-translator/README.md deleted file mode 100644 index ac7e28268f5b74b041784ca75dee0af609a333ec..0000000000000000000000000000000000000000 --- a/spaces/Jayyydyyy/english-tokipona-translator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: English / toki pona Translator -emoji: 💬 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Jmansoking/newbing/README.md b/spaces/Jmansoking/newbing/README.md deleted file mode 100644 index 190dcc71252d6dbd59d846e6d4f593e42e96d488..0000000000000000000000000000000000000000 --- a/spaces/Jmansoking/newbing/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Newbing -emoji: 🦀 -colorFrom: blue -colorTo: pink -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/dsp/compressor.py b/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/dsp/compressor.py deleted file mode 100644 index ab515f9ec9a36f43de4a08f3069119b9d73ff1ed..0000000000000000000000000000000000000000 --- a/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/dsp/compressor.py +++ /dev/null @@ -1,177 +0,0 @@ -import sys -import torch -import numpy as np -import scipy.signal -from numba import jit - -from deepafx_st.processors.processor import Processor - - -# Adapted from: https://github.com/drscotthawley/signaltrain/blob/master/signaltrain/audio.py -@jit(nopython=True) -def my_clip_min( - x: np.ndarray, - clip_min: float, -): # does the work of np.clip(), which numba doesn't support yet - # TODO: keep an eye on Numba PR https://github.com/numba/numba/pull/3468 that fixes this - inds = np.where(x < clip_min) - x[inds] = clip_min - return x - - -@jit(nopython=True) -def compressor( - x: np.ndarray, - sample_rate: float, - threshold: float = -24.0, - ratio: float = 2.0, - attack_time: float = 0.01, - release_time: float = 0.01, - knee_dB: float = 0.0, - makeup_gain_dB: float = 0.0, - dtype=np.float32, -): - """ - - Args: - x (np.ndarray): Input signal. - sample_rate (float): Sample rate in Hz. - threshold (float): Threhold in dB. - ratio (float): Ratio (should be >=1 , i.e. ratio:1). - attack_time (float): Attack time in seconds. - release_time (float): Release time in seconds. - knee_dB (float): Knee. - makeup_gain_dB (float): Makeup Gain. - dtype (type): Output type. Default: np.float32 - - Returns: - y (np.ndarray): Output signal. 
- - """ - # print(f"dsp comp fs = {sample_rate}") - - N = len(x) - dtype = x.dtype - y = np.zeros(N, dtype=dtype) - - # Initialize separate attack and release times - # Where do these numbers come from - alpha_A = np.exp(-np.log(9) / (sample_rate * attack_time)) - alpha_R = np.exp(-np.log(9) / (sample_rate * release_time)) - - # Turn the input signal into a uni-polar signal on the dB scale - x_G = 20 * np.log10(np.abs(x) + 1e-8) # x_uni casts type - - # Ensure there are no values of negative infinity - x_G = my_clip_min(x_G, -96) - - # Static characteristics with knee - y_G = np.zeros(N, dtype=dtype) - - # Below knee - idx = np.where((2 * (x_G - threshold)) < -knee_dB) - y_G[idx] = x_G[idx] - - # At knee - idx = np.where((2 * np.abs(x_G - threshold)) <= knee_dB) - y_G[idx] = x_G[idx] + ( - (1 / ratio) * (((x_G[idx] - threshold + knee_dB) / 2) ** 2) - ) / (2 * knee_dB) - - # Above knee threshold - idx = np.where((2 * (x_G - threshold)) > knee_dB) - y_G[idx] = threshold + ((x_G[idx] - threshold) / ratio) - - x_L = x_G - y_G - - # this loop is slow but not vectorizable due to its cumulative, sequential nature. @autojit makes it fast(er). - y_L = np.zeros(N, dtype=dtype) - for n in range(1, N): - # smooth over the gainChange - if x_L[n] > y_L[n - 1]: # attack mode - y_L[n] = (alpha_A * y_L[n - 1]) + ((1 - alpha_A) * x_L[n]) - else: # release - y_L[n] = (alpha_R * y_L[n - 1]) + ((1 - alpha_R) * x_L[n]) - - # Convert to linear amplitude scalar; i.e. map from dB to amplitude - lin_y_L = np.power(10.0, (-y_L / 20.0)) - y = lin_y_L * x # Apply linear amplitude to input sample - - y *= np.power(10.0, makeup_gain_dB / 20.0) # apply makeup gain - - return y.astype(dtype) - - -class Compressor(Processor): - def __init__( - self, - sample_rate, - max_threshold=0.0, - min_threshold=-80, - max_ratio=20.0, - min_ratio=1.0, - max_attack=0.1, - min_attack=0.0001, - max_release=1.0, - min_release=0.005, - max_knee=12.0, - min_knee=0.0, - max_mkgain=48.0, - min_mkgain=-48.0, - eps=1e-8, - ): - """ """ - super().__init__() - self.sample_rate = sample_rate - self.eps = eps - self.ports = [ - { - "name": "Threshold", - "min": min_threshold, - "max": max_threshold, - "default": -12.0, - "units": "", - }, - { - "name": "Ratio", - "min": min_ratio, - "max": max_ratio, - "default": 2.0, - "units": "", - }, - { - "name": "Attack Time", - "min": min_attack, - "max": max_attack, - "default": 0.001, - "units": "s", - }, - { - "name": "Release Time", - "min": min_release, - "max": max_release, - "default": 0.045, - "units": "s", - }, - { - "name": "Knee", - "min": min_knee, - "max": max_knee, - "default": 6.0, - "units": "dB", - }, - { - "name": "Makeup Gain", - "min": min_mkgain, - "max": max_mkgain, - "default": 0.0, - "units": "dB", - }, - ] - - self.num_control_params = len(self.ports) - self.process_fn = compressor - - def forward(self, x, p, sample_rate=24000, **kwargs): - "All processing in the forward is in numpy." 
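# (Aside, not part of the deleted file.) A minimal sketch of calling the numba
# `compressor` function defined above directly on a synthetic signal; the parameter
# values are illustrative, only the argument names come from the code above.
import numpy as np

sr = 44100
t = np.linspace(0.0, 1.0, sr, endpoint=False, dtype=np.float32)
x = (0.8 * np.sin(2 * np.pi * 220.0 * t)).astype(np.float32)   # loud 220 Hz tone
y = compressor(x, sr, threshold=-24.0, ratio=4.0,
               attack_time=0.005, release_time=0.05,
               knee_dB=6.0, makeup_gain_dB=3.0)
print(float(np.abs(x).max()), float(np.abs(y).max()))          # peak level before/after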
- return self.run_series(x, p, sample_rate) diff --git a/spaces/KKMobile/MagicPrompt-Stable-Diffusion/README.md b/spaces/KKMobile/MagicPrompt-Stable-Diffusion/README.md deleted file mode 100644 index bd07440b3ab29ffc1733d9ab75237c644816b590..0000000000000000000000000000000000000000 --- a/spaces/KKMobile/MagicPrompt-Stable-Diffusion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MagicPrompt Stable Diffusion -emoji: 😻 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_sidd.py b/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_sidd.py deleted file mode 100644 index 6e4058690a7227f16866306ad6a2ea24d369e8ca..0000000000000000000000000000000000000000 --- a/spaces/Kayson/InstructDiffusion/dataset/low_level/lowlevel_sidd.py +++ /dev/null @@ -1,96 +0,0 @@ -# -------------------------------------------------------- -# InstructDiffusion -# Based on instruct-pix2pix (https://github.com/timothybrooks/instruct-pix2pix) -# Modified by Chen Li (edward82@stu.xjtu.edu.cn) -# -------------------------------------------------------- - -import os -import numpy as np -from torch.utils.data import Dataset -import torch -from PIL import Image -import torchvision.transforms.functional as TF -from pdb import set_trace as stx -import random -import cv2 -from PIL import Image -import torchvision - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in ['jpeg', 'JPEG', 'jpg', 'png', 'JPG', 'PNG', 'gif']) - - -class SIDD(Dataset): - def __init__(self, path, split="train", size=256, interpolation="pil_lanczos", - flip_prob=0.5, sample_weight=1.0, instruct=False): - super(SIDD, self).__init__() - - inp_files = sorted(os.listdir(os.path.join(path, split, 'input'))) - tar_files = sorted(os.listdir(os.path.join(path, split, 'gt'))) - - self.inp_filenames = [os.path.join(path, split, 'input', x) for x in inp_files if is_image_file(x)] - self.tar_filenames = [os.path.join(path, split, 'gt', x) for x in tar_files if is_image_file(x)] - - self.size = size - self.flip_prob = flip_prob - self.sample_weight = sample_weight - self.instruct = instruct - self.sizex = len(self.tar_filenames) # get the size of target - - self.interpolation = { - "cv_nearest": cv2.INTER_NEAREST, - "cv_bilinear": cv2.INTER_LINEAR, - "cv_bicubic": cv2.INTER_CUBIC, - "cv_area": cv2.INTER_AREA, - "cv_lanczos": cv2.INTER_LANCZOS4, - "pil_nearest": Image.NEAREST, - "pil_bilinear": Image.BILINEAR, - "pil_bicubic": Image.BICUBIC, - "pil_box": Image.BOX, - "pil_hamming": Image.HAMMING, - "pil_lanczos": Image.LANCZOS, - }[interpolation] - - prompt_path='dataset/prompt/prompt_denoise.txt' - self.prompt_list=[] - with open(prompt_path) as f: - line=f.readline() - while line: - line=line.strip('\n') - self.prompt_list.append(line) - line=f.readline() - print(f"SIDD has {len(self)} samples!!") - - def __len__(self): - return int(self.sizex * self.sample_weight) - - def __getitem__(self, index): - if self.sample_weight >= 1: - index_ = index % self.sizex - else: - index_ = int(index / self.sample_weight) + random.randint(0, int(1 / self.sample_weight) - 1) - - inp_path = self.inp_filenames[index_] - tar_path = self.tar_filenames[index_] - - inp_img = Image.open(inp_path) - tar_img = Image.open(tar_path) - - width, height = inp_img.size - tar_width, tar_height = tar_img.size - assert tar_width 
== width and tar_height == height, "Input and target image mismatch" - - inp_img = np.array(inp_img).astype(np.float32).transpose(2, 0, 1) - inp_img_tensor = torch.tensor((inp_img / 127.5 - 1.0).astype(np.float32)) - tar_img = np.array(tar_img).astype(np.float32).transpose(2, 0, 1) - tar_img_tensor = torch.tensor((tar_img / 127.5 - 1.0).astype(np.float32)) - crop = torchvision.transforms.RandomCrop(self.size) - flip = torchvision.transforms.RandomHorizontalFlip(float(self.flip_prob)) - image_0, image_1 = flip(crop(torch.cat((inp_img_tensor, tar_img_tensor)))).chunk(2) - - prompt = random.choice(self.prompt_list) - if self.instruct: - prompt = "Image Denoising: " + prompt - - return dict(edited=image_1, edit=dict(c_concat=image_0, c_crossattn=prompt)) \ No newline at end of file diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/fregan/meldataset.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/fregan/meldataset.py deleted file mode 100644 index 53b2c94e21d9ad3e2a33a6f4b1207a57e0016651..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/fregan/meldataset.py +++ /dev/null @@ -1,176 +0,0 @@ -import math -import os -import random -import torch -import torch.utils.data -import numpy as np -from librosa.util import normalize -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - - spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec) - spec = spectral_normalize_torch(spec) - - return spec - - -def get_dataset_filelist(a): - #with open(a.input_training_file, 'r', encoding='utf-8') as fi: - # training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav') - # for x in fi.read().split('\n') if len(x) > 0] - - #with open(a.input_validation_file, 'r', encoding='utf-8') as fi: - # validation_files = 
[os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav') - # for x in fi.read().split('\n') if len(x) > 0] - files = os.listdir(a.input_wavs_dir) - random.shuffle(files) - files = [os.path.join(a.input_wavs_dir, f) for f in files] - training_files = files[: -int(len(files) * 0.05)] - validation_files = files[-int(len(files) * 0.05):] - return training_files, validation_files - - -class MelDataset(torch.utils.data.Dataset): - def __init__(self, training_files, segment_size, n_fft, num_mels, - hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1, - device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None): - self.audio_files = training_files - random.seed(1234) - if shuffle: - random.shuffle(self.audio_files) - self.segment_size = segment_size - self.sampling_rate = sampling_rate - self.split = split - self.n_fft = n_fft - self.num_mels = num_mels - self.hop_size = hop_size - self.win_size = win_size - self.fmin = fmin - self.fmax = fmax - self.fmax_loss = fmax_loss - self.cached_wav = None - self.n_cache_reuse = n_cache_reuse - self._cache_ref_count = 0 - self.device = device - self.fine_tuning = fine_tuning - self.base_mels_path = base_mels_path - - def __getitem__(self, index): - filename = self.audio_files[index] - if self._cache_ref_count == 0: - #audio, sampling_rate = load_wav(filename) - #audio = audio / MAX_WAV_VALUE - audio = np.load(filename) - if not self.fine_tuning: - audio = normalize(audio) * 0.95 - self.cached_wav = audio - #if sampling_rate != self.sampling_rate: - # raise ValueError("{} SR doesn't match target {} SR".format( - # sampling_rate, self.sampling_rate)) - self._cache_ref_count = self.n_cache_reuse - else: - audio = self.cached_wav - self._cache_ref_count -= 1 - - audio = torch.FloatTensor(audio) - audio = audio.unsqueeze(0) - - if not self.fine_tuning: - if self.split: - if audio.size(1) >= self.segment_size: - max_audio_start = audio.size(1) - self.segment_size - audio_start = random.randint(0, max_audio_start) - audio = audio[:, audio_start:audio_start+self.segment_size] - else: - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant') - - mel = mel_spectrogram(audio, self.n_fft, self.num_mels, - self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax, - center=False) - else: - mel_path = os.path.join(self.base_mels_path, "mel" + "-" + filename.split("/")[-1].split("-")[-1]) - mel = np.load(mel_path).T - #mel = np.load( - # os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy')) - mel = torch.from_numpy(mel) - - if len(mel.shape) < 3: - mel = mel.unsqueeze(0) - - if self.split: - frames_per_seg = math.ceil(self.segment_size / self.hop_size) - - if audio.size(1) >= self.segment_size: - mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) - mel = mel[:, :, mel_start:mel_start + frames_per_seg] - audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size] - else: - mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant') - audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant') - - mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels, - self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss, - center=False) - - return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) - - def __len__(self): - return len(self.audio_files) \ No newline at end of file diff --git 
a/spaces/Kevin676/Clone-Your-Voice/encoder/model.py b/spaces/Kevin676/Clone-Your-Voice/encoder/model.py deleted file mode 100644 index e050d3204d8f1becdf0f8b3133470708e5420cea..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Clone-Your-Voice/encoder/model.py +++ /dev/null @@ -1,135 +0,0 @@ -from encoder.params_model import * -from encoder.params_data import * -from scipy.interpolate import interp1d -from sklearn.metrics import roc_curve -from torch.nn.utils import clip_grad_norm_ -from scipy.optimize import brentq -from torch import nn -import numpy as np -import torch - - -class SpeakerEncoder(nn.Module): - def __init__(self, device, loss_device): - super().__init__() - self.loss_device = loss_device - - # Network defition - self.lstm = nn.LSTM(input_size=mel_n_channels, - hidden_size=model_hidden_size, - num_layers=model_num_layers, - batch_first=True).to(device) - self.linear = nn.Linear(in_features=model_hidden_size, - out_features=model_embedding_size).to(device) - self.relu = torch.nn.ReLU().to(device) - - # Cosine similarity scaling (with fixed initial parameter values) - self.similarity_weight = nn.Parameter(torch.tensor([10.])).to(loss_device) - self.similarity_bias = nn.Parameter(torch.tensor([-5.])).to(loss_device) - - # Loss - self.loss_fn = nn.CrossEntropyLoss().to(loss_device) - - def do_gradient_ops(self): - # Gradient scale - self.similarity_weight.grad *= 0.01 - self.similarity_bias.grad *= 0.01 - - # Gradient clipping - clip_grad_norm_(self.parameters(), 3, norm_type=2) - - def forward(self, utterances, hidden_init=None): - """ - Computes the embeddings of a batch of utterance spectrograms. - - :param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape - (batch_size, n_frames, n_channels) - :param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers, - batch_size, hidden_size). Will default to a tensor of zeros if None. - :return: the embeddings as a tensor of shape (batch_size, embedding_size) - """ - # Pass the input through the LSTM layers and retrieve all outputs, the final hidden state - # and the final cell state. - out, (hidden, cell) = self.lstm(utterances, hidden_init) - - # We take only the hidden state of the last layer - embeds_raw = self.relu(self.linear(hidden[-1])) - - # L2-normalize it - embeds = embeds_raw / (torch.norm(embeds_raw, dim=1, keepdim=True) + 1e-5) - - return embeds - - def similarity_matrix(self, embeds): - """ - Computes the similarity matrix according the section 2.1 of GE2E. - - :param embeds: the embeddings as a tensor of shape (speakers_per_batch, - utterances_per_speaker, embedding_size) - :return: the similarity matrix as a tensor of shape (speakers_per_batch, - utterances_per_speaker, speakers_per_batch) - """ - speakers_per_batch, utterances_per_speaker = embeds.shape[:2] - - # Inclusive centroids (1 per speaker). Cloning is needed for reverse differentiation - centroids_incl = torch.mean(embeds, dim=1, keepdim=True) - centroids_incl = centroids_incl.clone() / (torch.norm(centroids_incl, dim=2, keepdim=True) + 1e-5) - - # Exclusive centroids (1 per utterance) - centroids_excl = (torch.sum(embeds, dim=1, keepdim=True) - embeds) - centroids_excl /= (utterances_per_speaker - 1) - centroids_excl = centroids_excl.clone() / (torch.norm(centroids_excl, dim=2, keepdim=True) + 1e-5) - - # Similarity matrix. The cosine similarity of already 2-normed vectors is simply the dot - # product of these vectors (which is just an element-wise multiplication reduced by a sum). 
- # We vectorize the computation for efficiency. - sim_matrix = torch.zeros(speakers_per_batch, utterances_per_speaker, - speakers_per_batch).to(self.loss_device) - mask_matrix = 1 - np.eye(speakers_per_batch, dtype=np.int) - for j in range(speakers_per_batch): - mask = np.where(mask_matrix[j])[0] - sim_matrix[mask, :, j] = (embeds[mask] * centroids_incl[j]).sum(dim=2) - sim_matrix[j, :, j] = (embeds[j] * centroids_excl[j]).sum(dim=1) - - ## Even more vectorized version (slower maybe because of transpose) - # sim_matrix2 = torch.zeros(speakers_per_batch, speakers_per_batch, utterances_per_speaker - # ).to(self.loss_device) - # eye = np.eye(speakers_per_batch, dtype=np.int) - # mask = np.where(1 - eye) - # sim_matrix2[mask] = (embeds[mask[0]] * centroids_incl[mask[1]]).sum(dim=2) - # mask = np.where(eye) - # sim_matrix2[mask] = (embeds * centroids_excl).sum(dim=2) - # sim_matrix2 = sim_matrix2.transpose(1, 2) - - sim_matrix = sim_matrix * self.similarity_weight + self.similarity_bias - return sim_matrix - - def loss(self, embeds): - """ - Computes the softmax loss according the section 2.1 of GE2E. - - :param embeds: the embeddings as a tensor of shape (speakers_per_batch, - utterances_per_speaker, embedding_size) - :return: the loss and the EER for this batch of embeddings. - """ - speakers_per_batch, utterances_per_speaker = embeds.shape[:2] - - # Loss - sim_matrix = self.similarity_matrix(embeds) - sim_matrix = sim_matrix.reshape((speakers_per_batch * utterances_per_speaker, - speakers_per_batch)) - ground_truth = np.repeat(np.arange(speakers_per_batch), utterances_per_speaker) - target = torch.from_numpy(ground_truth).long().to(self.loss_device) - loss = self.loss_fn(sim_matrix, target) - - # EER (not backpropagated) - with torch.no_grad(): - inv_argmax = lambda i: np.eye(1, speakers_per_batch, i, dtype=np.int)[0] - labels = np.array([inv_argmax(i) for i in ground_truth]) - preds = sim_matrix.detach().cpu().numpy() - - # Snippet from https://yangcha.github.io/EER-ROC/ - fpr, tpr, thresholds = roc_curve(labels.flatten(), preds.flatten()) - eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.) 
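# (Illustrative aside, not from the deleted file.) The similarity matrix above is
# (speakers, utterances, speakers): entry [j, i, k] compares utterance i of speaker j
# against speaker k's centroid, using the *exclusive* centroid when k == j so an
# utterance is never compared against itself. A tiny self-contained sketch of the
# same idea with plain tensor ops (no learned scaling) might look like this:
import torch
import torch.nn.functional as F

speakers, utts, dim = 3, 4, 8
embeds = F.normalize(torch.randn(speakers, utts, dim), dim=-1)

incl = F.normalize(embeds.mean(dim=1), dim=-1)                       # (speakers, dim)
excl = F.normalize((embeds.sum(dim=1, keepdim=True) - embeds) / (utts - 1), dim=-1)

sim = torch.einsum("jid,kd->jik", embeds, incl)                      # vs every inclusive centroid
same = torch.arange(speakers)
sim[same, :, same] = (embeds * excl).sum(dim=-1)                     # exclusive centroid on the diagonal
print(sim.shape)                                                     # torch.Size([3, 4, 3])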
- - return loss, eer diff --git a/spaces/KingChronos/ChatGPT4/app.py b/spaces/KingChronos/ChatGPT4/app.py deleted file mode 100644 index 7e09e57ef928fd2451fd0ed1295d0994ca75d026..0000000000000000000000000000000000000000 --- a/spaces/KingChronos/ChatGPT4/app.py +++ /dev/null @@ -1,193 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Huggingface provided GPT4 OpenAI API Key -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") - -#Inferenec function -def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" - } - print(f"system message is ^^ {system_msg}") - if system_msg.strip() == '': - initial_message = [{"role": "user", "content": f"{inputs}"},] - multi_turn_message = [] - else: - initial_message= [{"role": "system", "content": system_msg}, - {"role": "user", "content": f"{inputs}"},] - multi_turn_message = [{"role": "system", "content": system_msg},] - - if chat_counter == 0 : - payload = { - "model": "gpt-4", - "messages": initial_message , - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - print(f"chat_counter - {chat_counter}") - else: #if chat_counter != 0 : - messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},] - for data in chatbot: - user = {} - user["role"] = "user" - user["content"] = data[0] - assistant = {} - assistant["role"] = "assistant" - assistant["content"] = data[1] - messages.append(user) - messages.append(assistant) - temp = {} - temp["role"] = "user" - temp["content"] = inputs - messages.append(temp) - #messages - payload = { - "model": "gpt-4", - "messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0,} - - chat_counter+=1 - - history.append(inputs) - print(f"Logging : payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - print(f"Logging : response code - {response}") - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - # check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history} - -#Resetting to blank -def reset_textbox(): - return gr.update(value='') - -#to set a component as visible=False -def set_visible_false(): - return gr.update(visible=False) - -#to set a component as visible=True -def set_visible_true(): - return gr.update(visible=True) - -title = """

🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming

""" - -#display message for themes feature -theme_addon_msg = """
🌟 Discover Gradio Themes with this Demo, featuring v3.22.0! Gradio v3.23.0 also enables seamless Theme sharing. You can develop or modify a theme, and send it to the hub using simple theme.push_to_hub(). -
🏆Participate in Gradio's Theme Building Hackathon to exhibit your creative flair and win fabulous rewards! Join here - Gradio-Themes-Party🎨 🏆
-""" - -#Using info to add additional information about System message in GPT4 -system_msg_info = """A conversation could begin with a system message to gently instruct the assistant. -System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'""" - -#Modifying existing Gradio Theme -theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green", - text_size=gr.themes.sizes.text_lg) - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""

🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌

""") - gr.HTML(theme_addon_msg) - gr.HTML('''
Duplicate the Space and run securely with your OpenAI API Key
''') - - with gr.Column(elem_id = "col_container"): - #GPT4 API Key is provided by Huggingface - with gr.Accordion(label="System message:", open=False): - system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value="") - accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False) - chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot") - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") - state = gr.State([]) - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button().style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #top_p, temperature - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - #Event handling - inputs.submit( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - - inputs.submit(set_visible_false, [], [system_msg]) - b1.click(set_visible_false, [], [system_msg]) - inputs.submit(set_visible_true, [], [accordion_msg]) - b1.click(set_visible_true, [], [accordion_msg]) - - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #Examples - with gr.Accordion(label="Examples for System message:", open=False): - gr.Examples( - examples = [["""You are an AI programming assistant. - - - Follow the user's requirements carefully and to the letter. - - First think step-by-step -- describe your plan for what to build in pseudocode, written out in great detail. - - Then output the code in a single code block. - - Minimize any other prose."""], ["""You are ComedianGPT who is a helpful assistant. 
You answer everything with a joke and witty replies."""], - ["You are ChefGPT, a helpful assistant who answers questions with culinary expertise and a pinch of humor."], - ["You are FitnessGuruGPT, a fitness expert who shares workout tips and motivation with a playful twist."], - ["You are SciFiGPT, an AI assistant who discusses science fiction topics with a blend of knowledge and wit."], - ["You are PhilosopherGPT, a thoughtful assistant who responds to inquiries with philosophical insights and a touch of humor."], - ["You are EcoWarriorGPT, a helpful assistant who shares environment-friendly advice with a lighthearted approach."], - ["You are MusicMaestroGPT, a knowledgeable AI who discusses music and its history with a mix of facts and playful banter."], - ["You are SportsFanGPT, an enthusiastic assistant who talks about sports and shares amusing anecdotes."], - ["You are TechWhizGPT, a tech-savvy AI who can help users troubleshoot issues and answer questions with a dash of humor."], - ["You are FashionistaGPT, an AI fashion expert who shares style advice and trends with a sprinkle of wit."], - ["You are ArtConnoisseurGPT, an AI assistant who discusses art and its history with a blend of knowledge and playful commentary."], - ["You are a helpful assistant that provides detailed and accurate information."], - ["You are an assistant that speaks like Shakespeare."], - ["You are a friendly assistant who uses casual language and humor."], - ["You are a financial advisor who gives expert advice on investments and budgeting."], - ["You are a health and fitness expert who provides advice on nutrition and exercise."], - ["You are a travel consultant who offers recommendations for destinations, accommodations, and attractions."], - ["You are a movie critic who shares insightful opinions on films and their themes."], - ["You are a history enthusiast who loves to discuss historical events and figures."], - ["You are a tech-savvy assistant who can help users troubleshoot issues and answer questions about gadgets and software."], - ["You are an AI poet who can compose creative and evocative poems on any given topic."],], - inputs = system_msg,) - -demo.queue(max_size=99, concurrency_count=20).launch(debug=True) \ No newline at end of file diff --git a/spaces/KyanChen/BuildingExtraction/Tools/GetImgMeanStd.py b/spaces/KyanChen/BuildingExtraction/Tools/GetImgMeanStd.py deleted file mode 100644 index ea4b9e26336ce2cc340e10c6e8d8258b6305a886..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/BuildingExtraction/Tools/GetImgMeanStd.py +++ /dev/null @@ -1,52 +0,0 @@ -import os -import pandas as pd -from skimage import io -import numpy as np -import json -import tqdm - - -# R, G, B -class GetImgMeanStd: - def __init__(self, data_file): - assert os.path.exists(data_file), 'train.csv dose not exist!' - self.data_info = pd.read_csv(data_file, index_col=0) - self.save_path_mean_std_info = 'generate_dep_info' - self.mean = None - self.std = None - - def get_img_mean_std(self): - means = [] - stds = [] - bar = tqdm.tqdm(total=len(self.data_info)) - for row in self.data_info.iterrows(): - bar.update(1) - img_name = row[1]['img'] - # print(img_name) - img = io.imread(img_name) - img = img / 255. 
- assert img is not None, img_name + 'is not valid' - # height*width*channels, axis=0 is the first dim - mean = np.mean(np.mean(img, axis=0), axis=0) - means.append(mean) - std = np.std(np.std(img, axis=0), axis=0) - stds.append(std) - bar.close() - self.mean = np.mean(np.array(means), axis=0).tolist() - self.std = np.mean(np.array(stds), axis=0).tolist() - return {'mean': self.mean, 'std': self.std} - - def write_mean_std_information(self): - info = self.get_img_mean_std() - writer = os.path.join(self.save_path_mean_std_info, 'mean_std_info_test.json') - with open(writer, 'w') as f_writer: - json.dump(info, f_writer) - print('\'PRIOR_MEAN\': %s\n\'PRIOR_STD\': %s\n' % (info['mean'], info['std'])) - - -if __name__ == '__main__': - data_file = r'generate_dep_info/train_data.csv' - getImgMeanStd = GetImgMeanStd(data_file) - getImgMeanStd.write_mean_std_information() - - diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/task_modules/assigners/uniform_assigner.py b/spaces/KyanChen/RSPrompter/mmdet/models/task_modules/assigners/uniform_assigner.py deleted file mode 100644 index 9a83bfd0b46a3690dce9cf0adf2c1e676f304d06..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/task_modules/assigners/uniform_assigner.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional - -import torch -from mmengine.structures import InstanceData - -from mmdet.registry import TASK_UTILS -from mmdet.structures.bbox import bbox_xyxy_to_cxcywh -from mmdet.utils import ConfigType -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@TASK_UTILS.register_module() -class UniformAssigner(BaseAssigner): - """Uniform Matching between the priors and gt boxes, which can achieve - balance in positive priors, and gt_bboxes_ignore was not considered for - now. - - Args: - pos_ignore_thr (float): the threshold to ignore positive priors - neg_ignore_thr (float): the threshold to ignore negative priors - match_times(int): Number of positive priors for each gt box. - Defaults to 4. - iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou - calculator. Defaults to ``dict(type='BboxOverlaps2D')`` - """ - - def __init__(self, - pos_ignore_thr: float, - neg_ignore_thr: float, - match_times: int = 4, - iou_calculator: ConfigType = dict(type='BboxOverlaps2D')): - self.match_times = match_times - self.pos_ignore_thr = pos_ignore_thr - self.neg_ignore_thr = neg_ignore_thr - self.iou_calculator = TASK_UTILS.build(iou_calculator) - - def assign( - self, - pred_instances: InstanceData, - gt_instances: InstanceData, - gt_instances_ignore: Optional[InstanceData] = None - ) -> AssignResult: - """Assign gt to priors. - - The assignment is done in following steps - - 1. assign -1 by default - 2. compute the L1 cost between boxes. Note that we use priors and - predict boxes both - 3. compute the ignore indexes use gt_bboxes and predict boxes - 4. compute the ignore indexes of positive sample use priors and - predict boxes - - - Args: - pred_instances (:obj:`InstaceData`): Instances of model - predictions. It includes ``priors``, and the priors can - be priors, points, or bboxes predicted by the model, - shape(n, 4). - gt_instances (:obj:`InstaceData`): Ground truth of instance - annotations. It usually includes ``bboxes`` and ``labels`` - attributes. - gt_instances_ignore (:obj:`InstaceData`, optional): Instances - to be ignored during training. 
It includes ``bboxes`` - attribute data that is ignored during training and testing. - Defaults to None. - - Returns: - :obj:`AssignResult`: The assign result. - """ - - gt_bboxes = gt_instances.bboxes - gt_labels = gt_instances.labels - priors = pred_instances.priors - bbox_pred = pred_instances.decoder_priors - - num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) - - # 1. assign -1 by default - assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), - 0, - dtype=torch.long) - assigned_labels = bbox_pred.new_full((num_bboxes, ), - -1, - dtype=torch.long) - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - if num_gts == 0: - # No ground truth, assign all to background - assigned_gt_inds[:] = 0 - assign_result = AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) - assign_result.set_extra_property( - 'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool)) - assign_result.set_extra_property('pos_predicted_boxes', - bbox_pred.new_empty((0, 4))) - assign_result.set_extra_property('target_boxes', - bbox_pred.new_empty((0, 4))) - return assign_result - - # 2. Compute the L1 cost between boxes - # Note that we use priors and predict boxes both - cost_bbox = torch.cdist( - bbox_xyxy_to_cxcywh(bbox_pred), - bbox_xyxy_to_cxcywh(gt_bboxes), - p=1) - cost_bbox_priors = torch.cdist( - bbox_xyxy_to_cxcywh(priors), bbox_xyxy_to_cxcywh(gt_bboxes), p=1) - - # We found that topk function has different results in cpu and - # cuda mode. In order to ensure consistency with the source code, - # we also use cpu mode. - # TODO: Check whether the performance of cpu and cuda are the same. - C = cost_bbox.cpu() - C1 = cost_bbox_priors.cpu() - - # self.match_times x n - index = torch.topk( - C, # c=b,n,x c[i]=n,x - k=self.match_times, - dim=0, - largest=False)[1] - - # self.match_times x n - index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1] - # (self.match_times*2) x n - indexes = torch.cat((index, index1), - dim=1).reshape(-1).to(bbox_pred.device) - - pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes) - anchor_overlaps = self.iou_calculator(priors, gt_bboxes) - pred_max_overlaps, _ = pred_overlaps.max(dim=1) - anchor_max_overlaps, _ = anchor_overlaps.max(dim=0) - - # 3. Compute the ignore indexes use gt_bboxes and predict boxes - ignore_idx = pred_max_overlaps > self.neg_ignore_thr - assigned_gt_inds[ignore_idx] = -1 - - # 4. 
Compute the ignore indexes of positive sample use priors - # and predict boxes - pos_gt_index = torch.arange( - 0, C1.size(1), - device=bbox_pred.device).repeat(self.match_times * 2) - pos_ious = anchor_overlaps[indexes, pos_gt_index] - pos_ignore_idx = pos_ious < self.pos_ignore_thr - - pos_gt_index_with_ignore = pos_gt_index + 1 - pos_gt_index_with_ignore[pos_ignore_idx] = -1 - assigned_gt_inds[indexes] = pos_gt_index_with_ignore - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - - assign_result = AssignResult( - num_gts, - assigned_gt_inds, - anchor_max_overlaps, - labels=assigned_labels) - assign_result.set_extra_property('pos_idx', ~pos_ignore_idx) - assign_result.set_extra_property('pos_predicted_boxes', - bbox_pred[indexes]) - assign_result.set_extra_property('target_boxes', - gt_bboxes[pos_gt_index]) - return assign_result diff --git a/spaces/Laronix/Laronix_ASR_TTS_VC/local/app.vctk.py b/spaces/Laronix/Laronix_ASR_TTS_VC/local/app.vctk.py deleted file mode 100644 index 592a5927d0d78b3098f1e0dc0dc08308bb9f0471..0000000000000000000000000000000000000000 --- a/spaces/Laronix/Laronix_ASR_TTS_VC/local/app.vctk.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -TODO: - + [x] Load Configuration - + [ ] Checking - + [ ] Better saving directory -""" -import numpy as np -from pathlib import Path -import jiwer -import pdb -import torch.nn as nn -import torch -import torchaudio -from transformers import pipeline -from time import process_time, time -from pathlib import Path -# local import -import sys -from espnet2.bin.tts_inference import Text2Speech -# pdb.set_trace() -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - -sys.path.append("src") - -import gradio as gr - -# ASR part - -audio_files = [str(x) for x in sorted(Path("/home/kevingeng/Disk2/laronix/laronix_automos/data/20230103_video").glob("**/*wav"))] -# audio_files = [str(x) for x in sorted(Path("./data/Patient_sil_trim_16k_normed_5_snr_40/Rainbow").glob("**/*wav"))] -# transcriber = pipeline("automatic-speech-recognition", model="KevinGeng/PAL_John_128_train_dev_test_seed_1") -transcriber = pipeline("automatic-speech-recognition", model="KevinGeng/PAL_John_128_p326_300_train_dev_test_seed_1") -# 【Female】kan-bayashi ljspeech parallel wavegan -# tts_model = Text2Speech.from_pretrained("espnet/kan-bayashi_ljspeech_vits") -# 【Male】fastspeech2-en-200_speaker-cv4, hifigan vocoder -# pdb.set_trace() -from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub -from fairseq.models.text_to_speech.hub_interface import TTSHubInterface - -#@title English multi-speaker pretrained model { run: "auto" } -lang = 'English' -tag = 'kan-bayashi/libritts_xvector_vits' -# tag = "kan-bayashi/vctk_multi_spk_vits" -# vits needs no -vocoder_tag = "parallel_wavegan/vctk_parallel_wavegan.v1.long" #@param ["none", "parallel_wavegan/vctk_parallel_wavegan.v1.long", "parallel_wavegan/vctk_multi_band_melgan.v2", "parallel_wavegan/vctk_style_melgan.v1", "parallel_wavegan/vctk_hifigan.v1", "parallel_wavegan/libritts_parallel_wavegan.v1.long", "parallel_wavegan/libritts_multi_band_melgan.v2", "parallel_wavegan/libritts_hifigan.v1", "parallel_wavegan/libritts_style_melgan.v1"] {type:"string"} -from espnet2.bin.tts_inference import Text2Speech -from espnet2.utils.types import 
str_or_none - -text2speech = Text2Speech.from_pretrained( - model_tag=str_or_none(tag), - vocoder_tag=str_or_none(vocoder_tag), - device="cuda", - use_att_constraint=False, - backward_window=1, - forward_window=3, - speed_control_alpha=1.0, -) - - -import glob -import os -import numpy as np -import kaldiio - -# Get model directory path -from espnet_model_zoo.downloader import ModelDownloader -d = ModelDownloader() -model_dir = os.path.dirname(d.download_and_unpack(tag)["train_config"]) - -# Speaker x-vector selection - -xvector_ark = [p for p in glob.glob(f"{model_dir}/../../dump/**/spk_xvector.ark", recursive=True) if "tr" in p][0] -xvectors = {k: v for k, v in kaldiio.load_ark(xvector_ark)} -spks = list(xvectors.keys()) - -male_spks = {"M1": "2300_131720", "M2": "1320_122612", "M3": "1188_133604", "M4": "61_70970"} -female_spks = {"F1": "2961_961", "F2": "8463_287645", "F3": "121_121726"} -spks = dict(male_spks, **female_spks) -spk_names = sorted(spks.keys()) - -def ASRTTS(audio_file, spk_name, ref_text=""): - spk = spks[spk_name] - spembs = xvectors[spk] - if ref_text == "": - reg_text = transcriber(audio_file)['text'] - else: - reg_text = ref_text - - speech, sr = torchaudio.load(audio_file, channels_first=True) # Mono channel - wav_tensor_spembs = text2speech(text=reg_text, speech=speech, spembs=spembs)["wav"] - wav_numpy = wav_tensor_spembs.unsqueeze(1).to("cpu") - sample_rate = 22050 - save_id = "./wav/" + Path(audio_file).stem + "_" + spk_name +"_spkembs.wav" - torchaudio.save(save_id, src=wav_tensor_spembs.unsqueeze(0).to("cpu"), sample_rate=22050) - - return save_id, reg_text - -def ref_reg_callback(audio_file, spk_name, ref_text): - reg_text = ref_text - return audio_file, spk_name, reg_text - -reference_textbox = gr.Textbox( - value="", - placeholder="Input reference here", - label="Reference", -) - -recognization_textbox = gr.Textbox( - value="", - placeholder="Output recognization here", - label="recognization_textbox", -) - -speaker_option = gr.Radio(choices=spk_names, label="Speaker") - -input_audio = gr.Audio( - source="microphone", - type="filepath", - label="Audio_to_Evaluate" -) -output_audio = gr.Audio( - source="upload", - file="filepath", - label="Synthesized Audio" -) -examples = [["./samples/001.wav",'M1', ""], - ["./samples/002.wav",'M2', ""], - ["./samples/003.wav",'F1', ""], - ["./samples/004.wav",'F2', ""]] - -# ASRTTS(*examples[0]) -iface = gr.Interface( - fn = ASRTTS, - inputs = [ - input_audio, - speaker_option, - reference_textbox, - ], - outputs = [ - output_audio, - recognization_textbox - ], - examples = examples -) -iface.input_callback = ref_reg_callback -iface.launch(share=False) \ No newline at end of file diff --git a/spaces/Laughify/Among_Us_Logic_AI_Generator/app.py b/spaces/Laughify/Among_Us_Logic_AI_Generator/app.py deleted file mode 100644 index fcc0d5ec06454c6f29828d3b4d22e3e743d81b76..0000000000000000000000000000000000000000 --- a/spaces/Laughify/Among_Us_Logic_AI_Generator/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import gradio as gr -import torch -from torch import autocast -from diffusers import StableDiffusionPipeline -import random - -model = "Laughify/among-us-logic-ai-characters" -device = "cpu" - -pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch.float32) -pipe = pipe.to(device) - -block = gr.Blocks(css=".container { max-width: 800px; margin: auto; }") - -def infer(prompt, width, height, nums, steps, guidance_scale, seed): - print(prompt) - print(width, height, nums, steps, guidance_scale, seed) - - if prompt is not 
None and prompt != "": - if seed is None or seed == '' or seed == -1: - seed = int(random.randrange(4294967294)) - generator = torch.Generator(device).manual_seed(seed) - images = pipe([prompt] * nums, height=height, width=width, num_inference_steps=steps, generator=generator, guidance_scale=guidance_scale )["sample"] - return images - -# with block as demo: -def run(): - _app = gr.Interface( - fn=infer, - title="Among Us Logic AI Character Generator", - inputs=[ - gr.Textbox(label="prompt"), - gr.Slider(512, 1024, 512, step=64, label="width"), - gr.Slider(512, 1024, 512, step=64, label="height"), - gr.Slider(1, 4, 1, step=1, label="Number of Images"), - gr.Slider(10, 150, step=1, value=50, - label="num_inference_steps:\n" - "The number of denoising steps. More de-scaling steps usually result in a higher quality image, but will slow down inference."), - gr.Slider(0, 20, 7.5, step=0.5, - label="guidance_scale:\n" + - "A higher boot ratio encourages the generation of images that are closely related to text \"hints\", often at the expense of reduced image quality"), - gr.Textbox(label="Random seed", - placeholder="Random Seed", - lines=1), - ], - outputs=[ - gr.Gallery(label="Generated images") - ]) - - return _app - - -app = run() -app.launch(debug=True) \ No newline at end of file diff --git a/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/attentions.py b/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/attentions.py deleted file mode 100644 index 05501be1871643f78dddbeaa529c96667031a8db..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Advanced-RVC-Inference/lib/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from lib.infer_pack import commons -from lib.infer_pack import modules -from lib.infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - 
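        # Each of the n_layers blocks built below applies, with residual connections
        # and dropout: causal self-attention -> LayerNorm, encoder-decoder attention
        # -> LayerNorm, causal FFN -> LayerNorm (see forward()).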
super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, 
mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). 
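        # This is the usual "skewing" trick for relative attention: padding one extra
        # column, flattening, padding (length - 1) extra entries and reshaping to
        # (length + 1, 2*length - 1) shifts each row so that relative offset (j - i)
        # lines up with absolute column j, avoiding an explicit gather over positions.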
- x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/tools/diffq/diffq.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/tools/diffq/diffq.py deleted file mode 100644 index b475ec7f55227417b014c69b5cf55033182113e1..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/tools/diffq/diffq.py +++ /dev/null @@ -1,286 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Differentiable quantizer based on scaled noise injection. 
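In short: every quantized weight `p` is trained as `p + scale * unit * noise`, with
`unit = 1 / (2 ** b - 1)` for a learnable (per-group) number of bits `b`, so that the
differentiable model-size estimate can be optimized jointly with the task loss.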
-""" -from dataclasses import dataclass -import math -import typing as tp - -import torch - -from .base import BaseQuantizer -from .uniform import uniform_quantize, uniform_unquantize -from .utils import simple_repr - - -class DiffQuantizer(BaseQuantizer): - @dataclass - class _QuantizedParam(BaseQuantizer._QuantizedParam): - logit: torch.nn.Parameter - - def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False, - group_size: int = 1, min_bits: float = 2, max_bits: float = 15, - param="bits", noise="gaussian", - init_bits: float = 8, extra_bits: float = 0, suffix: str = "_diffq", - exclude: tp.List[str] = [], detect_bound: bool = True): - """ - Differentiable quantizer based on scaled noise injection. - For every parameter `p` in the model, this introduces a number of bits parameter - `b` with the same dimensions (when group_size = 1). - Before each forward, `p` is replaced by `p + U` - with U uniform iid noise with range [-d/2, d/2], with `d` the uniform quantization - step for `b` bits. - This noise approximates the quantization noise in a differentiable manner, both - with respect to the unquantized parameter `p` and the number of bits `b`. - - At eveluation (as detected with `model.eval()`), the model is replaced - by its true quantized version, and restored when going back to training. - - When doing actual quantization (for serialization, or evaluation), - the number of bits is rounded to the nearest integer, and needs to be stored along. - This will cost a few bits per dimension. To reduce this cost, one can use `group_size`, - which will use a single noise level for multiple weight entries. - - You can use the `DiffQuantizer.model_size` method to get a differentiable estimate of the - model size in MB. You can then use this estimate as a penalty in your training loss. - - Args: - model (torch.nn.Module): model to quantize - min_size (float): minimum size in MB of a parameter to be quantized. - float16 (bool): if a layer is smaller than min_size, should we still do float16? - group_size (int): weight entries are groupped together to reduce the number - of noise scales to store. This should divide the size of all parameters - bigger than min_size. - min_bits (float): minimal number of bits. - max_bits (float): maximal number of bits. - init_bits (float): initial number of bits. - extra_bits (float): extra bits to add for actual quantization (before roundoff). - suffix (str): suffix used for the name of the extra noise scale parameters. - exclude (list[str]): list of patterns used to match parameters to exclude. - For instance `['bias']` to exclude all bias terms. - detect_bound (bool): if True, will detect bound parameters and reuse - the same quantized tensor for both, as well as the same number of bits. - - ..Warning:: - You must call `model.training()` and `model.eval()` for `DiffQuantizer` work properly. 
- - """ - self.group_size = group_size - self.min_bits = min_bits - self.max_bits = max_bits - self.init_bits = init_bits - self.extra_bits = extra_bits - self.suffix = suffix - self.param = param - self.noise = noise - assert noise in ["gaussian", "uniform"] - self._optimizer_setup = False - - self._min_noise = 1 / (2 ** self.max_bits - 1) - self._max_noise = 1 / (2 ** self.min_bits - 1) - - assert group_size >= 0 - assert min_bits < init_bits < max_bits, \ - "init_bits must be between min_bits and max_bits excluded3" - - for name, _ in model.named_parameters(): - if name.endswith(suffix): - raise RuntimeError("The model already has some noise scales parameters, " - "maybe you used twice a DiffQuantizer on the same model?.") - - super().__init__(model, min_size, float16, exclude, detect_bound) - - def _get_bits(self, logit: torch.Tensor): - if self.param == "noise": - return torch.log2(1 + 1 / self._get_noise_scale(logit)) - else: - t = torch.sigmoid(logit) - return self.max_bits * t + (1 - t) * self.min_bits - - def _get_noise_scale(self, logit: torch.Tensor): - if self.param == "noise": - t = torch.sigmoid(logit) - return torch.exp(t * math.log(self._min_noise) + (1 - t) * math.log(self._max_noise)) - else: - return 1 / (2 ** self._get_bits(logit) - 1) - - def _register_param(self, name, param, module, other): - if other is not None: - return self.__class__._QuantizedParam( - name=name, param=param, module=module, logit=other.logit, other=other) - assert self.group_size == 0 or param.numel() % self.group_size == 0 - # we want the initial number of bits to be init_bits. - if self.param == "noise": - noise_scale = 1 / (2 ** self.init_bits - 1) - t = (math.log(noise_scale) - math.log(self._max_noise)) / ( - math.log(self._min_noise) - math.log(self._max_noise)) - else: - t = (self.init_bits - self.min_bits) / (self.max_bits - self.min_bits) - assert 0 < t < 1 - logit = torch.logit(torch.tensor(float(t))) - assert abs(self._get_bits(logit) - self.init_bits) < 1e-5 - if self.group_size > 0: - nparam = param.numel() // self.group_size - else: - nparam = 1 - logit = torch.nn.Parameter( - torch.full( - (nparam,), - logit, - device=param.device)) - module.register_parameter(name + self.suffix, logit) - return self.__class__._QuantizedParam( - name=name, param=param, module=module, logit=logit, other=None) - - def clear_optimizer(self, optimizer: torch.optim.Optimizer): - params = [qp.logit for qp in self._qparams] - - for group in optimizer.param_groups: - new_params = [] - for q in list(group["params"]): - matched = False - for p in params: - if p is q: - matched = True - if not matched: - new_params.append(q) - group["params"][:] = new_params - - def setup_optimizer(self, optimizer: torch.optim.Optimizer, - lr: float = 1e-3, **kwargs): - """ - Setup the optimizer to tune the number of bits. In particular, this will deactivate - weight decay for the bits parameters. - - Args: - optimizer (torch.Optimizer): optimizer to use. - lr (float): specific learning rate for the bits parameters. 1e-3 - is perfect for Adam.,w - kwargs (dict): overrides for other optimization parameters for the bits. 
- """ - assert not self._optimizer_setup - self._optimizer_setup = True - - params = [qp.logit for qp in self._qparams] - - for group in optimizer.param_groups: - for q in list(group["params"]): - for p in params: - if p is q: - raise RuntimeError("You should create the optimizer " - "before the quantizer!") - - group = {"params": params, "lr": lr, "weight_decay": 0} - group.update(kwargs) - optimizer.add_param_group(group) - - def no_optimizer(self): - """ - Call this if you do not want to use an optimizer. - """ - self._optimizer_setup = True - - def check_unused(self): - for qparam in self._qparams: - if qparam.other is not None: - continue - grad = qparam.param.grad - if grad is None or (grad == 0).all(): - if qparam.logit.grad is not None: - qparam.logit.grad.data.zero_() - - def model_size(self, exact=False): - """ - Differentiable estimate of the model size. - The size is returned in MB. - - If `exact` is True, then the output is no longer differentiable but - reflect exactly an achievable size, even without compression, - i.e.same as returned by `naive_model_size()`. - """ - total = super().model_size() - subtotal = 0 - for qparam in self._qparams: - # only count the first appearance of a Parameter - if qparam.other is not None: - continue - bits = self.extra_bits + self._get_bits(qparam.logit) - if exact: - bits = bits.round().clamp(1, 15) - if self.group_size == 0: - group_size = qparam.param.numel() - else: - group_size = self.group_size - subtotal += group_size * bits.sum() - subtotal += 2 * 32 # param scale - - # Number of bits to represent each number of bits - bits_bits = math.ceil(math.log2(1 + (bits.max().round().item() - self.min_bits))) - subtotal += 8 # 8 bits for bits_bits - subtotal += bits_bits * bits.numel() - - subtotal /= 2 ** 20 * 8 # bits -> MegaBytes - return total + subtotal - - def true_model_size(self): - """ - Naive model size without zlib compression. 
- """ - return self.model_size(exact=True).item() - - def _pre_forward_train(self): - if not self._optimizer_setup: - raise RuntimeError("You must call `setup_optimizer()` on your optimizer " - "before starting training.") - for qparam in self._qparams: - if qparam.other is not None: - noisy = qparam.other.module._parameters[qparam.other.name] - else: - bits = self._get_bits(qparam.logit)[:, None] - if self.group_size == 0: - p_flat = qparam.param.view(-1) - else: - p_flat = qparam.param.view(-1, self.group_size) - scale = p_flat.max() - p_flat.min() - unit = 1 / (2**bits - 1) - if self.noise == "uniform": - noise_source = (torch.rand_like(p_flat) - 0.5) - elif self.noise == "gaussian": - noise_source = torch.randn_like(p_flat) / 2 - noise = scale * unit * noise_source - noisy = p_flat + noise - # We bypass the checks by PyTorch on parameters being leafs - qparam.module._parameters[qparam.name] = noisy.view_as(qparam.param) - return True - - def _post_forward_train(self): - for qparam in self._qparams: - qparam.module._parameters[qparam.name] = qparam.param - return True - - def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any: - bits = self.extra_bits + self._get_bits(qparam.logit) - bits = bits.round().clamp(1, 15)[:, None].byte() - if self.group_size == 0: - p = qparam.param.data.view(-1) - else: - p = qparam.param.data.view(-1, self.group_size) - levels, scales = uniform_quantize(p, bits) - return levels, scales, bits - - def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor: - levels, param_scale, bits = quantized - return uniform_unquantize(levels, param_scale, bits).view_as(qparam.param.data) - - def detach(self): - super().detach() - for qparam in self._qparams: - delattr(qparam.module, qparam.name + self.suffix) - - def __repr__(self): - return simple_repr(self) diff --git a/spaces/Lianjd/stock_dashboard/backtrader/utils/flushfile.py b/spaces/Lianjd/stock_dashboard/backtrader/utils/flushfile.py deleted file mode 100644 index 6d0361456490af3be7a25d64298bfc350c11b26d..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/utils/flushfile.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import sys - - -class flushfile(object): - - def __init__(self, f): - self.f = f - - def write(self, x): - self.f.write(x) - self.f.flush() - - def flush(self): - self.f.flush() - -if sys.platform == 'win32': - sys.stdout = flushfile(sys.stdout) - sys.stderr = flushfile(sys.stderr) - - -class StdOutDevNull(object): - - def __init__(self): - self.stdout = sys.stdout - sys.stdout = self - - def write(self, x): - pass - - def flush(self): - pass - - def stop(self): - sys.stdout = self.stdout diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/util/image_pool.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/util/image_pool.py deleted file mode 100644 index 1e7846e7c203f5a3d3f8d7187f906990762396fa..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Global/util/image_pool.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import random -import torch -from torch.autograd import Variable - - -class ImagePool: - def __init__(self, pool_size): - self.pool_size = pool_size - if self.pool_size > 0: - self.num_imgs = 0 - self.images = [] - - def query(self, images): - if self.pool_size == 0: - return images - return_images = [] - for image in images.data: - image = torch.unsqueeze(image, 0) - if self.num_imgs < self.pool_size: - self.num_imgs = self.num_imgs + 1 - self.images.append(image) - return_images.append(image) - else: - p = random.uniform(0, 1) - if p > 0.5: - random_id = random.randint(0, self.pool_size - 1) - tmp = self.images[random_id].clone() - self.images[random_id] = image - return_images.append(tmp) - else: - return_images.append(image) - return_images = Variable(torch.cat(return_images, 0)) - return return_images diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/datasets/transforms.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/datasets/transforms.py deleted file mode 100644 index 91cf9269e4b31008a3ddca34a19b038a9b399991..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/datasets/transforms.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Transforms and data augmentation for both image + bbox. -""" -import os -import random - -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F - -from groundingdino.util.box_ops import box_xyxy_to_cxcywh -from groundingdino.util.misc import interpolate - - -def crop(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? 
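    # `region` follows torchvision's F.crop convention (top, left, height, width),
    # i.e. (i, j, h, w) above; boxes are shifted by (j, i) and clamped below.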
- target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd", "positive_map"] - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? - target["masks"] = target["masks"][:, i : i + h, j : j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target["boxes"].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target["masks"].flatten(1).any(1) - - for field in fields: - if field in target: - target[field] = target[field][keep] - - if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO": - # for debug and visualization only. - if "strings_positive" in target: - target["strings_positive"] = [ - _i for _i, _j in zip(target["strings_positive"], keep) if _j - ] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor( - [w, 0, w, 0] - ) - target["boxes"] = boxes - - if "masks" in target: - target["masks"] = target["masks"].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - if max_size is not None: - min_original_size = float(min((w, h))) - max_original_size = float(max((w, h))) - if max_original_size / min_original_size * size > max_size: - size = int(round(max_size * min_original_size / max_original_size)) - - if (w <= h and w == size) or (h <= w and h == size): - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - size = get_size(image.size, size, max_size) - rescaled_image = F.resize(image, size) - - if target is None: - return rescaled_image, None - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor( - [ratio_width, ratio_height, ratio_width, ratio_height] - ) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - target["masks"] = ( - interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - ) - - return rescaled_image, 
target - - -def pad(image, target, padding): - # assumes that we only pad on the bottom right corners - padded_image = F.pad(image, (0, 0, padding[0], padding[1])) - if target is None: - return padded_image, None - target = target.copy() - # should we do something wrt the original size? - target["size"] = torch.tensor(padded_image.size[::-1]) - if "masks" in target: - target["masks"] = torch.nn.functional.pad(target["masks"], (0, padding[0], 0, padding[1])) - return padded_image, target - - -class ResizeDebug(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - return resize(img, target, self.size) - - -class RandomCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - region = T.RandomCrop.get_params(img, self.size) - return crop(img, target, region) - - -class RandomSizeCrop(object): - def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False): - # respect_boxes: True to keep all boxes - # False to tolerence box filter - self.min_size = min_size - self.max_size = max_size - self.respect_boxes = respect_boxes - - def __call__(self, img: PIL.Image.Image, target: dict): - init_boxes = len(target["boxes"]) - max_patience = 10 - for i in range(max_patience): - w = random.randint(self.min_size, min(img.width, self.max_size)) - h = random.randint(self.min_size, min(img.height, self.max_size)) - region = T.RandomCrop.get_params(img, [h, w]) - result_img, result_target = crop(img, target, region) - if ( - not self.respect_boxes - or len(result_target["boxes"]) == init_boxes - or i == max_patience - 1 - ): - return result_img, result_target - return result_img, result_target - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.0)) - crop_left = int(round((image_width - crop_width) / 2.0)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class RandomResize(object): - def __init__(self, sizes, max_size=None): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - return resize(img, target, size, self.max_size) - - -class RandomPad(object): - def __init__(self, max_pad): - self.max_pad = max_pad - - def __call__(self, img, target): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - return pad(img, target, (pad_x, pad_y)) - - -class RandomSelect(object): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - - def __init__(self, transforms1, transforms2, p=0.5): - self.transforms1 = transforms1 - self.transforms2 = transforms2 - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return self.transforms1(img, target) - return self.transforms2(img, target) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class RandomErasing(object): - def __init__(self, *args, **kwargs): - self.eraser = T.RandomErasing(*args, **kwargs) - - def __call__(self, img, target): - return 
self.eraser(img), target - - -class Normalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target=None): - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - h, w = image.shape[-2:] - if "boxes" in target: - boxes = target["boxes"] - boxes = box_xyxy_to_cxcywh(boxes) - boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) - target["boxes"] = boxes - return image, target - - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " {0}".format(t) - format_string += "\n)" - return format_string diff --git a/spaces/Makiing/coolb-in-gtest/src/lib/bots/bing/sr.ts b/spaces/Makiing/coolb-in-gtest/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/Makiing/coolb-in-gtest/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? ( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? 
new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/MichaelWelsch/FreeVC/speaker_encoder/voice_encoder.py b/spaces/MichaelWelsch/FreeVC/speaker_encoder/voice_encoder.py deleted file mode 100644 index 88cdee2de76b72db58c5dd19a888597e0fe12fbb..0000000000000000000000000000000000000000 --- a/spaces/MichaelWelsch/FreeVC/speaker_encoder/voice_encoder.py +++ /dev/null @@ -1,173 +0,0 @@ -from speaker_encoder.hparams import * -from speaker_encoder import audio -from pathlib import Path -from typing import Union, List -from torch import nn -from time import perf_counter as timer -import numpy as np -import torch - - -class SpeakerEncoder(nn.Module): - def __init__(self, weights_fpath, device: Union[str, torch.device]=None, verbose=True): - """ - :param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). - If None, defaults to cuda if it is available on your machine, otherwise the model will - run on cpu. Outputs are always returned on the cpu, as numpy arrays. - """ - super().__init__() - - # Define the network - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - # Get the target device - if device is None: - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - elif isinstance(device, str): - device = torch.device(device) - self.device = device - - # Load the pretrained model'speaker weights - # weights_fpath = Path(__file__).resolve().parent.joinpath("pretrained.pt") - # if not weights_fpath.exists(): - # raise Exception("Couldn't find the voice encoder pretrained model at %s." 
% - # weights_fpath) - - start = timer() - checkpoint = torch.load(weights_fpath, map_location="cpu") - - self.load_state_dict(checkpoint["model_state"], strict=False) - self.to(device) - - if verbose: - print("Loaded the voice encoder model on %s in %.2f seconds." % - (device.type, timer() - start)) - - def forward(self, mels: torch.FloatTensor): - """ - Computes the embeddings of a batch of utterance spectrograms. - :param mels: a batch of mel spectrograms of same duration as a float32 tensor of shape - (batch_size, n_frames, n_channels) - :return: the embeddings as a float 32 tensor of shape (batch_size, embedding_size). - Embeddings are positive and L2-normed, thus they lay in the range [0, 1]. - """ - # Pass the input through the LSTM layers and retrieve the final hidden state of the last - # layer. Apply a cutoff to 0 for negative values and L2 normalize the embeddings. - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - @staticmethod - def compute_partial_slices(n_samples: int, rate, min_coverage): - """ - Computes where to split an utterance waveform and its corresponding mel spectrogram to - obtain partial utterances of each. Both the waveform and the - mel spectrogram slices are returned, so as to make each partial utterance waveform - correspond to its spectrogram. - - The returned ranges may be indexing further than the length of the waveform. It is - recommended that you pad the waveform with zeros up to wav_slices[-1].stop. - - :param n_samples: the number of samples in the waveform - :param rate: how many partial utterances should occur per second. Partial utterances must - cover the span of the entire utterance, thus the rate should not be lower than the inverse - of the duration of a partial utterance. By default, partial utterances are 1.6s long and - the minimum rate is thus 0.625. - :param min_coverage: when reaching the last partial utterance, it may or may not have - enough frames. If at least of are present, - then the last partial utterance will be considered by zero-padding the audio. Otherwise, - it will be discarded. If there aren't enough frames for one partial utterance, - this parameter is ignored so that the function always returns at least one slice. - :return: the waveform slices and mel spectrogram slices as lists of array slices. Index - respectively the waveform and the mel spectrogram with these slices to obtain the partial - utterances. 
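        For example, assuming the usual defaults (16 kHz audio, 10 ms mel hop,
        160-frame i.e. 1.6 s partials) and rate=1.3: samples_per_frame = 160, so
        frame_step = round((16000 / 1.3) / 160) = 77 frames, i.e. a new partial
        starts roughly every 0.77 s.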
- """ - assert 0 < min_coverage <= 1 - - # Compute how many frames separate two partial utterances - samples_per_frame = int((sampling_rate * mel_window_step / 1000)) - n_frames = int(np.ceil((n_samples + 1) / samples_per_frame)) - frame_step = int(np.round((sampling_rate / rate) / samples_per_frame)) - assert 0 < frame_step, "The rate is too high" - assert frame_step <= partials_n_frames, "The rate is too low, it should be %f at least" % \ - (sampling_rate / (samples_per_frame * partials_n_frames)) - - # Compute the slices - wav_slices, mel_slices = [], [] - steps = max(1, n_frames - partials_n_frames + frame_step + 1) - for i in range(0, steps, frame_step): - mel_range = np.array([i, i + partials_n_frames]) - wav_range = mel_range * samples_per_frame - mel_slices.append(slice(*mel_range)) - wav_slices.append(slice(*wav_range)) - - # Evaluate whether extra padding is warranted or not - last_wav_range = wav_slices[-1] - coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start) - if coverage < min_coverage and len(mel_slices) > 1: - mel_slices = mel_slices[:-1] - wav_slices = wav_slices[:-1] - - return wav_slices, mel_slices - - def embed_utterance(self, wav: np.ndarray, return_partials=False, rate=1.3, min_coverage=0.75): - """ - Computes an embedding for a single utterance. The utterance is divided in partial - utterances and an embedding is computed for each. The complete utterance embedding is the - L2-normed average embedding of the partial utterances. - - TODO: independent batched version of this function - - :param wav: a preprocessed utterance waveform as a numpy array of float32 - :param return_partials: if True, the partial embeddings will also be returned along with - the wav slices corresponding to each partial utterance. - :param rate: how many partial utterances should occur per second. Partial utterances must - cover the span of the entire utterance, thus the rate should not be lower than the inverse - of the duration of a partial utterance. By default, partial utterances are 1.6s long and - the minimum rate is thus 0.625. - :param min_coverage: when reaching the last partial utterance, it may or may not have - enough frames. If at least of are present, - then the last partial utterance will be considered by zero-padding the audio. Otherwise, - it will be discarded. If there aren't enough frames for one partial utterance, - this parameter is ignored so that the function always returns at least one slice. - :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If - is True, the partial utterances as a numpy array of float32 of shape - (n_partials, model_embedding_size) and the wav partials as a list of slices will also be - returned. - """ - # Compute where to split the utterance into partials and pad the waveform with zeros if - # the partial utterances cover a larger range. 
- wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage) - max_wave_length = wav_slices[-1].stop - if max_wave_length >= len(wav): - wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant") - - # Split the utterance into partials and forward them through the model - mel = audio.wav_to_mel_spectrogram(wav) - mels = np.array([mel[s] for s in mel_slices]) - with torch.no_grad(): - mels = torch.from_numpy(mels).to(self.device) - partial_embeds = self(mels).cpu().numpy() - - # Compute the utterance embedding from the partial embeddings - raw_embed = np.mean(partial_embeds, axis=0) - embed = raw_embed / np.linalg.norm(raw_embed, 2) - - if return_partials: - return embed, partial_embeds, wav_slices - return embed - - def embed_speaker(self, wavs: List[np.ndarray], **kwargs): - """ - Compute the embedding of a collection of wavs (presumably from the same speaker) by - averaging their embedding and L2-normalizing it. - - :param wavs: list of wavs a numpy arrays of float32. - :param kwargs: extra arguments to embed_utterance() - :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). - """ - raw_embed = np.mean([self.embed_utterance(wav, return_partials=False, **kwargs) \ - for wav in wavs], axis=0) - return raw_embed / np.linalg.norm(raw_embed, 2) \ No newline at end of file diff --git a/spaces/Miuzarte/SUI-svc-3.0/vdecoder/hifigan/models.py b/spaces/Miuzarte/SUI-svc-3.0/vdecoder/hifigan/models.py deleted file mode 100644 index bdc3fa2c3447f360472d94c2fad9bd74993f6410..0000000000000000000000000000000000000000 --- a/spaces/Miuzarte/SUI-svc-3.0/vdecoder/hifigan/models.py +++ /dev/null @@ -1,500 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in 
zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. 
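The residual additions in ResBlock1/ResBlock2 above rely on the dilated convolutions preserving the time axis, which holds because the padding is dilation * (kernel_size - 1) // 2 with stride 1. A small shape check:

import torch
import torch.nn as nn

x = torch.randn(1, 8, 100)                        # (batch, channels, time)
for d in (1, 3, 5):
    conv = nn.Conv1d(8, 8, kernel_size=3, dilation=d, padding=d * (3 - 1) // 2)
    assert conv(x).shape == x.shape               # so xt + x in the residual block is well defined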
- # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (torch.diff(tmp_over_one, dim=1)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. - i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . 
for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - 
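SourceModuleHnNSF above collapses the harmonic_num + 1 sine channels produced by SineGen into a single excitation with a learned Linear layer followed by tanh. A shape-only sketch of that merge (random weights, purely illustrative):

import torch
import torch.nn as nn

harmonic_num = 8
sine_wavs = torch.randn(2, 16000, harmonic_num + 1)   # (batch, length, fundamental + 8 overtones)
merge = nn.Sequential(nn.Linear(harmonic_num + 1, 1), nn.Tanh())
sine_merge = merge(sine_wavs)
print(sine_merge.shape)                                # torch.Size([2, 16000, 1])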
self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 
41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/losses/bce_loss.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/losses/bce_loss.py deleted file mode 100644 index df4ce140dc6adb84c42dc4533dc2240dd6ca34bb..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/losses/bce_loss.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional, Union - -import torch -import torch.nn as nn - -from mmocr.registry import MODELS - - -@MODELS.register_module() -class MaskedBalancedBCEWithLogitsLoss(nn.Module): - """This loss combines a Sigmoid layers and a masked balanced BCE loss in - one single class. It's AMP-eligible. - - Args: - reduction (str, optional): The method to reduce the loss. - Options are 'none', 'mean' and 'sum'. Defaults to 'none'. - negative_ratio (float or int, optional): Maximum ratio of negative - samples to positive ones. Defaults to 3. - fallback_negative_num (int, optional): When the mask contains no - positive samples, the number of negative samples to be sampled. - Defaults to 0. - eps (float, optional): Eps to avoid zero-division error. Defaults to - 1e-6. 
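The three loss helpers above are the least-squares GAN objectives plus an L1 feature-matching term: discriminator_loss drives real outputs toward 1 and generated outputs toward 0, while generator_loss drives generated outputs toward 1. A toy check with scalar discriminator outputs:

import torch

real_out = torch.tensor([0.9])
fake_out = torch.tensor([0.1])
d_loss = torch.mean((1 - real_out) ** 2) + torch.mean(fake_out ** 2)   # ~0.02: D already separates them
g_loss = torch.mean((1 - fake_out) ** 2)                               # ~0.81: G still has to fool D
print(d_loss.item(), g_loss.item())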
- """ - - def __init__(self, - reduction: str = 'none', - negative_ratio: Union[float, int] = 3, - fallback_negative_num: int = 0, - eps: float = 1e-6) -> None: - super().__init__() - assert reduction in ['none', 'mean', 'sum'] - assert isinstance(negative_ratio, (float, int)) - assert isinstance(fallback_negative_num, int) - assert isinstance(eps, float) - self.eps = eps - self.negative_ratio = negative_ratio - self.reduction = reduction - self.fallback_negative_num = fallback_negative_num - self.loss = nn.BCEWithLogitsLoss(reduction=reduction) - - def forward(self, - pred: torch.Tensor, - gt: torch.Tensor, - mask: Optional[torch.Tensor] = None) -> torch.Tensor: - """Forward function. - - Args: - pred (torch.Tensor): The prediction in any shape. - gt (torch.Tensor): The learning target of the prediction in the - same shape as pred. - mask (torch.Tensor, optional): Binary mask in the same shape of - pred, indicating positive regions to calculate the loss. Whole - region will be taken into account if not provided. Defaults to - None. - - Returns: - torch.Tensor: The loss value. - """ - - assert pred.size() == gt.size() and gt.numel() > 0 - if mask is None: - mask = torch.ones_like(gt) - assert mask.size() == gt.size() - - positive = (gt * mask).float() - negative = ((1 - gt) * mask).float() - positive_count = int(positive.sum()) - if positive_count == 0: - negative_count = min( - int(negative.sum()), self.fallback_negative_num) - else: - negative_count = min( - int(negative.sum()), int(positive_count * self.negative_ratio)) - - assert gt.max() <= 1 and gt.min() >= 0 - loss = self.loss(pred, gt) - positive_loss = loss * positive - negative_loss = loss * negative - - negative_loss, _ = torch.topk(negative_loss.view(-1), negative_count) - - balance_loss = (positive_loss.sum() + negative_loss.sum()) / ( - positive_count + negative_count + self.eps) - - return balance_loss - - -@MODELS.register_module() -class MaskedBalancedBCELoss(MaskedBalancedBCEWithLogitsLoss): - """Masked Balanced BCE loss. - - Args: - reduction (str, optional): The method to reduce the loss. - Options are 'none', 'mean' and 'sum'. Defaults to 'none'. - negative_ratio (float or int): Maximum ratio of negative - samples to positive ones. Defaults to 3. - fallback_negative_num (int): When the mask contains no - positive samples, the number of negative samples to be sampled. - Defaults to 0. - eps (float): Eps to avoid zero-division error. Defaults to - 1e-6. - """ - - def __init__(self, - reduction: str = 'none', - negative_ratio: Union[float, int] = 3, - fallback_negative_num: int = 0, - eps: float = 1e-6) -> None: - super().__init__() - assert reduction in ['none', 'mean', 'sum'] - assert isinstance(negative_ratio, (float, int)) - assert isinstance(fallback_negative_num, int) - assert isinstance(eps, float) - self.eps = eps - self.negative_ratio = negative_ratio - self.reduction = reduction - self.fallback_negative_num = fallback_negative_num - self.loss = nn.BCELoss(reduction=reduction) - - def forward(self, - pred: torch.Tensor, - gt: torch.Tensor, - mask: Optional[torch.Tensor] = None) -> torch.Tensor: - """Forward function. - - Args: - pred (torch.Tensor): The prediction in any shape. - gt (torch.Tensor): The learning target of the prediction in the - same shape as pred. - mask (torch.Tensor, optional): Binary mask in the same shape of - pred, indicating positive regions to calculate the loss. Whole - region will be taken into account if not provided. Defaults to - None. - - Returns: - torch.Tensor: The loss value. 
- """ - - assert pred.max() <= 1 and pred.min() >= 0 - return super().forward(pred, gt, mask) - - -@MODELS.register_module() -class MaskedBCEWithLogitsLoss(nn.Module): - """This loss combines a Sigmoid layers and a masked BCE loss in one single - class. It's AMP-eligible. - - Args: - eps (float): Eps to avoid zero-division error. Defaults to - 1e-6. - """ - - def __init__(self, eps: float = 1e-6) -> None: - super().__init__() - assert isinstance(eps, float) - self.eps = eps - self.loss = nn.BCEWithLogitsLoss(reduction='none') - - def forward(self, - pred: torch.Tensor, - gt: torch.Tensor, - mask: Optional[torch.Tensor] = None) -> torch.Tensor: - """Forward function. - - Args: - pred (torch.Tensor): The prediction in any shape. - gt (torch.Tensor): The learning target of the prediction in the - same shape as pred. - mask (torch.Tensor, optional): Binary mask in the same shape of - pred, indicating positive regions to calculate the loss. Whole - region will be taken into account if not provided. Defaults to - None. - - Returns: - torch.Tensor: The loss value. - """ - - assert pred.size() == gt.size() and gt.numel() > 0 - if mask is None: - mask = torch.ones_like(gt) - assert mask.size() == gt.size() - - assert gt.max() <= 1 and gt.min() >= 0 - loss = self.loss(pred, gt) - - return (loss * mask).sum() / (mask.sum() + self.eps) - - -@MODELS.register_module() -class MaskedBCELoss(MaskedBCEWithLogitsLoss): - """Masked BCE loss. - - Args: - eps (float): Eps to avoid zero-division error. Defaults to - 1e-6. - """ - - def __init__(self, eps: float = 1e-6) -> None: - super().__init__() - assert isinstance(eps, float) - self.eps = eps - self.loss = nn.BCELoss(reduction='none') - - def forward(self, - pred: torch.Tensor, - gt: torch.Tensor, - mask: Optional[torch.Tensor] = None) -> torch.Tensor: - """Forward function. - - Args: - pred (torch.Tensor): The prediction in any shape. - gt (torch.Tensor): The learning target of the prediction in the - same shape as pred. - mask (torch.Tensor, optional): Binary mask in the same shape of - pred, indicating positive regions to calculate the loss. Whole - region will be taken into account if not provided. Defaults to - None. - - Returns: - torch.Tensor: The loss value. 
- """ - - assert pred.max() <= 1 and pred.min() >= 0 - - return super().forward(pred, gt, mask) diff --git a/spaces/MrBodean/VoiceClone/vocoder/train.py b/spaces/MrBodean/VoiceClone/vocoder/train.py deleted file mode 100644 index 6dc2f892e1fc134b311e2c9ee42250a2d3713547..0000000000000000000000000000000000000000 --- a/spaces/MrBodean/VoiceClone/vocoder/train.py +++ /dev/null @@ -1,127 +0,0 @@ -from vocoder.models.fatchord_version import WaveRNN -from vocoder.vocoder_dataset import VocoderDataset, collate_vocoder -from vocoder.distribution import discretized_mix_logistic_loss -from vocoder.display import stream, simple_table -from vocoder.gen_wavernn import gen_testset -from torch.utils.data import DataLoader -from pathlib import Path -from torch import optim -import torch.nn.functional as F -import vocoder.hparams as hp -import numpy as np -import time -import torch -import platform - -def train(run_id: str, syn_dir: Path, voc_dir: Path, models_dir: Path, ground_truth: bool, - save_every: int, backup_every: int, force_restart: bool): - # Check to make sure the hop length is correctly factorised - assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length - - # Instantiate the model - print("Initializing the model...") - model = WaveRNN( - rnn_dims=hp.voc_rnn_dims, - fc_dims=hp.voc_fc_dims, - bits=hp.bits, - pad=hp.voc_pad, - upsample_factors=hp.voc_upsample_factors, - feat_dims=hp.num_mels, - compute_dims=hp.voc_compute_dims, - res_out_dims=hp.voc_res_out_dims, - res_blocks=hp.voc_res_blocks, - hop_length=hp.hop_length, - sample_rate=hp.sample_rate, - mode=hp.voc_mode - ) - - if torch.cuda.is_available(): - model = model.cuda() - device = torch.device('cuda') - else: - device = torch.device('cpu') - - # Initialize the optimizer - optimizer = optim.Adam(model.parameters()) - for p in optimizer.param_groups: - p["lr"] = hp.voc_lr - loss_func = F.cross_entropy if model.mode == "RAW" else discretized_mix_logistic_loss - - # Load the weights - model_dir = models_dir.joinpath(run_id) - model_dir.mkdir(exist_ok=True) - weights_fpath = model_dir.joinpath(run_id + ".pt") - if force_restart or not weights_fpath.exists(): - print("\nStarting the training of WaveRNN from scratch\n") - model.save(weights_fpath, optimizer) - else: - print("\nLoading weights at %s" % weights_fpath) - model.load(weights_fpath, optimizer) - print("WaveRNN weights loaded from step %d" % model.step) - - # Initialize the dataset - metadata_fpath = syn_dir.joinpath("train.txt") if ground_truth else \ - voc_dir.joinpath("synthesized.txt") - mel_dir = syn_dir.joinpath("mels") if ground_truth else voc_dir.joinpath("mels_gta") - wav_dir = syn_dir.joinpath("audio") - dataset = VocoderDataset(metadata_fpath, mel_dir, wav_dir) - test_loader = DataLoader(dataset, - batch_size=1, - shuffle=True, - pin_memory=True) - - # Begin the training - simple_table([('Batch size', hp.voc_batch_size), - ('LR', hp.voc_lr), - ('Sequence Len', hp.voc_seq_len)]) - - for epoch in range(1, 350): - data_loader = DataLoader(dataset, - collate_fn=collate_vocoder, - batch_size=hp.voc_batch_size, - num_workers=2 if platform.system() != "Windows" else 0, - shuffle=True, - pin_memory=True) - start = time.time() - running_loss = 0. 
- - for i, (x, y, m) in enumerate(data_loader, 1): - if torch.cuda.is_available(): - x, m, y = x.cuda(), m.cuda(), y.cuda() - - # Forward pass - y_hat = model(x, m) - if model.mode == 'RAW': - y_hat = y_hat.transpose(1, 2).unsqueeze(-1) - elif model.mode == 'MOL': - y = y.float() - y = y.unsqueeze(-1) - - # Backward pass - loss = loss_func(y_hat, y) - optimizer.zero_grad() - loss.backward() - optimizer.step() - - running_loss += loss.item() - speed = i / (time.time() - start) - avg_loss = running_loss / i - - step = model.get_step() - k = step // 1000 - - if backup_every != 0 and step % backup_every == 0 : - model.checkpoint(model_dir, optimizer) - - if save_every != 0 and step % save_every == 0 : - model.save(weights_fpath, optimizer) - - msg = f"| Epoch: {epoch} ({i}/{len(data_loader)}) | " \ - f"Loss: {avg_loss:.4f} | {speed:.1f} " \ - f"steps/s | Step: {k}k | " - stream(msg) - - - gen_testset(model, test_loader, hp.voc_gen_at_checkpoint, hp.voc_gen_batched, - hp.voc_target, hp.voc_overlap, model_dir) - print("") diff --git a/spaces/Mwebrania/clasma_database/README.md b/spaces/Mwebrania/clasma_database/README.md deleted file mode 100644 index 1de3b43bbca332e66e2ef2512098d46534592d36..0000000000000000000000000000000000000000 --- a/spaces/Mwebrania/clasma_database/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Clasma Database -emoji: 👁 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NATSpeech/DiffSpeech/utils/audio/pitch/utils.py b/spaces/NATSpeech/DiffSpeech/utils/audio/pitch/utils.py deleted file mode 100644 index 238b8022185753a7d4d9d674d189a99050c29b6f..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/utils/audio/pitch/utils.py +++ /dev/null @@ -1,82 +0,0 @@ -import numpy as np -import torch - - -def to_lf0(f0): - f0[f0 < 1.0e-5] = 1.0e-6 - lf0 = f0.log() if isinstance(f0, torch.Tensor) else np.log(f0) - lf0[f0 < 1.0e-5] = - 1.0E+10 - return lf0 - - -def to_f0(lf0): - f0 = np.where(lf0 <= 0, 0.0, np.exp(lf0)) - return f0.flatten() - - -def f0_to_coarse(f0, f0_bin=256, f0_max=900.0, f0_min=50.0): - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - is_torch = isinstance(f0, torch.Tensor) - f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1 - - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1 - f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int) - assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min(), f0.min(), f0.max()) - return f0_coarse - - -def coarse_to_f0(f0_coarse, f0_bin=256, f0_max=900.0, f0_min=50.0): - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - uv = f0_coarse == 1 - f0 = f0_mel_min + (f0_coarse - 1) * (f0_mel_max - f0_mel_min) / (f0_bin - 2) - f0 = ((f0 / 1127).exp() - 1) * 700 - f0[uv] = 0 - return f0 - - -def norm_f0(f0, uv, pitch_norm='log', f0_mean=400, f0_std=100): - is_torch = isinstance(f0, torch.Tensor) - if pitch_norm == 'standard': - f0 = (f0 - f0_mean) / f0_std - if pitch_norm == 'log': - f0 = torch.log2(f0 + 1e-8) if is_torch else np.log2(f0 + 1e-8) - if uv is not None: - f0[uv > 0] = 0 - return f0 - - -def norm_interp_f0(f0, pitch_norm='log', 
f0_mean=None, f0_std=None): - is_torch = isinstance(f0, torch.Tensor) - if is_torch: - device = f0.device - f0 = f0.data.cpu().numpy() - uv = f0 == 0 - f0 = norm_f0(f0, uv, pitch_norm, f0_mean, f0_std) - if sum(uv) == len(f0): - f0[uv] = 0 - elif sum(uv) > 0: - f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv]) - if is_torch: - uv = torch.FloatTensor(uv) - f0 = torch.FloatTensor(f0) - f0 = f0.to(device) - uv = uv.to(device) - return f0, uv - - -def denorm_f0(f0, uv, pitch_norm='log', f0_mean=400, f0_std=100, pitch_padding=None, min=50, max=900): - is_torch = isinstance(f0, torch.Tensor) - if pitch_norm == 'standard': - f0 = f0 * f0_std + f0_mean - if pitch_norm == 'log': - f0 = 2 ** f0 - f0 = f0.clamp(min=min, max=max) if is_torch else np.clip(f0, a_min=min, a_max=max) - if uv is not None: - f0[uv > 0] = 0 - if pitch_padding is not None: - f0[pitch_padding] = 0 - return f0 diff --git a/spaces/NN520/AI/cloudflare/worker.js b/spaces/NN520/AI/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/NN520/AI/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/NagaSaiAbhinay/UnCLIP_Image_Interpolation_Demo/README.md b/spaces/NagaSaiAbhinay/UnCLIP_Image_Interpolation_Demo/README.md deleted file mode 100644 index 7bd1297f5f777c4feab5837172366db9242bd4be..0000000000000000000000000000000000000000 --- a/spaces/NagaSaiAbhinay/UnCLIP_Image_Interpolation_Demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: UnCLIP Image Interpolation Demo -emoji: 📈 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nee001/bing0/src/pages/api/blob.ts b/spaces/Nee001/bing0/src/pages/api/blob.ts deleted file mode 100644 index fecd48031916b2284b8958892196e0a1ad420421..0000000000000000000000000000000000000000 --- a/spaces/Nee001/bing0/src/pages/api/blob.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { Readable } from 'node:stream' -import { fetch } from '@/lib/isomorphic' - -const API_DOMAIN = 'https://www.bing.com' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { bcid } = req.query - - const { headers, body } = await fetch(`${API_DOMAIN}/images/blob?bcid=${bcid}`, - { - method: 'GET', - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referrer-Policy": "origin-when-cross-origin", - }, - }, - ) - - res.writeHead(200, { - 'Content-Length': headers.get('content-length')!, - 'Content-Type': headers.get('content-type')!, - }) - // @ts-ignore - return Readable.fromWeb(body!).pipe(res) - } catch (e) { - console.log('Error', e) - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/Nick1/rvc-models/README.md 
b/spaces/Nick1/rvc-models/README.md deleted file mode 100644 index 325c6b6ccd5bc6ba83860fc5771f9f89a5326d9b..0000000000000000000000000000000000000000 --- a/spaces/Nick1/rvc-models/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 'RVC Model ' -emoji: 🎤 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -license: mit -duplicated_from: rvc-models ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Nultx/VITS-TTS/app.py b/spaces/Nultx/VITS-TTS/app.py deleted file mode 100644 index c9bfb000af1af5ec0a745290b95431df58ad7a61..0000000000000000000000000000000000000000 --- a/spaces/Nultx/VITS-TTS/app.py +++ /dev/null @@ -1,256 +0,0 @@ -import argparse -import json -import os -import re -import tempfile -import logging - -logging.getLogger('numba').setLevel(logging.WARNING) -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -import gradio.utils as gr_utils -import gradio.processing_utils as gr_processing_utils -import ONNXVITS_infer -import models -from text import text_to_sequence, _clean_text -from text.symbols import symbols -from mel_processing import spectrogram_torch -import psutil -from datetime import datetime - -language_marks = { - "Japanese": "", - "日本語": "[JA]", - "简体中文": "[ZH]", - "English": "[EN]", - "Mix": "", -} - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, language, speed, is_symbol): - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 150 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - if language is not None: - text = language_marks[language] + text + language_marks[language] - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - sid = LongTensor([speaker_id]) - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - -def create_vc_fn(model, hps, speaker_ids): - def vc_fn(original_speaker, target_speaker, input_audio): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - original_speaker_id = speaker_ids[original_speaker] - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != hps.data.sampling_rate: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate) - with no_grad(): - y = torch.FloatTensor(audio) - y = y.unsqueeze(0) - spec = spectrogram_torch(y, hps.data.filter_length, - hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, - center=False) - spec_lengths = LongTensor([spec.size(-1)]) - sid_src = LongTensor([original_speaker_id]) - sid_tgt = LongTensor([target_speaker_id]) - audio = model.voice_conversion(spec, spec_lengths, 
sid_src=sid_src, sid_tgt=sid_tgt)[0][ - 0, 0].data.cpu().float().numpy() - del y, spec, spec_lengths, sid_src, sid_tgt - return "Success", (hps.data.sampling_rate, audio) - - return vc_fn - - -def get_text(text, hps, is_symbol): - text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_text): - return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \ - else (temp_text, temp_text) - - return to_symbol_fn - - -models_tts = [] -models_vc = [] -models_info = [ - { - "title": "Trilingual", - "languages": ['日本語', '简体中文', 'English', 'Mix'], - "description": """ - This model is trained on a mix up of Umamusume, Genshin Impact, Sanoba Witch & VCTK voice data to learn multilanguage. - All characters can speak English, Chinese & Japanese.\n\n - To mix multiple languages in a single sentence, wrap the corresponding part with language tokens - ([JA] for Japanese, [ZH] for Chinese, [EN] for English), as shown in the examples.\n\n - 这个模型在赛马娘,原神,魔女的夜宴以及VCTK数据集上混合训练以学习多种语言。 - 所有角色均可说中日英三语。\n\n - 若需要在同一个句子中混合多种语言,使用相应的语言标记包裹句子。 - (日语用[JA], 中文用[ZH], 英文用[EN]),参考Examples中的示例。 - """, - "model_path": "./pretrained_models/G_trilingual.pth", - "config_path": "./configs/uma_trilingual.json", - "examples": [['你好,训练员先生,很高兴见到你。', '草上飞 Grass Wonder (Umamusume Pretty Derby)', '简体中文', 1, False], - ['To be honest, I have no idea what to say as examples.', '派蒙 Paimon (Genshin Impact)', 'English', - 1, False], - ['授業中に出しだら,学校生活終わるですわ。', '綾地 寧々 Ayachi Nene (Sanoba Witch)', '日本語', 1, False], - ['[JA]こんにちわ。[JA][ZH]你好![ZH][EN]Hello![EN]', '綾地 寧々 Ayachi Nene (Sanoba Witch)', 'Mix', 1, False]], - "onnx_dir": "./ONNX_net/G_trilingual/" - }, - { - "title": "Japanese", - "languages": ["Japanese"], - "description": """ - This model contains 87 characters from Umamusume: Pretty Derby, Japanese only.\n\n - 这个模型包含赛马娘的所有87名角色,只能合成日语。 - """, - "model_path": "./pretrained_models/G_jp.pth", - "config_path": "./configs/uma87.json", - "examples": [['お疲れ様です,トレーナーさん。', '无声铃鹿 Silence Suzuka (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['張り切っていこう!', '北部玄驹 Kitasan Black (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['何でこんなに慣れでんのよ,私のほが先に好きだっだのに。', '草上飞 Grass Wonder (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['授業中に出しだら,学校生活終わるですわ。', '目白麦昆 Mejiro Mcqueen (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['お帰りなさい,お兄様!', '米浴 Rice Shower (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['私の処女をもらっでください!', '米浴 Rice Shower (Umamusume Pretty Derby)', 'Japanese', 1, False]], - "onnx_dir": "./ONNX_net/G_jp/" - }, -] - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - for info in models_info: - name = info['title'] - lang = info['languages'] - examples = info['examples'] - config_path = info['config_path'] - model_path = info['model_path'] - description = info['description'] - onnx_dir = info["onnx_dir"] - hps = utils.get_hparams_from_file(config_path) - model = ONNXVITS_infer.SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - ONNX_dir=onnx_dir, - **hps.model) - utils.load_checkpoint(model_path, model, None) - 
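For the TTS path defined earlier, create_tts_fn wraps the whole input in the selected language's token pair before phonemization, and Mix mode leaves the text untouched so the caller supplies the tokens. A tiny sketch of that wrapping, reusing the language_marks table from this file:

language_marks = {
    "Japanese": "",
    "日本語": "[JA]",
    "简体中文": "[ZH]",
    "English": "[EN]",
    "Mix": "",
}

def mark(text, language):
    return language_marks[language] + text + language_marks[language]

print(mark("Hello!", "English"))                        # [EN]Hello![EN]
print(mark("[JA]こんにちわ。[JA][EN]Hello![EN]", "Mix"))  # already tagged by the caller, left as is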
model.eval() - speaker_ids = hps.speakers - speakers = list(hps.speakers.keys()) - models_tts.append((name, description, speakers, lang, examples, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_symbol_fn(hps))) - models_vc.append((name, description, speakers, create_vc_fn(model, hps, speaker_ids))) - app = gr.Blocks() - with app: - gr.Markdown("# English & Chinese & Japanese Anime TTS\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=Plachta.VITS-Umamusume-voice-synthesizer)\n\n" - "Including Japanese TTS & Trilingual TTS, speakers are all anime characters. \n\n包含一个纯日语TTS和一个中日英三语TTS模型,主要为二次元角色。\n\n" - "If you have any suggestions or bug reports, feel free to open discussion in [Community](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer/discussions).\n\n" - "若有bug反馈或建议,请在[Community](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer/discussions)下开启一个新的Discussion。 \n\n" - ) - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, description, speakers, lang, example, symbols, tts_fn, to_symbol_fn) in enumerate( - models_tts): - with gr.TabItem(name): - gr.Markdown(description) - with gr.Row(): - with gr.Column(): - textbox = gr.TextArea(label="Text", - placeholder="Type your sentence here (Maximum 150 words)", - value="こんにちわ。", elem_id=f"tts-input") - with gr.Accordion(label="Phoneme Input", open=False): - temp_text_var = gr.Variable() - symbol_input = gr.Checkbox(value=False, label="Symbol input") - symbol_list = gr.Dataset(label="Symbol list", components=[textbox], - samples=[[x] for x in symbols], - elem_id=f"symbol-list") - symbol_list_json = gr.Json(value=symbols, visible=False) - symbol_input.change(to_symbol_fn, - [symbol_input, textbox, temp_text_var], - [textbox, temp_text_var]) - symbol_list.click(None, [symbol_list, symbol_list_json], textbox, - _js=f""" - (i, symbols, text) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - - text = text_input.value; - - return text; - }}""") - # select character - char_dropdown = gr.Dropdown(choices=speakers, value=speakers[0], label='character') - language_dropdown = gr.Dropdown(choices=lang, value=lang[0], label='language') - duration_slider = gr.Slider(minimum=0.1, maximum=5, value=1, step=0.1, - label='速度 Speed') - with gr.Column(): - text_output = gr.Textbox(label="Message") - audio_output = gr.Audio(label="Output Audio", elem_id="tts-audio") - btn = gr.Button("Generate!") - btn.click(tts_fn, - inputs=[textbox, char_dropdown, language_dropdown, duration_slider, - symbol_input], - outputs=[text_output, audio_output]) - gr.Examples( - examples=example, - inputs=[textbox, char_dropdown, language_dropdown, - duration_slider, symbol_input], - outputs=[text_output, audio_output], - fn=tts_fn - ) - app.queue(concurrency_count=3).launch(show_api=False, share=args.share) \ No newline at end of file diff --git 
a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/setup.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/setup.py deleted file mode 100644 index 4379b2c31f593134fb027cf01da5fcd706a64e00..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/setup.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import os -import subprocess -import sys - -from setuptools import Extension, find_packages, setup - -if sys.version_info < (3, 6): - sys.exit("Sorry, Python >= 3.6 is required for fairseq.") - - -def write_version_py(): - with open(os.path.join("fairseq", "version.txt")) as f: - version = f.read().strip() - - # append latest commit hash to version string - try: - sha = ( - subprocess.check_output(["git", "rev-parse", "HEAD"]) - .decode("ascii") - .strip() - ) - version += "+" + sha[:7] - except Exception: - pass - - # write version info to fairseq/version.py - with open(os.path.join("fairseq", "version.py"), "w") as f: - f.write('__version__ = "{}"\n'.format(version)) - return version - - -version = write_version_py() - - -with open("README.md") as f: - readme = f.read() - - -if sys.platform == "darwin": - extra_compile_args = ["-stdlib=libc++", "-O3"] -else: - extra_compile_args = ["-std=c++11", "-O3"] - - -class NumpyExtension(Extension): - """Source: https://stackoverflow.com/a/54128391""" - - def __init__(self, *args, **kwargs): - self.__include_dirs = [] - super().__init__(*args, **kwargs) - - @property - def include_dirs(self): - import numpy - - return self.__include_dirs + [numpy.get_include()] - - @include_dirs.setter - def include_dirs(self, dirs): - self.__include_dirs = dirs - - -extensions = [ - Extension( - "fairseq.libbleu", - sources=[ - "fairseq/clib/libbleu/libbleu.cpp", - "fairseq/clib/libbleu/module.cpp", - ], - extra_compile_args=extra_compile_args, - ), - NumpyExtension( - "fairseq.data.data_utils_fast", - sources=["fairseq/data/data_utils_fast.pyx"], - language="c++", - extra_compile_args=extra_compile_args, - ), - NumpyExtension( - "fairseq.data.token_block_utils_fast", - sources=["fairseq/data/token_block_utils_fast.pyx"], - language="c++", - extra_compile_args=extra_compile_args, - ), -] - - -cmdclass = {} - - -try: - # torch is not available when generating docs - from torch.utils import cpp_extension - - extensions.extend( - [ - cpp_extension.CppExtension( - "fairseq.libbase", - sources=[ - "fairseq/clib/libbase/balanced_assignment.cpp", - ], - ) - ] - ) - - extensions.extend( - [ - cpp_extension.CppExtension( - "fairseq.libnat", - sources=[ - "fairseq/clib/libnat/edit_dist.cpp", - ], - ), - cpp_extension.CppExtension( - "alignment_train_cpu_binding", - sources=[ - "examples/operators/alignment_train_cpu.cpp", - ], - ), - ] - ) - if "CUDA_HOME" in os.environ: - extensions.extend( - [ - cpp_extension.CppExtension( - "fairseq.libnat_cuda", - sources=[ - "fairseq/clib/libnat_cuda/edit_dist.cu", - "fairseq/clib/libnat_cuda/binding.cpp", - ], - ), - cpp_extension.CppExtension( - "fairseq.ngram_repeat_block_cuda", - sources=[ - "fairseq/clib/cuda/ngram_repeat_block_cuda.cpp", - "fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu", - ], - ), - cpp_extension.CppExtension( - "alignment_train_cuda_binding", - sources=[ - "examples/operators/alignment_train_kernel.cu", - "examples/operators/alignment_train_cuda.cpp", - ], - ), - ] - ) - 
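write_version_py above appends a short commit hash to whatever is in fairseq/version.txt and silently falls back when git is unavailable. The same idea, stripped of the file I/O (the base version string here is illustrative):

import subprocess

def build_version(base_version: str) -> str:
    try:
        sha = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
        return base_version + "+" + sha[:7]
    except Exception:
        return base_version    # e.g. not inside a git checkout

print(build_version("1.0.0"))  # "1.0.0+<short sha>" when run inside a repository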
cmdclass["build_ext"] = cpp_extension.BuildExtension - -except ImportError: - pass - - -if "READTHEDOCS" in os.environ: - # don't build extensions when generating docs - extensions = [] - if "build_ext" in cmdclass: - del cmdclass["build_ext"] - - # use CPU build of PyTorch - dependency_links = [ - "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl" - ] -else: - dependency_links = [] - - -if "clean" in sys.argv[1:]: - # Source: https://bit.ly/2NLVsgE - print("deleting Cython files...") - import subprocess - - subprocess.run( - ["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"], - shell=True, - ) - - -extra_packages = [] -if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")): - extra_packages.append("fairseq.model_parallel.megatron.mpu") - - -def do_setup(package_data): - setup( - name="fairseq", - version=version, - description="Facebook AI Research Sequence-to-Sequence Toolkit", - url="https://github.com/pytorch/fairseq", - classifiers=[ - "Intended Audience :: Science/Research", - "License :: OSI Approved :: MIT License", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - ], - long_description=readme, - long_description_content_type="text/markdown", - setup_requires=[ - "cython", - 'numpy<1.20.0; python_version<"3.7"', - 'numpy; python_version>="3.7"', - "setuptools>=18.0", - ], - install_requires=[ - "cffi", - "cython", - 'dataclasses; python_version<"3.7"', - "hydra-core>=1.0.7,<1.1", - "omegaconf<2.1", - 'numpy<1.20.0; python_version<"3.7"', - 'numpy; python_version>="3.7"', - "regex", - "sacrebleu>=1.4.12", - # "torch", - "tqdm", - "bitarray", - # "torchaudio>=0.8.0", - ], - dependency_links=dependency_links, - packages=find_packages( - exclude=[ - "examples", - "examples.*", - "scripts", - "scripts.*", - "tests", - "tests.*", - ] - ) - + extra_packages, - package_data=package_data, - ext_modules=extensions, - test_suite="tests", - entry_points={ - "console_scripts": [ - "fairseq-eval-lm = fairseq_cli.eval_lm:cli_main", - "fairseq-generate = fairseq_cli.generate:cli_main", - "fairseq-hydra-train = fairseq_cli.hydra_train:cli_main", - "fairseq-interactive = fairseq_cli.interactive:cli_main", - "fairseq-preprocess = fairseq_cli.preprocess:cli_main", - "fairseq-score = fairseq_cli.score:cli_main", - "fairseq-train = fairseq_cli.train:cli_main", - "fairseq-validate = fairseq_cli.validate:cli_main", - ], - }, - cmdclass=cmdclass, - zip_safe=False, - ) - - -def get_files(path, relative_to="fairseq"): - all_files = [] - for root, _dirs, files in os.walk(path, followlinks=True): - root = os.path.relpath(root, relative_to) - for file in files: - if file.endswith(".pyc"): - continue - all_files.append(os.path.join(root, file)) - return all_files - - -if __name__ == "__main__": - try: - # symlink examples into fairseq package so package_data accepts them - fairseq_examples = os.path.join("fairseq", "examples") - if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples): - os.symlink(os.path.join("..", "examples"), fairseq_examples) - - package_data = { - "fairseq": ( - get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config")) - ) - } - do_setup(package_data) - finally: - if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples): - os.unlink(fairseq_examples) diff --git 
a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh deleted file mode 100644 index e3efeb21d302ef8d9eae8f1d4b06434c593705f6..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -echo 'Cloning Moses github repository (for tokenization scripts)...' -git clone https://github.com/moses-smt/mosesdecoder.git - -SCRIPTS=mosesdecoder/scripts -TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl -CLEAN=$SCRIPTS/training/clean-corpus-n.perl -REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl - -URLS=( - "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz" - "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz" - "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz" - "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz" - "http://data.statmt.org/wmt17/translation-task/dev.tgz" - "http://statmt.org/wmt14/test-full.tgz" -) -CORPORA=( - "training/europarl-v7.de-en" - "commoncrawl.de-en" - "training-parallel-nc-v13/news-commentary-v13.de-en" - "rapid2016.de-en" -) - -if [ ! -d "$SCRIPTS" ]; then - echo "Please set SCRIPTS variable correctly to point to Moses scripts." - exit -fi - -src=en -tgt=de -lang=en-de -prep=wmt18_en_de -tmp=$prep/tmp -orig=orig -dev=dev/newstest2012 -codes=32000 -bpe=bpe.32k - -mkdir -p $orig $tmp $prep $bpe - -cd $orig - -for ((i=0;i<${#URLS[@]};++i)); do - url=${URLS[i]} - file=$(basename $url) - if [ -f $file ]; then - echo "$file already exists, skipping download" - else - wget "$url" - if [ -f $file ]; then - echo "$url successfully downloaded." - else - echo "$url not successfully downloaded." - exit 1 - fi - if [ ${file: -4} == ".tgz" ]; then - tar zxvf $file - elif [ ${file: -4} == ".tar" ]; then - tar xvf $file - fi - fi -done -cd .. - -echo "pre-processing train data..." -for l in $src $tgt; do - rm -rf $tmp/train.tags.$lang.tok.$l - for f in "${CORPORA[@]}"; do - cat $orig/$f.$l | \ - perl $REM_NON_PRINT_CHAR | \ - perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/train.tags.$lang.tok.$l - done -done - -echo "pre-processing test data..." -for l in $src $tgt; do - if [ "$l" == "$src" ]; then - t="src" - else - t="ref" - fi - grep '\s*//g' | \ - sed -e 's/\s*<\/seg>\s*//g' | \ - sed -e "s/\’/\'/g" | \ - perl $TOKENIZER -threads 8 -l $l -no-escape > $tmp/test.$l - echo "" -done - -# apply length filtering before BPE -perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train 1 100 - -# use newstest2012 for valid -echo "pre-processing valid data..." 
-for l in $src $tgt; do - rm -rf $tmp/valid.$l - cat $orig/$dev.$l | \ - perl $REM_NON_PRINT_CHAR | \ - perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/valid.$l -done - -mkdir output -mv $tmp/{train,valid,test}.{$src,$tgt} output - -#BPE -git clone https://github.com/glample/fastBPE.git -pushd fastBPE -g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast -popd -fastBPE/fast learnbpe $codes output/train.$src output/train.$tgt > $bpe/codes -for split in {train,valid,test}; do for lang in {en,de}; do fastBPE/fast applybpe $bpe/$split.$lang output/$split.$lang $bpe/codes; done; done diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/README.md deleted file mode 100644 index 7a76ffd57c066c20af94aa3fca24c18e2ba4c3dd..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Generative Spoken Language Modeling - -* [Paper](https://arxiv.org/abs/2102.01192) -* [Demo](https://speechbot.github.io/gslm/index.html) - -We build and evaluate generative speech2speech systems using [Log Mel Filtebank](https://pytorch.org/audio/stable/compliance.kaldi.html#fbank), [Modified CPC](https://github.com/facebookresearch/CPC_audio), [HuBERT Base](https://github.com/pytorch/fairseq/tree/main/examples/hubert) and [Wav2Vec 2.0 Large](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec). Our system is composed of three components, namely, *speech2unit*, *ulm* and *unit2speech*. We explain about models and usage of these components in their respective sub-directories. See the links below. - -## Speech to Unit Model (speech2unit) -Speech to unit model is used for quantizing raw speech into learned discrete speech units. [More details](speech2unit) - -## Unit Language Model (ulm) -Unit Language Model is a generative language model trained on discrete speech units. [More details](ulm) - -## Unit to Speech Model (unit2speech) -Unit to speech model is used for synthesizing speech from discrete speech units. [More details](unit2speech) - -## Metrics -We show how to compute ASR based metrics as well as zero-shot metrics proposed in our paper [here](metrics). - -## Tools -We share two tools to resynthesize a given spoken utterance, and generate novel spoken language given a spoken prompt. 
[More detail](tools) diff --git a/spaces/PAIR/PAIR-Diffusion/ldm/models/diffusion/plms.py b/spaces/PAIR/PAIR-Diffusion/ldm/models/diffusion/plms.py deleted file mode 100644 index 7002a365d27168ced0a04e9a4d83e088f8284eae..0000000000000000000000000000000000000000 --- a/spaces/PAIR/PAIR-Diffusion/ldm/models/diffusion/plms.py +++ /dev/null @@ -1,244 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like -from ldm.models.diffusion.sampling_util import norm_thresholding - - -class PLMSSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - if ddim_eta != 0: - raise ValueError('ddim_eta must be 0 for PLMS') - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - dynamic_threshold=None, - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for PLMS sampling is {size}') - - samples, intermediates = self.plms_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ) - return samples, intermediates - - @torch.no_grad() - def plms_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, - dynamic_threshold=None): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running PLMS Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) - old_eps = [] - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic 
forward pass? - img = img_orig * mask + (1. - mask) * img - - outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - old_eps=old_eps, t_next=ts_next, - dynamic_threshold=dynamic_threshold) - img, pred_x0, e_t = outs - old_eps.append(e_t) - if len(old_eps) >= 4: - old_eps.pop(0) - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, - dynamic_threshold=None): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - return e_t - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - if dynamic_threshold is not None: - pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - e_t = get_model_output(x, t) - if len(old_eps) == 0: - # Pseudo Improved Euler (2nd order) - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) - e_t_next = get_model_output(x_prev, t_next) - e_t_prime = (e_t + e_t_next) / 2 - elif len(old_eps) == 1: - # 2nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (3 * e_t - old_eps[-1]) / 2 - elif len(old_eps) == 2: - # 3nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 - elif len(old_eps) >= 3: - # 4nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 - - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) - - return x_prev, pred_x0, e_t diff --git a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/models/lr_scheduler.py b/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/models/lr_scheduler.py deleted file mode 100644 index f40dd177b645981fb65eafb235c2d91f0d169f58..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/models/lr_scheduler.py +++ /dev/null @@ -1,162 +0,0 @@ -import math -from collections import Counter, defaultdict - -import torch -from torch.optim.lr_scheduler import _LRScheduler - - -class MultiStepLR_Restart(_LRScheduler): - def __init__( - self, optimizer, milestones, restarts=None, weights=None, gamma=0.1, clear_state=False, last_epoch=-1 - ): - self.milestones = Counter(milestones) - self.gamma = gamma - self.clear_state = clear_state - self.restarts = restarts if restarts else [0] - self.restarts = [v + 1 for v in self.restarts] - self.restart_weights = weights if weights else [1] - assert len(self.restarts) == len(self.restart_weights), "restarts and their weights do not match." - super(MultiStepLR_Restart, self).__init__(optimizer, last_epoch) - - def get_lr(self): - if self.last_epoch in self.restarts: - if self.clear_state: - self.optimizer.state = defaultdict(dict) - weight = self.restart_weights[self.restarts.index(self.last_epoch)] - return [group["initial_lr"] * weight for group in self.optimizer.param_groups] - if self.last_epoch not in self.milestones: - return [group["lr"] for group in self.optimizer.param_groups] - return [group["lr"] * self.gamma ** self.milestones[self.last_epoch] for group in self.optimizer.param_groups] - - -class CosineAnnealingLR_Restart(_LRScheduler): - def __init__(self, optimizer, T_period, restarts=None, weights=None, eta_min=0, last_epoch=-1): - self.T_period = T_period - self.T_max = self.T_period[0] # current T period - self.eta_min = eta_min - self.restarts = restarts if restarts else [0] - self.restarts = [v + 1 for v in self.restarts] - self.restart_weights = weights if weights else [1] - self.last_restart = 0 - assert len(self.restarts) == len(self.restart_weights), "restarts and their weights do not match." 
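The pseudo linear multistep update at the end of p_sample_plms above (the comments should read 3rd and 4th order rather than "3nd"/"4nd") is a fixed Adams-Bashforth style combination of the stored noise predictions. A self-contained restatement with the same coefficients; plms_eps_prime is an illustrative name, not part of the sampler:

import torch

def plms_eps_prime(e_t: torch.Tensor, old_eps: list) -> torch.Tensor:
    # old_eps holds the most recent previous eps predictions, newest last;
    # the sampler keeps at most three of them.
    if len(old_eps) == 0:
        # no history yet: the sampler instead takes a 2nd-order Heun-style step,
        # averaging e_t with a prediction evaluated at the next timestep
        return e_t
    if len(old_eps) == 1:
        return (3 * e_t - old_eps[-1]) / 2
    if len(old_eps) == 2:
        return (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
    return (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24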
- super(CosineAnnealingLR_Restart, self).__init__(optimizer, last_epoch) - - def get_lr(self): - if self.last_epoch == 0: - return self.base_lrs - elif self.last_epoch in self.restarts: - self.last_restart = self.last_epoch - self.T_max = self.T_period[self.restarts.index(self.last_epoch) + 1] - weight = self.restart_weights[self.restarts.index(self.last_epoch)] - return [group["initial_lr"] * weight for group in self.optimizer.param_groups] - elif (self.last_epoch - self.last_restart - 1 - self.T_max) % (2 * self.T_max) == 0: - return [ - group["lr"] + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2 - for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) - ] - return [ - (1 + math.cos(math.pi * (self.last_epoch - self.last_restart) / self.T_max)) - / (1 + math.cos(math.pi * ((self.last_epoch - self.last_restart) - 1) / self.T_max)) - * (group["lr"] - self.eta_min) - + self.eta_min - for group in self.optimizer.param_groups - ] - - -if __name__ == "__main__": - optimizer = torch.optim.Adam([torch.zeros(3, 64, 3, 3)], lr=2e-4, weight_decay=0, betas=(0.9, 0.99)) - ############################## - # MultiStepLR_Restart - ############################## - # Original - lr_steps = [200000, 400000, 600000, 800000] - restarts = None - restart_weights = None - - # two - lr_steps = [100000, 200000, 300000, 400000, 490000, 600000, 700000, 800000, 900000, 990000] - restarts = [500000] - restart_weights = [1] - - # four - lr_steps = [ - 50000, - 100000, - 150000, - 200000, - 240000, - 300000, - 350000, - 400000, - 450000, - 490000, - 550000, - 600000, - 650000, - 700000, - 740000, - 800000, - 850000, - 900000, - 950000, - 990000, - ] - restarts = [250000, 500000, 750000] - restart_weights = [1, 1, 1] - - scheduler = MultiStepLR_Restart(optimizer, lr_steps, restarts, restart_weights, gamma=0.5, clear_state=False) - - ############################## - # Cosine Annealing Restart - ############################## - # two - T_period = [500000, 500000] - restarts = [500000] - restart_weights = [1] - - # four - T_period = [250000, 250000, 250000, 250000] - restarts = [250000, 500000, 750000] - restart_weights = [1, 1, 1] - - scheduler = CosineAnnealingLR_Restart( - optimizer, T_period, eta_min=1e-7, restarts=restarts, weights=restart_weights - ) - - ############################## - # Draw figure - ############################## - N_iter = 1000000 - lr_l = list(range(N_iter)) - for i in range(N_iter): - scheduler.step() - current_lr = optimizer.param_groups[0]["lr"] - lr_l[i] = current_lr - - import matplotlib as mpl - import matplotlib.ticker as mtick - from matplotlib import pyplot as plt - - mpl.style.use("default") - import seaborn - - seaborn.set(style="whitegrid") - seaborn.set_context("paper") - - plt.figure(1) - plt.subplot(111) - plt.ticklabel_format(style="sci", axis="x", scilimits=(0, 0)) - plt.title("Title", fontsize=16, color="k") - plt.plot(list(range(N_iter)), lr_l, linewidth=1.5, label="learning rate scheme") - legend = plt.legend(loc="upper right", shadow=False) - ax = plt.gca() - labels = ax.get_xticks().tolist() - for k, v in enumerate(labels): - labels[k] = str(int(v / 1000)) + "K" - ax.set_xticklabels(labels) - ax.yaxis.set_major_formatter(mtick.FormatStrFormatter("%.1e")) - - ax.set_ylabel("Learning rate") - ax.set_xlabel("Iteration") - fig = plt.gcf() - plt.show() diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/serialize.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/serialize.go 
deleted file mode 100644 index bf9448ce41c274fe7c060d4cc7bac6c7013e7bd4..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/serialize.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/base.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/base.go deleted file mode 100644 index 7dec5c942886a392339592fa5c8165b82c0191d7..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/base.go and /dev/null differ diff --git a/spaces/Pie31415/control-animation/text_to_animation/models/unet_3d_blocks_flax.py b/spaces/Pie31415/control-animation/text_to_animation/models/unet_3d_blocks_flax.py deleted file mode 100644 index 8f14d2c1f7c49021a63679b740de44a2f11a5824..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/text_to_animation/models/unet_3d_blocks_flax.py +++ /dev/null @@ -1,717 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch -from torch import nn - -# from .resnet import Downsample2D, ResnetBlock2D, TemporalConvLayer, Upsample2D -# from diffusers.models.transformer_2d import Transformer2DModel -# from .transformer_temporal import TransformerTemporalModel - -from diffusers.models.resnet_flax import ( - FlaxDownsample2D, - FlaxResnetBlock2D, - FlaxUpsample2D, -) -from diffusers.models.attention_flax import FlaxTransformer2DModel -from diffusers.models.transformer_temporal import ( - TransformerTemporalModel, -) # TODO: convert to flax - - -def get_down_block( - down_block_type, - num_layers, - in_channels, - out_channels, - temb_channels, - add_downsample, - resnet_eps, - resnet_act_fn, - attn_num_head_channels, - resnet_groups=None, - cross_attention_dim=None, - downsample_padding=None, - dual_cross_attention=False, - use_linear_projection=True, - only_cross_attention=False, - upcast_attention=False, - resnet_time_scale_shift="default", -): - if down_block_type == "DownBlock3D": - return DownBlock3D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif down_block_type == "CrossAttnDownBlock3D": - if cross_attention_dim is None: - raise ValueError( - "cross_attention_dim must be specified for CrossAttnDownBlock3D" - ) - return CrossAttnDownBlock3D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - 
cross_attention_dim=cross_attention_dim, - attn_num_head_channels=attn_num_head_channels, - dual_cross_attention=dual_cross_attention, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - raise ValueError(f"{down_block_type} does not exist.") - - -def get_up_block( - up_block_type, - num_layers, - in_channels, - out_channels, - prev_output_channel, - temb_channels, - add_upsample, - resnet_eps, - resnet_act_fn, - attn_num_head_channels, - resnet_groups=None, - cross_attention_dim=None, - dual_cross_attention=False, - use_linear_projection=True, - only_cross_attention=False, - upcast_attention=False, - resnet_time_scale_shift="default", -): - if up_block_type == "UpBlock3D": - return UpBlock3D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - elif up_block_type == "CrossAttnUpBlock3D": - if cross_attention_dim is None: - raise ValueError( - "cross_attention_dim must be specified for CrossAttnUpBlock3D" - ) - return CrossAttnUpBlock3D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - cross_attention_dim=cross_attention_dim, - attn_num_head_channels=attn_num_head_channels, - dual_cross_attention=dual_cross_attention, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - raise ValueError(f"{up_block_type} does not exist.") - - -class FlaxUNetMidBlock3DCrossAttn(nn.Module): - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - output_scale_factor=1.0, - cross_attention_dim=1280, - dual_cross_attention=False, - use_linear_projection=True, - upcast_attention=False, - ): - super().__init__() - - self.has_cross_attention = True - self.attn_num_head_channels = attn_num_head_channels - resnet_groups = ( - resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) - ) - - # there is always at least one resnet - resnets = [ - FlaxResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ] - temp_convs = [ - TemporalConvLayer( - in_channels, - in_channels, - dropout=0.1, - ) - ] - attentions = [] - temp_attentions = [] - - for _ in range(num_layers): - attentions.append( - Transformer2DModel( - in_channels // attn_num_head_channels, - attn_num_head_channels, - in_channels=in_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - use_linear_projection=use_linear_projection, - 
upcast_attention=upcast_attention, - ) - ) - temp_attentions.append( - TransformerTemporalModel( - in_channels // attn_num_head_channels, - attn_num_head_channels, - in_channels=in_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - temp_convs.append( - TemporalConvLayer( - in_channels, - in_channels, - dropout=0.1, - ) - ) - - self.resnets = nn.ModuleList(resnets) - self.temp_convs = nn.ModuleList(temp_convs) - self.attentions = nn.ModuleList(attentions) - self.temp_attentions = nn.ModuleList(temp_attentions) - - def forward( - self, - hidden_states, - temb=None, - encoder_hidden_states=None, - attention_mask=None, - num_frames=1, - cross_attention_kwargs=None, - ): - hidden_states = self.resnets[0](hidden_states, temb) - hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames) - for attn, temp_attn, resnet, temp_conv in zip( - self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:] - ): - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - cross_attention_kwargs=cross_attention_kwargs, - ).sample - hidden_states = temp_attn( - hidden_states, - num_frames=num_frames, - cross_attention_kwargs=cross_attention_kwargs, - ).sample - hidden_states = resnet(hidden_states, temb) - hidden_states = temp_conv(hidden_states, num_frames=num_frames) - - return hidden_states - - -class CrossAttnDownBlock3D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - cross_attention_dim=1280, - output_scale_factor=1.0, - downsample_padding=1, - add_downsample=True, - dual_cross_attention=False, - use_linear_projection=False, - only_cross_attention=False, - upcast_attention=False, - ): - super().__init__() - resnets = [] - attentions = [] - temp_attentions = [] - temp_convs = [] - - self.has_cross_attention = True - self.attn_num_head_channels = attn_num_head_channels - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - temp_convs.append( - TemporalConvLayer( - out_channels, - out_channels, - dropout=0.1, - ) - ) - attentions.append( - Transformer2DModel( - out_channels // attn_num_head_channels, - attn_num_head_channels, - in_channels=out_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - ) - ) - temp_attentions.append( - TransformerTemporalModel( - out_channels // attn_num_head_channels, - attn_num_head_channels, - 
in_channels=out_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - self.resnets = nn.ModuleList(resnets) - self.temp_convs = nn.ModuleList(temp_convs) - self.attentions = nn.ModuleList(attentions) - self.temp_attentions = nn.ModuleList(temp_attentions) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - out_channels, - use_conv=True, - out_channels=out_channels, - padding=downsample_padding, - name="op", - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states, - temb=None, - encoder_hidden_states=None, - attention_mask=None, - num_frames=1, - cross_attention_kwargs=None, - ): - # TODO(Patrick, William) - attention mask is not used - output_states = () - - for resnet, temp_conv, attn, temp_attn in zip( - self.resnets, self.temp_convs, self.attentions, self.temp_attentions - ): - hidden_states = resnet(hidden_states, temb) - hidden_states = temp_conv(hidden_states, num_frames=num_frames) - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - cross_attention_kwargs=cross_attention_kwargs, - ).sample - hidden_states = temp_attn( - hidden_states, - num_frames=num_frames, - cross_attention_kwargs=cross_attention_kwargs, - ).sample - - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states - - -class DownBlock3D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - temp_convs = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - temp_convs.append( - TemporalConvLayer( - out_channels, - out_channels, - dropout=0.1, - ) - ) - - self.resnets = nn.ModuleList(resnets) - self.temp_convs = nn.ModuleList(temp_convs) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - out_channels, - use_conv=True, - out_channels=out_channels, - padding=downsample_padding, - name="op", - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, temb=None, num_frames=1): - output_states = () - - for resnet, temp_conv in zip(self.resnets, self.temp_convs): - hidden_states = resnet(hidden_states, temb) - hidden_states = temp_conv(hidden_states, num_frames=num_frames) - - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states - - -class CrossAttnUpBlock3D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - 
prev_output_channel: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - cross_attention_dim=1280, - output_scale_factor=1.0, - add_upsample=True, - dual_cross_attention=False, - use_linear_projection=False, - only_cross_attention=False, - upcast_attention=False, - ): - super().__init__() - resnets = [] - temp_convs = [] - attentions = [] - temp_attentions = [] - - self.has_cross_attention = True - self.attn_num_head_channels = attn_num_head_channels - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - temp_convs.append( - TemporalConvLayer( - out_channels, - out_channels, - dropout=0.1, - ) - ) - attentions.append( - Transformer2DModel( - out_channels // attn_num_head_channels, - attn_num_head_channels, - in_channels=out_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - ) - ) - temp_attentions.append( - TransformerTemporalModel( - out_channels // attn_num_head_channels, - attn_num_head_channels, - in_channels=out_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - self.resnets = nn.ModuleList(resnets) - self.temp_convs = nn.ModuleList(temp_convs) - self.attentions = nn.ModuleList(attentions) - self.temp_attentions = nn.ModuleList(temp_attentions) - - if add_upsample: - self.upsamplers = nn.ModuleList( - [Upsample2D(out_channels, use_conv=True, out_channels=out_channels)] - ) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states, - res_hidden_states_tuple, - temb=None, - encoder_hidden_states=None, - upsample_size=None, - attention_mask=None, - num_frames=1, - cross_attention_kwargs=None, - ): - # TODO(Patrick, William) - attention mask is not used - for resnet, temp_conv, attn, temp_attn in zip( - self.resnets, self.temp_convs, self.attentions, self.temp_attentions - ): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - hidden_states = resnet(hidden_states, temb) - hidden_states = temp_conv(hidden_states, num_frames=num_frames) - hidden_states = attn( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - cross_attention_kwargs=cross_attention_kwargs, - ).sample - hidden_states = temp_attn( - hidden_states, - num_frames=num_frames, - cross_attention_kwargs=cross_attention_kwargs, - ).sample - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, upsample_size) - - return hidden_states - - -class UpBlock3D(nn.Module): - def __init__( - self, - in_channels: int, - 
prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - temp_convs = [] - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - temp_convs.append( - TemporalConvLayer( - out_channels, - out_channels, - dropout=0.1, - ) - ) - - self.resnets = nn.ModuleList(resnets) - self.temp_convs = nn.ModuleList(temp_convs) - - if add_upsample: - self.upsamplers = nn.ModuleList( - [Upsample2D(out_channels, use_conv=True, out_channels=out_channels)] - ) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states, - res_hidden_states_tuple, - temb=None, - upsample_size=None, - num_frames=1, - ): - for resnet, temp_conv in zip(self.resnets, self.temp_convs): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - hidden_states = resnet(hidden_states, temb) - hidden_states = temp_conv(hidden_states, num_frames=num_frames) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, upsample_size) - - return hidden_states diff --git a/spaces/PierreSHI/YOLOS_traffic_object_detection/README.md b/spaces/PierreSHI/YOLOS_traffic_object_detection/README.md deleted file mode 100644 index 407370f9ae96260db85eafc68bc9803d6766cb67..0000000000000000000000000000000000000000 --- a/spaces/PierreSHI/YOLOS_traffic_object_detection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: YOLOS Traffic Object detection -emoji: 🔥 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/packaging/utils.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/packaging/utils.py deleted file mode 100644 index bab11b80c60f10a4f3bccb12eb5b17c48a449767..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/packaging/utils.py +++ /dev/null @@ -1,136 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
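The down and up blocks defined above communicate through a simple tuple contract: every resnet in a down block appends its output to output_states, and the matching up block pops entries from the end of that tuple and concatenates them along the channel axis before each of its resnets, which is why those resnets take resnet_in_channels + res_skip_channels input channels. A toy sketch of just that mechanism, with made-up ToyDown/ToyUp modules standing in for the real blocks:

import torch
from torch import nn


class ToyDown(nn.Module):
    def __init__(self, ch, layers=2):
        super().__init__()
        self.resnets = nn.ModuleList([nn.Conv2d(ch, ch, 3, padding=1) for _ in range(layers)])

    def forward(self, x):
        states = ()
        for resnet in self.resnets:
            x = resnet(x)
            states += (x,)            # one skip tensor per resnet
        return x, states


class ToyUp(nn.Module):
    def __init__(self, ch, layers=2):
        super().__init__()
        # input channels double because of the skip concatenation
        self.resnets = nn.ModuleList([nn.Conv2d(2 * ch, ch, 3, padding=1) for _ in range(layers)])

    def forward(self, x, skip_tuple):
        for resnet in self.resnets:
            skip, skip_tuple = skip_tuple[-1], skip_tuple[:-1]   # pop from the end
            x = resnet(torch.cat([x, skip], dim=1))
        return x


x = torch.randn(1, 8, 16, 16)
h, skips = ToyDown(8)(x)
out = ToyUp(8)(h, skips)
print(out.shape)   # torch.Size([1, 8, 16, 16])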
- -import re -from typing import FrozenSet, NewType, Tuple, Union, cast - -from .tags import Tag, parse_tag -from .version import InvalidVersion, Version - -BuildTag = Union[Tuple[()], Tuple[int, str]] -NormalizedName = NewType("NormalizedName", str) - - -class InvalidWheelFilename(ValueError): - """ - An invalid wheel filename was found, users should refer to PEP 427. - """ - - -class InvalidSdistFilename(ValueError): - """ - An invalid sdist filename was found, users should refer to the packaging user guide. - """ - - -_canonicalize_regex = re.compile(r"[-_.]+") -# PEP 427: The build number must start with a digit. -_build_tag_regex = re.compile(r"(\d+)(.*)") - - -def canonicalize_name(name: str) -> NormalizedName: - # This is taken from PEP 503. - value = _canonicalize_regex.sub("-", name).lower() - return cast(NormalizedName, value) - - -def canonicalize_version(version: Union[Version, str]) -> str: - """ - This is very similar to Version.__str__, but has one subtle difference - with the way it handles the release segment. - """ - if isinstance(version, str): - try: - parsed = Version(version) - except InvalidVersion: - # Legacy versions cannot be normalized - return version - else: - parsed = version - - parts = [] - - # Epoch - if parsed.epoch != 0: - parts.append(f"{parsed.epoch}!") - - # Release segment - # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) - - # Pre-release - if parsed.pre is not None: - parts.append("".join(str(x) for x in parsed.pre)) - - # Post-release - if parsed.post is not None: - parts.append(f".post{parsed.post}") - - # Development release - if parsed.dev is not None: - parts.append(f".dev{parsed.dev}") - - # Local version segment - if parsed.local is not None: - parts.append(f"+{parsed.local}") - - return "".join(parts) - - -def parse_wheel_filename( - filename: str, -) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: - if not filename.endswith(".whl"): - raise InvalidWheelFilename( - f"Invalid wheel filename (extension must be '.whl'): {filename}" - ) - - filename = filename[:-4] - dashes = filename.count("-") - if dashes not in (4, 5): - raise InvalidWheelFilename( - f"Invalid wheel filename (wrong number of parts): {filename}" - ) - - parts = filename.split("-", dashes - 2) - name_part = parts[0] - # See PEP 427 for the rules on escaping the project name - if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: - raise InvalidWheelFilename(f"Invalid project name: {filename}") - name = canonicalize_name(name_part) - version = Version(parts[1]) - if dashes == 5: - build_part = parts[2] - build_match = _build_tag_regex.match(build_part) - if build_match is None: - raise InvalidWheelFilename( - f"Invalid build number: {build_part} in '{filename}'" - ) - build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) - else: - build = () - tags = parse_tag(parts[-1]) - return (name, version, build, tags) - - -def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: - if filename.endswith(".tar.gz"): - file_stem = filename[: -len(".tar.gz")] - elif filename.endswith(".zip"): - file_stem = filename[: -len(".zip")] - else: - raise InvalidSdistFilename( - f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" - f" {filename}" - ) - - # We are requiring a PEP 440 version, which cannot contain dashes, - # so we split on the last dash. 
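The helpers in this vendored module are the pypa packaging library's public API, so their behaviour can be checked against an installed copy of packaging directly; a small usage sketch (the filenames are invented examples):

from packaging.utils import canonicalize_name, parse_sdist_filename, parse_wheel_filename

print(canonicalize_name("Sample.Pkg"))                                # sample-pkg
name, version, build, tags = parse_wheel_filename("sample_pkg-1.0-py3-none-any.whl")
print(name, version, build)                                           # sample-pkg 1.0 ()
name, version = parse_sdist_filename("sample_pkg-1.0.tar.gz")
print(name, version)                                                   # sample-pkg 1.0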
- name_part, sep, version_part = file_stem.rpartition("-") - if not sep: - raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") - - name = canonicalize_name(name_part) - version = Version(version_part) - return (name, version) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/namespaces.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/namespaces.py deleted file mode 100644 index 44939e1c6d40539eb8173bf1527db926c5a54658..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/namespaces.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from distutils import log -import itertools - - -flatten = itertools.chain.from_iterable - - -class Installer: - - nspkg_ext = '-nspkg.pth' - - def install_namespaces(self): - nsp = self._get_all_ns_packages() - if not nsp: - return - filename, ext = os.path.splitext(self._get_target()) - filename += self.nspkg_ext - self.outputs.append(filename) - log.info("Installing %s", filename) - lines = map(self._gen_nspkg_line, nsp) - - if self.dry_run: - # always generate the lines, even in dry run - list(lines) - return - - with open(filename, 'wt') as f: - f.writelines(lines) - - def uninstall_namespaces(self): - filename, ext = os.path.splitext(self._get_target()) - filename += self.nspkg_ext - if not os.path.exists(filename): - return - log.info("Removing %s", filename) - os.remove(filename) - - def _get_target(self): - return self.target - - _nspkg_tmpl = ( - "import sys, types, os", - "has_mfs = sys.version_info > (3, 5)", - "p = os.path.join(%(root)s, *%(pth)r)", - "importlib = has_mfs and __import__('importlib.util')", - "has_mfs and __import__('importlib.machinery')", - ( - "m = has_mfs and " - "sys.modules.setdefault(%(pkg)r, " - "importlib.util.module_from_spec(" - "importlib.machinery.PathFinder.find_spec(%(pkg)r, " - "[os.path.dirname(p)])))" - ), - ( - "m = m or " - "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))" - ), - "mp = (m or []) and m.__dict__.setdefault('__path__',[])", - "(p not in mp) and mp.append(p)", - ) - "lines for the namespace installer" - - _nspkg_tmpl_multi = ( - 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', - ) - "additional line(s) when a parent package is indicated" - - def _get_root(self): - return "sys._getframe(1).f_locals['sitedir']" - - def _gen_nspkg_line(self, pkg): - pth = tuple(pkg.split('.')) - root = self._get_root() - tmpl_lines = self._nspkg_tmpl - parent, sep, child = pkg.rpartition('.') - if parent: - tmpl_lines += self._nspkg_tmpl_multi - return ';'.join(tmpl_lines) % locals() + '\n' - - def _get_all_ns_packages(self): - """Return sorted list of all package namespaces""" - pkgs = self.distribution.namespace_packages or [] - return sorted(flatten(map(self._pkg_names, pkgs))) - - @staticmethod - def _pkg_names(pkg): - """ - Given a namespace package, yield the components of that - package. 
- - >>> names = Installer._pkg_names('a.b.c') - >>> set(names) == set(['a', 'a.b', 'a.b.c']) - True - """ - parts = pkg.split('.') - while parts: - yield '.'.join(parts) - parts.pop() - - -class DevelopInstaller(Installer): - def _get_root(self): - return repr(str(self.egg_path)) - - def _get_target(self): - return self.egg_link diff --git a/spaces/Realcat/image-matching-webui/third_party/SOLD2/sold2/dataset/merge_dataset.py b/spaces/Realcat/image-matching-webui/third_party/SOLD2/sold2/dataset/merge_dataset.py deleted file mode 100644 index 1f6395873dcfdea0c35898eefbf4c74a8cfac7a1..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/SOLD2/sold2/dataset/merge_dataset.py +++ /dev/null @@ -1,38 +0,0 @@ -""" Compose multiple datasets in a single loader. """ - -import numpy as np -from copy import deepcopy -from torch.utils.data import Dataset - -from .wireframe_dataset import WireframeDataset -from .holicity_dataset import HolicityDataset - - -class MergeDataset(Dataset): - def __init__(self, mode, config=None): - super(MergeDataset, self).__init__() - # Initialize the datasets - self._datasets = [] - spec_config = deepcopy(config) - for i, d in enumerate(config["datasets"]): - spec_config["dataset_name"] = d - spec_config["gt_source_train"] = config["gt_source_train"][i] - spec_config["gt_source_test"] = config["gt_source_test"][i] - if d == "wireframe": - self._datasets.append(WireframeDataset(mode, spec_config)) - elif d == "holicity": - spec_config["train_split"] = config["train_splits"][i] - self._datasets.append(HolicityDataset(mode, spec_config)) - else: - raise ValueError("Unknown dataset: " + d) - - self._weights = config["weights"] - - def __getitem__(self, item): - dataset = self._datasets[ - np.random.choice(range(len(self._datasets)), p=self._weights) - ] - return dataset[np.random.randint(len(dataset))] - - def __len__(self): - return np.sum([len(d) for d in self._datasets]) diff --git a/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/models/backbone/fpn.py b/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/models/backbone/fpn.py deleted file mode 100644 index 7f38ec13f196793a00cacbaaa3eb7c0a5d8e9605..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/models/backbone/fpn.py +++ /dev/null @@ -1,121 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution without padding""" - return nn.Conv2d( - in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False - ) - - -def conv3x3(in_planes, out_planes, stride=1): - """3x3 convolution with padding""" - return nn.Conv2d( - in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False - ) - - -class ConvBlock(nn.Module): - def __init__(self, in_planes, planes, stride=1, bn=True): - super().__init__() - self.conv = conv3x3(in_planes, planes, stride) - self.bn = nn.BatchNorm2d(planes) if bn is True else None - self.act = nn.GELU() - - def forward(self, x): - y = self.conv(x) - if self.bn: - y = self.bn(y) # F.layer_norm(y, y.shape[1:]) - y = self.act(y) - return y - - -class FPN(nn.Module): - """ - ResNet+FPN, output resolution are 1/8 and 1/2. - Each block has 2 layers. 
- """ - - def __init__(self, config): - super().__init__() - # Config - block = ConvBlock - initial_dim = config["initial_dim"] - block_dims = config["block_dims"] - - # Class Variable - self.in_planes = initial_dim - - # Networks - self.conv1 = nn.Conv2d( - 1, initial_dim, kernel_size=7, stride=2, padding=3, bias=False - ) - self.bn1 = nn.BatchNorm2d(initial_dim) - self.relu = nn.ReLU(inplace=True) - - self.layer1 = self._make_layer(block, block_dims[0], stride=1) # 1/2 - self.layer2 = self._make_layer(block, block_dims[1], stride=2) # 1/4 - self.layer3 = self._make_layer(block, block_dims[2], stride=2) # 1/8 - self.layer4 = self._make_layer(block, block_dims[3], stride=2) # 1/16 - - # 3. FPN upsample - self.layer3_outconv = conv1x1(block_dims[2], block_dims[3]) - self.layer3_outconv2 = nn.Sequential( - ConvBlock(block_dims[3], block_dims[2]), - conv3x3(block_dims[2], block_dims[2]), - ) - self.layer2_outconv = conv1x1(block_dims[1], block_dims[2]) - self.layer2_outconv2 = nn.Sequential( - ConvBlock(block_dims[2], block_dims[1]), - conv3x3(block_dims[1], block_dims[1]), - ) - self.layer1_outconv = conv1x1(block_dims[0], block_dims[1]) - self.layer1_outconv2 = nn.Sequential( - ConvBlock(block_dims[1], block_dims[0]), - conv3x3(block_dims[0], block_dims[0]), - ) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, dim, stride=1): - layer1 = block(self.in_planes, dim, stride=stride) - layer2 = block(dim, dim, stride=1) - layers = (layer1, layer2) - - self.in_planes = dim - return nn.Sequential(*layers) - - def forward(self, x): - # ResNet Backbone - x0 = self.relu(self.bn1(self.conv1(x))) - x1 = self.layer1(x0) # 1/2 - x2 = self.layer2(x1) # 1/4 - x3 = self.layer3(x2) # 1/8 - x4 = self.layer4(x3) # 1/16 - - # FPN - x4_out_2x = F.interpolate( - x4, scale_factor=2.0, mode="bilinear", align_corners=True - ) - x3_out = self.layer3_outconv(x3) - x3_out = self.layer3_outconv2(x3_out + x4_out_2x) - - x3_out_2x = F.interpolate( - x3_out, scale_factor=2.0, mode="bilinear", align_corners=True - ) - x2_out = self.layer2_outconv(x2) - x2_out = self.layer2_outconv2(x2_out + x3_out_2x) - - x2_out_2x = F.interpolate( - x2_out, scale_factor=2.0, mode="bilinear", align_corners=True - ) - x1_out = self.layer1_outconv(x1) - x1_out = self.layer1_outconv2(x1_out + x2_out_2x) - - return [x3_out, x1_out] diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/utils/util_random.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/utils/util_random.py deleted file mode 100644 index e313e9947bb3232a9458878fd219e1594ab93d57..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/utils/util_random.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Helpers for random number generators.""" -import numpy as np - - -def ensure_rng(rng=None): - """Coerces input into a random number generator. - - If the input is None, then a global random state is returned. - - If the input is a numeric value, then that is used as a seed to construct a - random state. Otherwise the input is returned as-is. - - Adapted from [1]_. - - Args: - rng (int | numpy.random.RandomState | None): - if None, then defaults to the global rng. 
Otherwise this can be an - integer or a RandomState class - Returns: - (numpy.random.RandomState) : rng - - a numpy random number generator - - References: - .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 - """ - - if rng is None: - rng = np.random.mtrand._rand - elif isinstance(rng, int): - rng = np.random.RandomState(rng) - else: - rng = rng - return rng diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/hooks/logger/wandb.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/hooks/logger/wandb.py deleted file mode 100644 index 9f6808462eb79ab2b04806a5d9f0d3dd079b5ea9..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/hooks/logger/wandb.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ...dist_utils import master_only -from ..hook import HOOKS -from .base import LoggerHook - - -@HOOKS.register_module() -class WandbLoggerHook(LoggerHook): - - def __init__(self, - init_kwargs=None, - interval=10, - ignore_last=True, - reset_flag=False, - commit=True, - by_epoch=True, - with_step=True): - super(WandbLoggerHook, self).__init__(interval, ignore_last, - reset_flag, by_epoch) - self.import_wandb() - self.init_kwargs = init_kwargs - self.commit = commit - self.with_step = with_step - - def import_wandb(self): - try: - import wandb - except ImportError: - raise ImportError( - 'Please run "pip install wandb" to install wandb') - self.wandb = wandb - - @master_only - def before_run(self, runner): - super(WandbLoggerHook, self).before_run(runner) - if self.wandb is None: - self.import_wandb() - if self.init_kwargs: - self.wandb.init(**self.init_kwargs) - else: - self.wandb.init() - - @master_only - def log(self, runner): - tags = self.get_loggable_tags(runner) - if tags: - if self.with_step: - self.wandb.log( - tags, step=self.get_iter(runner), commit=self.commit) - else: - tags['global_step'] = self.get_iter(runner) - self.wandb.log(tags, commit=self.commit) - - @master_only - def after_run(self, runner): - self.wandb.join() diff --git a/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/src/retrievers/base_retriever.py b/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/src/retrievers/base_retriever.py deleted file mode 100644 index 1d7642bc05a2a092f15f34fa82766ac5dfeaa1df..0000000000000000000000000000000000000000 --- a/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/src/retrievers/base_retriever.py +++ /dev/null @@ -1,12 +0,0 @@ -from typing import Dict, List, Tuple - -import numpy as np - -RetrieveTypeResult = Dict[str, List[str]] -RetrieveTypeScores = np.ndarray -RetrieveType = Tuple[RetrieveTypeScores, RetrieveTypeResult] - - -class Retriever(): - def retrieve(self, query: str, k: int) -> RetrieveType: - raise NotImplementedError() diff --git a/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/transforms.py b/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - 
unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - 
cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/layers_123812KB .py b/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/layers_123812KB .py deleted file mode 100644 index b82f06bb4993cd63f076e68d7e24185269b1bc42..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/layers_123812KB .py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/Sinestreaa/Test02/Dockerfile b/spaces/Sinestreaa/Test02/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/Sinestreaa/Test02/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at 
end of file
diff --git a/spaces/SouthCity/ShuruiXu/config.py b/spaces/SouthCity/ShuruiXu/config.py
deleted file mode 100644
index d986750a3912abb41e5b1ee3edd79509e6843bd8..0000000000000000000000000000000000000000
--- a/spaces/SouthCity/ShuruiXu/config.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# [step 1]>> e.g.: API_KEY = "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" (this key is invalid)
-API_KEY = "sk-此处填API密钥"
-
-# [step 2]>> Set to True to use a proxy; if deploying directly on an overseas server, leave this unchanged
-USE_PROXY = False
-if USE_PROXY:
-    # The format is [protocol]:// [address] :[port]; before filling it in, remember to set USE_PROXY to True; if deploying directly on an overseas server, leave this unchanged
-    # e.g. "socks5h://localhost:11284"
-    # [protocol] Common protocols are just socks5h/http; e.g. v2**y and ss* default to socks5h locally, while cl**h defaults to http
-    # [address] If you know, you know; otherwise localhost or 127.0.0.1 is always safe (localhost means the proxy software runs on this machine)
-    # [port] Look in your proxy software's settings. Different proxy tools have different UIs, but the port number should be in the most prominent place
-
-    # Proxy network address: open your circumvention software to check the proxy's protocol (socks5/http), address (localhost) and port (11284)
-    proxies = {
-        # [protocol]:// [address] :[port]
-        "http": "socks5h://localhost:11284",
-        "https": "socks5h://localhost:11284",
-    }
-else:
-    proxies = None
-
-
-# [step 3]>> The settings below can improve the experience, but in most cases they do not need to be changed
-# Height of the chat window
-CHATBOT_HEIGHT = 1115
-
-# How long to wait after sending a request to OpenAI before it counts as a timeout
-TIMEOUT_SECONDS = 25
-
-# Web page port; -1 means a random port
-WEB_PORT = -1
-
-# Retry limit when OpenAI does not respond (network lag, proxy failure, expired key)
-MAX_RETRY = 2
-
-# OpenAI model selection (gpt-4 is currently only open to approved applicants)
-LLM_MODEL = "gpt-3.5-turbo"
-
-# OpenAI API_URL
-API_URL = "https://api.openai.com/v1/chat/completions"
-
-# Number of threads to use in parallel
-CONCURRENT_COUNT = 100
-
-# Set username and password (this feature is unstable, depends on the gradio version and the network; not recommended for local use)
-AUTHENTICATION = [] # [("username", "password"), ("username2", "password2"), ...]
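The proxy settings above are a plain dict in the [protocol]://[address]:[port] form described in the comments. A rough sketch of how such a mapping is typically consumed follows; the project's own request code is not part of this diff, and the use of the requests library, the header, and the payload below are assumptions for the illustration (socks5h support also needs requests[socks] installed):

    import requests  # assumed HTTP client; install with: pip install "requests[socks]"

    proxies = {
        "http": "socks5h://localhost:11284",
        "https": "socks5h://localhost:11284",
    }
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",    # API_URL above
        headers={"Authorization": "Bearer " + "sk-..."},  # API_KEY above (placeholder)
        json={"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]},
        proxies=proxies,   # routed through the local proxy when USE_PROXY is True
        timeout=25,        # TIMEOUT_SECONDS above
    )
    response.raise_for_status()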
diff --git a/spaces/Sujal7/shikshaconnect/index.html b/spaces/Sujal7/shikshaconnect/index.html
deleted file mode 100644
index cb4d633a0ad7b2ce59abf854c24471e119068505..0000000000000000000000000000000000000000
--- a/spaces/Sujal7/shikshaconnect/index.html
+++ /dev/null
@@ -1,68 +0,0 @@
[68 deleted lines of HTML whose markup was lost in extraction; the recoverable text content is: page title/header "Shikshaconnect"; a "Registration/Login" section containing a registration form; a "Data" section ("Here is the data filled by the user."); a "Curriculum" section ("Here is the curriculum section."); a "Monitoring and Evaluation" section ("Here is the monitoring and evaluation section."); an "Admission" section ("Here is the admission section.").]
- - - \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_latextools.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_latextools.py deleted file mode 100644 index d035752b4fe607113c98d97249da9540a3bdce9d..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/lib/tests/test_latextools.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Tests for IPython.utils.path.py""" -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -from contextlib import contextmanager -from unittest.mock import patch - -import pytest - -from IPython.lib import latextools -from IPython.testing.decorators import ( - onlyif_cmds_exist, - skipif_not_matplotlib, -) -from IPython.utils.process import FindCmdError - - -@pytest.mark.parametrize('command', ['latex', 'dvipng']) -def test_check_latex_to_png_dvipng_fails_when_no_cmd(command): - def mock_find_cmd(arg): - if arg == command: - raise FindCmdError - - with patch.object(latextools, "find_cmd", mock_find_cmd): - assert latextools.latex_to_png_dvipng("whatever", True) is None - - -@contextmanager -def no_op(*args, **kwargs): - yield - - -@onlyif_cmds_exist("latex", "dvipng") -@pytest.mark.parametrize("s, wrap", [("$$x^2$$", False), ("x^2", True)]) -def test_latex_to_png_dvipng_runs(s, wrap): - """ - Test that latex_to_png_dvipng just runs without error. - """ - def mock_kpsewhich(filename): - assert filename == "breqn.sty" - return None - - latextools.latex_to_png_dvipng(s, wrap) - - with patch_latextool(mock_kpsewhich): - latextools.latex_to_png_dvipng(s, wrap) - - -def mock_kpsewhich(filename): - assert filename == "breqn.sty" - return None - -@contextmanager -def patch_latextool(mock=mock_kpsewhich): - with patch.object(latextools, "kpsewhich", mock): - yield - -@pytest.mark.parametrize('context', [no_op, patch_latextool]) -@pytest.mark.parametrize('s_wrap', [("$x^2$", False), ("x^2", True)]) -def test_latex_to_png_mpl_runs(s_wrap, context): - """ - Test that latex_to_png_mpl just runs without error. - """ - try: - import matplotlib - except ImportError: - pytest.skip("This needs matplotlib to be available") - return - s, wrap = s_wrap - with context(): - latextools.latex_to_png_mpl(s, wrap) - -@skipif_not_matplotlib -def test_latex_to_html(): - img = latextools.latex_to_html("$x^2$") - assert "data:image/png;base64,iVBOR" in img - - -def test_genelatex_no_wrap(): - """ - Test genelatex with wrap=False. - """ - def mock_kpsewhich(filename): - assert False, ("kpsewhich should not be called " - "(called with {0})".format(filename)) - - with patch_latextool(mock_kpsewhich): - assert '\n'.join(latextools.genelatex("body text", False)) == r'''\documentclass{article} -\usepackage{amsmath} -\usepackage{amsthm} -\usepackage{amssymb} -\usepackage{bm} -\pagestyle{empty} -\begin{document} -body text -\end{document}''' - - -def test_genelatex_wrap_with_breqn(): - """ - Test genelatex with wrap=True for the case breqn.sty is installed. 
- """ - def mock_kpsewhich(filename): - assert filename == "breqn.sty" - return "path/to/breqn.sty" - - with patch_latextool(mock_kpsewhich): - assert '\n'.join(latextools.genelatex("x^2", True)) == r'''\documentclass{article} -\usepackage{amsmath} -\usepackage{amsthm} -\usepackage{amssymb} -\usepackage{bm} -\usepackage{breqn} -\pagestyle{empty} -\begin{document} -\begin{dmath*} -x^2 -\end{dmath*} -\end{document}''' - - -def test_genelatex_wrap_without_breqn(): - """ - Test genelatex with wrap=True for the case breqn.sty is not installed. - """ - def mock_kpsewhich(filename): - assert filename == "breqn.sty" - return None - - with patch_latextool(mock_kpsewhich): - assert '\n'.join(latextools.genelatex("x^2", True)) == r'''\documentclass{article} -\usepackage{amsmath} -\usepackage{amsthm} -\usepackage{amssymb} -\usepackage{bm} -\pagestyle{empty} -\begin{document} -$$x^2$$ -\end{document}''' - - -@skipif_not_matplotlib -@onlyif_cmds_exist('latex', 'dvipng') -def test_latex_to_png_color(): - """ - Test color settings for latex_to_png. - """ - latex_string = "$x^2$" - default_value = latextools.latex_to_png(latex_string, wrap=False) - default_hexblack = latextools.latex_to_png(latex_string, wrap=False, - color='#000000') - dvipng_default = latextools.latex_to_png_dvipng(latex_string, False) - dvipng_black = latextools.latex_to_png_dvipng(latex_string, False, 'Black') - assert dvipng_default == dvipng_black - mpl_default = latextools.latex_to_png_mpl(latex_string, False) - mpl_black = latextools.latex_to_png_mpl(latex_string, False, 'Black') - assert mpl_default == mpl_black - assert default_value in [dvipng_black, mpl_black] - assert default_hexblack in [dvipng_black, mpl_black] - - # Test that dvips name colors can be used without error - dvipng_maroon = latextools.latex_to_png_dvipng(latex_string, False, - 'Maroon') - # And that it doesn't return the black one - assert dvipng_black != dvipng_maroon - - mpl_maroon = latextools.latex_to_png_mpl(latex_string, False, 'Maroon') - assert mpl_black != mpl_maroon - mpl_white = latextools.latex_to_png_mpl(latex_string, False, 'White') - mpl_hexwhite = latextools.latex_to_png_mpl(latex_string, False, '#FFFFFF') - assert mpl_white == mpl_hexwhite - - mpl_white_scale = latextools.latex_to_png_mpl(latex_string, False, - 'White', 1.2) - assert mpl_white != mpl_white_scale - - -def test_latex_to_png_invalid_hex_colors(): - """ - Test that invalid hex colors provided to dvipng gives an exception. - """ - latex_string = "$x^2$" - pytest.raises( - ValueError, - lambda: latextools.latex_to_png( - latex_string, backend="dvipng", color="#f00bar" - ), - ) - pytest.raises( - ValueError, - lambda: latextools.latex_to_png(latex_string, backend="dvipng", color="#f00"), - ) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/testing/tests/test_tools.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/testing/tests/test_tools.py deleted file mode 100644 index 178863cf68640a618dc1b2ad3990edf1eea64699..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/testing/tests/test_tools.py +++ /dev/null @@ -1,133 +0,0 @@ -# encoding: utf-8 -""" -Tests for testing.tools -""" - -#----------------------------------------------------------------------------- -# Copyright (C) 2008-2011 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. 
-#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - -import os -import unittest - -from IPython.testing import decorators as dec -from IPython.testing import tools as tt - -#----------------------------------------------------------------------------- -# Tests -#----------------------------------------------------------------------------- - -@dec.skip_win32 -def test_full_path_posix(): - spath = "/foo/bar.py" - result = tt.full_path(spath, ["a.txt", "b.txt"]) - assert result, ["/foo/a.txt" == "/foo/b.txt"] - spath = "/foo" - result = tt.full_path(spath, ["a.txt", "b.txt"]) - assert result, ["/a.txt" == "/b.txt"] - result = tt.full_path(spath, "a.txt") - assert result == ["/a.txt"] - - -@dec.skip_if_not_win32 -def test_full_path_win32(): - spath = "c:\\foo\\bar.py" - result = tt.full_path(spath, ["a.txt", "b.txt"]) - assert result, ["c:\\foo\\a.txt" == "c:\\foo\\b.txt"] - spath = "c:\\foo" - result = tt.full_path(spath, ["a.txt", "b.txt"]) - assert result, ["c:\\a.txt" == "c:\\b.txt"] - result = tt.full_path(spath, "a.txt") - assert result == ["c:\\a.txt"] - - -def test_parser(): - err = ("FAILED (errors=1)", 1, 0) - fail = ("FAILED (failures=1)", 0, 1) - both = ("FAILED (errors=1, failures=1)", 1, 1) - for txt, nerr, nfail in [err, fail, both]: - nerr1, nfail1 = tt.parse_test_output(txt) - assert nerr == nerr1 - assert nfail == nfail1 - - -def test_temp_pyfile(): - src = 'pass\n' - fname = tt.temp_pyfile(src) - assert os.path.isfile(fname) - with open(fname, encoding="utf-8") as fh2: - src2 = fh2.read() - assert src2 == src - -class TestAssertPrints(unittest.TestCase): - def test_passing(self): - with tt.AssertPrints("abc"): - print("abcd") - print("def") - print(b"ghi") - - def test_failing(self): - def func(): - with tt.AssertPrints("abc"): - print("acd") - print("def") - print(b"ghi") - - self.assertRaises(AssertionError, func) - - -class Test_ipexec_validate(tt.TempFileMixin): - def test_main_path(self): - """Test with only stdout results. - """ - self.mktmp("print('A')\n" - "print('B')\n" - ) - out = "A\nB" - tt.ipexec_validate(self.fname, out) - - def test_main_path2(self): - """Test with only stdout results, expecting windows line endings. - """ - self.mktmp("print('A')\n" - "print('B')\n" - ) - out = "A\r\nB" - tt.ipexec_validate(self.fname, out) - - def test_exception_path(self): - """Test exception path in exception_validate. - """ - self.mktmp("import sys\n" - "print('A')\n" - "print('B')\n" - "print('C', file=sys.stderr)\n" - "print('D', file=sys.stderr)\n" - ) - out = "A\nB" - tt.ipexec_validate(self.fname, expected_out=out, expected_err="C\nD") - - def test_exception_path2(self): - """Test exception path in exception_validate, expecting windows line endings. 
- """ - self.mktmp("import sys\n" - "print('A')\n" - "print('B')\n" - "print('C', file=sys.stderr)\n" - "print('D', file=sys.stderr)\n" - ) - out = "A\r\nB" - tt.ipexec_validate(self.fname, expected_out=out, expected_err="C\r\nD") - - - def tearDown(self): - # tear down correctly the mixin, - # unittest.TestCase.tearDown does nothing - tt.TempFileMixin.tearDown(self) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/GbrImagePlugin.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/GbrImagePlugin.py deleted file mode 100644 index 994a6e8ebb2f0f2e69990a211d7a1ec4f06b7fd1..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/GbrImagePlugin.py +++ /dev/null @@ -1,102 +0,0 @@ -# -# The Python Imaging Library -# -# load a GIMP brush file -# -# History: -# 96-03-14 fl Created -# 16-01-08 es Version 2 -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1996. -# Copyright (c) Eric Soroos 2016. -# -# See the README file for information on usage and redistribution. -# -# -# See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for -# format documentation. -# -# This code Interprets version 1 and 2 .gbr files. -# Version 1 files are obsolete, and should not be used for new -# brushes. -# Version 2 files are saved by GIMP v2.8 (at least) -# Version 3 files have a format specifier of 18 for 16bit floats in -# the color depth field. This is currently unsupported by Pillow. - -from . import Image, ImageFile -from ._binary import i32be as i32 - - -def _accept(prefix): - return len(prefix) >= 8 and i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2) - - -## -# Image plugin for the GIMP brush format. - - -class GbrImageFile(ImageFile.ImageFile): - format = "GBR" - format_description = "GIMP brush file" - - def _open(self): - header_size = i32(self.fp.read(4)) - if header_size < 20: - msg = "not a GIMP brush" - raise SyntaxError(msg) - version = i32(self.fp.read(4)) - if version not in (1, 2): - msg = f"Unsupported GIMP brush version: {version}" - raise SyntaxError(msg) - - width = i32(self.fp.read(4)) - height = i32(self.fp.read(4)) - color_depth = i32(self.fp.read(4)) - if width <= 0 or height <= 0: - msg = "not a GIMP brush" - raise SyntaxError(msg) - if color_depth not in (1, 4): - msg = f"Unsupported GIMP brush color depth: {color_depth}" - raise SyntaxError(msg) - - if version == 1: - comment_length = header_size - 20 - else: - comment_length = header_size - 28 - magic_number = self.fp.read(4) - if magic_number != b"GIMP": - msg = "not a GIMP brush, bad magic number" - raise SyntaxError(msg) - self.info["spacing"] = i32(self.fp.read(4)) - - comment = self.fp.read(comment_length)[:-1] - - if color_depth == 1: - self.mode = "L" - else: - self.mode = "RGBA" - - self._size = width, height - - self.info["comment"] = comment - - # Image might not be small - Image._decompression_bomb_check(self.size) - - # Data is an uncompressed block of w * h * bytes/pixel - self._data_size = width * height * color_depth - - def load(self): - if not self.im: - self.im = Image.core.new(self.mode, self.size) - self.frombytes(self.fp.read(self._data_size)) - return Image.Image.load(self) - - -# -# registry - - -Image.register_open(GbrImageFile.format, GbrImageFile, _accept) -Image.register_extension(GbrImageFile.format, ".gbr") diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/PpmImagePlugin.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/PpmImagePlugin.py deleted file 
mode 100644 index 2cb1e56365dc369d6719717f0f6775c8c9e2fdd4..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/PpmImagePlugin.py +++ /dev/null @@ -1,347 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# PPM support for PIL -# -# History: -# 96-03-24 fl Created -# 98-03-06 fl Write RGBA images (as RGB, that is) -# -# Copyright (c) Secret Labs AB 1997-98. -# Copyright (c) Fredrik Lundh 1996. -# -# See the README file for information on usage and redistribution. -# - - -from . import Image, ImageFile -from ._binary import i16be as i16 -from ._binary import o8 -from ._binary import o32le as o32 - -# -# -------------------------------------------------------------------- - -b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d" - -MODES = { - # standard - b"P1": "1", - b"P2": "L", - b"P3": "RGB", - b"P4": "1", - b"P5": "L", - b"P6": "RGB", - # extensions - b"P0CMYK": "CMYK", - # PIL extensions (for test purposes only) - b"PyP": "P", - b"PyRGBA": "RGBA", - b"PyCMYK": "CMYK", -} - - -def _accept(prefix): - return prefix[0:1] == b"P" and prefix[1] in b"0123456y" - - -## -# Image plugin for PBM, PGM, and PPM images. - - -class PpmImageFile(ImageFile.ImageFile): - format = "PPM" - format_description = "Pbmplus image" - - def _read_magic(self): - magic = b"" - # read until whitespace or longest available magic number - for _ in range(6): - c = self.fp.read(1) - if not c or c in b_whitespace: - break - magic += c - return magic - - def _read_token(self): - token = b"" - while len(token) <= 10: # read until next whitespace or limit of 10 characters - c = self.fp.read(1) - if not c: - break - elif c in b_whitespace: # token ended - if not token: - # skip whitespace at start - continue - break - elif c == b"#": - # ignores rest of the line; stops at CR, LF or EOF - while self.fp.read(1) not in b"\r\n": - pass - continue - token += c - if not token: - # Token was not even 1 byte - msg = "Reached EOF while reading header" - raise ValueError(msg) - elif len(token) > 10: - msg = f"Token too long in file header: {token.decode()}" - raise ValueError(msg) - return token - - def _open(self): - magic_number = self._read_magic() - try: - mode = MODES[magic_number] - except KeyError: - msg = "not a PPM file" - raise SyntaxError(msg) - - if magic_number in (b"P1", b"P4"): - self.custom_mimetype = "image/x-portable-bitmap" - elif magic_number in (b"P2", b"P5"): - self.custom_mimetype = "image/x-portable-graymap" - elif magic_number in (b"P3", b"P6"): - self.custom_mimetype = "image/x-portable-pixmap" - - maxval = None - decoder_name = "raw" - if magic_number in (b"P1", b"P2", b"P3"): - decoder_name = "ppm_plain" - for ix in range(3): - token = int(self._read_token()) - if ix == 0: # token is the x size - xsize = token - elif ix == 1: # token is the y size - ysize = token - if mode == "1": - self.mode = "1" - rawmode = "1;I" - break - else: - self.mode = rawmode = mode - elif ix == 2: # token is maxval - maxval = token - if not 0 < maxval < 65536: - msg = "maxval must be greater than 0 and less than 65536" - raise ValueError(msg) - if maxval > 255 and mode == "L": - self.mode = "I" - - if decoder_name != "ppm_plain": - # If maxval matches a bit depth, use the raw decoder directly - if maxval == 65535 and mode == "L": - rawmode = "I;16B" - elif maxval != 255: - decoder_name = "ppm" - - args = (rawmode, 0, 1) if decoder_name == "raw" else (rawmode, maxval) - self._size = xsize, ysize - self.tile = [(decoder_name, (0, 0, xsize, ysize), self.fp.tell(), args)] - - -# -# 
-------------------------------------------------------------------- - - -class PpmPlainDecoder(ImageFile.PyDecoder): - _pulls_fd = True - - def _read_block(self): - return self.fd.read(ImageFile.SAFEBLOCK) - - def _find_comment_end(self, block, start=0): - a = block.find(b"\n", start) - b = block.find(b"\r", start) - return min(a, b) if a * b > 0 else max(a, b) # lowest nonnegative index (or -1) - - def _ignore_comments(self, block): - if self._comment_spans: - # Finish current comment - while block: - comment_end = self._find_comment_end(block) - if comment_end != -1: - # Comment ends in this block - # Delete tail of comment - block = block[comment_end + 1 :] - break - else: - # Comment spans whole block - # So read the next block, looking for the end - block = self._read_block() - - # Search for any further comments - self._comment_spans = False - while True: - comment_start = block.find(b"#") - if comment_start == -1: - # No comment found - break - comment_end = self._find_comment_end(block, comment_start) - if comment_end != -1: - # Comment ends in this block - # Delete comment - block = block[:comment_start] + block[comment_end + 1 :] - else: - # Comment continues to next block(s) - block = block[:comment_start] - self._comment_spans = True - break - return block - - def _decode_bitonal(self): - """ - This is a separate method because in the plain PBM format, all data tokens are - exactly one byte, so the inter-token whitespace is optional. - """ - data = bytearray() - total_bytes = self.state.xsize * self.state.ysize - - while len(data) != total_bytes: - block = self._read_block() # read next block - if not block: - # eof - break - - block = self._ignore_comments(block) - - tokens = b"".join(block.split()) - for token in tokens: - if token not in (48, 49): - msg = b"Invalid token for this mode: %s" % bytes([token]) - raise ValueError(msg) - data = (data + tokens)[:total_bytes] - invert = bytes.maketrans(b"01", b"\xFF\x00") - return data.translate(invert) - - def _decode_blocks(self, maxval): - data = bytearray() - max_len = 10 - out_byte_count = 4 if self.mode == "I" else 1 - out_max = 65535 if self.mode == "I" else 255 - bands = Image.getmodebands(self.mode) - total_bytes = self.state.xsize * self.state.ysize * bands * out_byte_count - - half_token = False - while len(data) != total_bytes: - block = self._read_block() # read next block - if not block: - if half_token: - block = bytearray(b" ") # flush half_token - else: - # eof - break - - block = self._ignore_comments(block) - - if half_token: - block = half_token + block # stitch half_token to new block - half_token = False - - tokens = block.split() - - if block and not block[-1:].isspace(): # block might split token - half_token = tokens.pop() # save half token for later - if len(half_token) > max_len: # prevent buildup of half_token - msg = ( - b"Token too long found in data: %s" % half_token[: max_len + 1] - ) - raise ValueError(msg) - - for token in tokens: - if len(token) > max_len: - msg = b"Token too long found in data: %s" % token[: max_len + 1] - raise ValueError(msg) - value = int(token) - if value > maxval: - msg = f"Channel value too large for this mode: {value}" - raise ValueError(msg) - value = round(value / maxval * out_max) - data += o32(value) if self.mode == "I" else o8(value) - if len(data) == total_bytes: # finished! 
- break - return data - - def decode(self, buffer): - self._comment_spans = False - if self.mode == "1": - data = self._decode_bitonal() - rawmode = "1;8" - else: - maxval = self.args[-1] - data = self._decode_blocks(maxval) - rawmode = "I;32" if self.mode == "I" else self.mode - self.set_as_raw(bytes(data), rawmode) - return -1, 0 - - -class PpmDecoder(ImageFile.PyDecoder): - _pulls_fd = True - - def decode(self, buffer): - data = bytearray() - maxval = self.args[-1] - in_byte_count = 1 if maxval < 256 else 2 - out_byte_count = 4 if self.mode == "I" else 1 - out_max = 65535 if self.mode == "I" else 255 - bands = Image.getmodebands(self.mode) - while len(data) < self.state.xsize * self.state.ysize * bands * out_byte_count: - pixels = self.fd.read(in_byte_count * bands) - if len(pixels) < in_byte_count * bands: - # eof - break - for b in range(bands): - value = ( - pixels[b] if in_byte_count == 1 else i16(pixels, b * in_byte_count) - ) - value = min(out_max, round(value / maxval * out_max)) - data += o32(value) if self.mode == "I" else o8(value) - rawmode = "I;32" if self.mode == "I" else self.mode - self.set_as_raw(bytes(data), rawmode) - return -1, 0 - - -# -# -------------------------------------------------------------------- - - -def _save(im, fp, filename): - if im.mode == "1": - rawmode, head = "1;I", b"P4" - elif im.mode == "L": - rawmode, head = "L", b"P5" - elif im.mode == "I": - rawmode, head = "I;16B", b"P5" - elif im.mode in ("RGB", "RGBA"): - rawmode, head = "RGB", b"P6" - else: - msg = f"cannot write mode {im.mode} as PPM" - raise OSError(msg) - fp.write(head + b"\n%d %d\n" % im.size) - if head == b"P6": - fp.write(b"255\n") - elif head == b"P5": - if rawmode == "L": - fp.write(b"255\n") - else: - fp.write(b"65535\n") - ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]) - - # ALTERNATIVE: save via builtin debug function - # im._dump(filename) - - -# -# -------------------------------------------------------------------- - - -Image.register_open(PpmImageFile.format, PpmImageFile, _accept) -Image.register_save(PpmImageFile.format, _save) - -Image.register_decoder("ppm", PpmDecoder) -Image.register_decoder("ppm_plain", PpmPlainDecoder) - -Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm"]) - -Image.register_mime(PpmImageFile.format, "image/x-portable-anymap") diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/contourpy/util/mpl_renderer.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/contourpy/util/mpl_renderer.py deleted file mode 100644 index dbcb5ca19a01e3ae000986673d66def23f9c2eac..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/contourpy/util/mpl_renderer.py +++ /dev/null @@ -1,613 +0,0 @@ -from __future__ import annotations - -import io -from typing import TYPE_CHECKING, Any, cast - -import matplotlib.collections as mcollections -import matplotlib.pyplot as plt -import numpy as np - -from contourpy import FillType, LineType -from contourpy.util.mpl_util import filled_to_mpl_paths, lines_to_mpl_paths, mpl_codes_to_offsets -from contourpy.util.renderer import Renderer - -if TYPE_CHECKING: - from matplotlib.axes import Axes - from matplotlib.figure import Figure - from numpy.typing import ArrayLike - - import contourpy._contourpy as cpy - - -class MplRenderer(Renderer): - _axes: Axes - _fig: Figure - _want_tight: bool - - """Utility renderer using Matplotlib to render a grid of plots over the same (x, y) range. 
- - Args: - nrows (int, optional): Number of rows of plots, default ``1``. - ncols (int, optional): Number of columns of plots, default ``1``. - figsize (tuple(float, float), optional): Figure size in inches, default ``(9, 9)``. - show_frame (bool, optional): Whether to show frame and axes ticks, default ``True``. - backend (str, optional): Matplotlib backend to use or ``None`` for default backend. - Default ``None``. - gridspec_kw (dict, optional): Gridspec keyword arguments to pass to ``plt.subplots``, - default None. - """ - def __init__( - self, - nrows: int = 1, - ncols: int = 1, - figsize: tuple[float, float] = (9, 9), - show_frame: bool = True, - backend: str | None = None, - gridspec_kw: dict[str, Any] | None = None, - ) -> None: - if backend is not None: - import matplotlib - matplotlib.use(backend) - - kwargs = dict(figsize=figsize, squeeze=False, sharex=True, sharey=True) - if gridspec_kw is not None: - kwargs["gridspec_kw"] = gridspec_kw - else: - kwargs["subplot_kw"] = dict(aspect="equal") - - self._fig, axes = plt.subplots(nrows, ncols, **kwargs) - self._axes = axes.flatten() - if not show_frame: - for ax in self._axes: - ax.axis("off") - - self._want_tight = True - - def __del__(self) -> None: - if hasattr(self, "_fig"): - plt.close(self._fig) - - def _autoscale(self) -> None: - # Using axes._need_autoscale attribute if need to autoscale before rendering after adding - # lines/filled. Only want to autoscale once per axes regardless of how many lines/filled - # added. - for ax in self._axes: - if getattr(ax, "_need_autoscale", False): - ax.autoscale_view(tight=True) - ax._need_autoscale = False - if self._want_tight and len(self._axes) > 1: - self._fig.tight_layout() - - def _get_ax(self, ax: Axes | int) -> Axes: - if isinstance(ax, int): - ax = self._axes[ax] - return ax - - def filled( - self, - filled: cpy.FillReturn, - fill_type: FillType, - ax: Axes | int = 0, - color: str = "C0", - alpha: float = 0.7, - ) -> None: - """Plot filled contours on a single Axes. - - Args: - filled (sequence of arrays): Filled contour data as returned by - :func:`~contourpy.ContourGenerator.filled`. - fill_type (FillType): Type of ``filled`` data, as returned by - :attr:`~contourpy.ContourGenerator.fill_type`. - ax (int or Maplotlib Axes, optional): Which axes to plot on, default ``0``. - color (str, optional): Color to plot with. May be a string color or the letter ``"C"`` - followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the - ``tab10`` colormap. Default ``"C0"``. - alpha (float, optional): Opacity to plot with, default ``0.7``. - """ - ax = self._get_ax(ax) - paths = filled_to_mpl_paths(filled, fill_type) - collection = mcollections.PathCollection( - paths, facecolors=color, edgecolors="none", lw=0, alpha=alpha) - ax.add_collection(collection) - ax._need_autoscale = True - - def grid( - self, - x: ArrayLike, - y: ArrayLike, - ax: Axes | int = 0, - color: str = "black", - alpha: float = 0.1, - point_color: str | None = None, - quad_as_tri_alpha: float = 0, - ) -> None: - """Plot quad grid lines on a single Axes. - - Args: - x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points. - y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points. - ax (int or Matplotlib Axes, optional): Which Axes to plot on, default ``0``. - color (str, optional): Color to plot grid lines, default ``"black"``. - alpha (float, optional): Opacity to plot lines with, default ``0.1``. 
- point_color (str, optional): Color to plot grid points or ``None`` if grid points - should not be plotted, default ``None``. - quad_as_tri_alpha (float, optional): Opacity to plot ``quad_as_tri`` grid, default 0. - - Colors may be a string color or the letter ``"C"`` followed by an integer in the range - ``"C0"`` to ``"C9"`` to use a color from the ``tab10`` colormap. - - Warning: - ``quad_as_tri_alpha > 0`` plots all quads as though they are unmasked. - """ - ax = self._get_ax(ax) - x, y = self._grid_as_2d(x, y) - kwargs = dict(color=color, alpha=alpha) - ax.plot(x, y, x.T, y.T, **kwargs) - if quad_as_tri_alpha > 0: - # Assumes no quad mask. - xmid = 0.25*(x[:-1, :-1] + x[1:, :-1] + x[:-1, 1:] + x[1:, 1:]) - ymid = 0.25*(y[:-1, :-1] + y[1:, :-1] + y[:-1, 1:] + y[1:, 1:]) - kwargs["alpha"] = quad_as_tri_alpha - ax.plot( - np.stack((x[:-1, :-1], xmid, x[1:, 1:])).reshape((3, -1)), - np.stack((y[:-1, :-1], ymid, y[1:, 1:])).reshape((3, -1)), - np.stack((x[1:, :-1], xmid, x[:-1, 1:])).reshape((3, -1)), - np.stack((y[1:, :-1], ymid, y[:-1, 1:])).reshape((3, -1)), - **kwargs) - if point_color is not None: - ax.plot(x, y, color=point_color, alpha=alpha, marker="o", lw=0) - ax._need_autoscale = True - - def lines( - self, - lines: cpy.LineReturn, - line_type: LineType, - ax: Axes | int = 0, - color: str = "C0", - alpha: float = 1.0, - linewidth: float = 1, - ) -> None: - """Plot contour lines on a single Axes. - - Args: - lines (sequence of arrays): Contour line data as returned by - :func:`~contourpy.ContourGenerator.lines`. - line_type (LineType): Type of ``lines`` data, as returned by - :attr:`~contourpy.ContourGenerator.line_type`. - ax (int or Matplotlib Axes, optional): Which Axes to plot on, default ``0``. - color (str, optional): Color to plot lines. May be a string color or the letter ``"C"`` - followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the - ``tab10`` colormap. Default ``"C0"``. - alpha (float, optional): Opacity to plot lines with, default ``1.0``. - linewidth (float, optional): Width of lines, default ``1``. - """ - ax = self._get_ax(ax) - paths = lines_to_mpl_paths(lines, line_type) - collection = mcollections.PathCollection( - paths, facecolors="none", edgecolors=color, lw=linewidth, alpha=alpha) - ax.add_collection(collection) - ax._need_autoscale = True - - def mask( - self, - x: ArrayLike, - y: ArrayLike, - z: ArrayLike | np.ma.MaskedArray[Any, Any], - ax: Axes | int = 0, - color: str = "black", - ) -> None: - """Plot masked out grid points as circles on a single Axes. - - Args: - x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points. - y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points. - z (masked array of shape (ny, nx): z-values. - ax (int or Matplotlib Axes, optional): Which Axes to plot on, default ``0``. - color (str, optional): Circle color, default ``"black"``. - """ - mask = np.ma.getmask(z) # type: ignore[no-untyped-call] - if mask is np.ma.nomask: - return - ax = self._get_ax(ax) - x, y = self._grid_as_2d(x, y) - ax.plot(x[mask], y[mask], "o", c=color) - - def save(self, filename: str, transparent: bool = False) -> None: - """Save plots to SVG or PNG file. - - Args: - filename (str): Filename to save to. - transparent (bool, optional): Whether background should be transparent, default - ``False``. - """ - self._autoscale() - self._fig.savefig(filename, transparent=transparent) - - def save_to_buffer(self) -> io.BytesIO: - """Save plots to an ``io.BytesIO`` buffer. 
- - Return: - BytesIO: PNG image buffer. - """ - self._autoscale() - buf = io.BytesIO() - self._fig.savefig(buf, format="png") - buf.seek(0) - return buf - - def show(self) -> None: - """Show plots in an interactive window, in the usual Matplotlib manner. - """ - self._autoscale() - plt.show() - - def title(self, title: str, ax: Axes | int = 0, color: str | None = None) -> None: - """Set the title of a single Axes. - - Args: - title (str): Title text. - ax (int or Matplotlib Axes, optional): Which Axes to set the title of, default ``0``. - color (str, optional): Color to set title. May be a string color or the letter ``"C"`` - followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the - ``tab10`` colormap. Default is ``None`` which uses Matplotlib's default title color - that depends on the stylesheet in use. - """ - if color: - self._get_ax(ax).set_title(title, color=color) - else: - self._get_ax(ax).set_title(title) - - def z_values( - self, - x: ArrayLike, - y: ArrayLike, - z: ArrayLike, - ax: Axes | int = 0, - color: str = "green", - fmt: str = ".1f", - quad_as_tri: bool = False, - ) -> None: - """Show ``z`` values on a single Axes. - - Args: - x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points. - y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points. - z (array-like of shape (ny, nx): z-values. - ax (int or Matplotlib Axes, optional): Which Axes to plot on, default ``0``. - color (str, optional): Color of added text. May be a string color or the letter ``"C"`` - followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the - ``tab10`` colormap. Default ``"green"``. - fmt (str, optional): Format to display z-values, default ``".1f"``. - quad_as_tri (bool, optional): Whether to show z-values at the ``quad_as_tri`` centers - of quads. - - Warning: - ``quad_as_tri=True`` shows z-values for all quads, even if masked. - """ - ax = self._get_ax(ax) - x, y = self._grid_as_2d(x, y) - z = np.asarray(z) - ny, nx = z.shape - for j in range(ny): - for i in range(nx): - ax.text(x[j, i], y[j, i], f"{z[j, i]:{fmt}}", ha="center", va="center", - color=color, clip_on=True) - if quad_as_tri: - for j in range(ny-1): - for i in range(nx-1): - xx = np.mean(x[j:j+2, i:i+2]) - yy = np.mean(y[j:j+2, i:i+2]) - zz = np.mean(z[j:j+2, i:i+2]) - ax.text(xx, yy, f"{zz:{fmt}}", ha="center", va="center", color=color, - clip_on=True) - - -class MplTestRenderer(MplRenderer): - """Test renderer implemented using Matplotlib. - - No whitespace around plots and no spines/ticks displayed. - Uses Agg backend, so can only save to file/buffer, cannot call ``show()``. - """ - def __init__( - self, - nrows: int = 1, - ncols: int = 1, - figsize: tuple[float, float] = (9, 9), - ) -> None: - gridspec = { - "left": 0.01, - "right": 0.99, - "top": 0.99, - "bottom": 0.01, - "wspace": 0.01, - "hspace": 0.01, - } - super().__init__( - nrows, ncols, figsize, show_frame=True, backend="Agg", gridspec_kw=gridspec, - ) - - for ax in self._axes: - ax.set_xmargin(0.0) - ax.set_ymargin(0.0) - ax.set_xticks([]) - ax.set_yticks([]) - - self._want_tight = False - - -class MplDebugRenderer(MplRenderer): - """Debug renderer implemented using Matplotlib. - - Extends ``MplRenderer`` to add extra information to help in debugging such as markers, arrows, - text, etc. 
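    A minimal usage sketch (an illustration added here, not part of the original file; it assumes contourpy's top-level contour_generator API and write access to the working directory):

        import numpy as np
        from contourpy import contour_generator
        from contourpy.util.mpl_renderer import MplDebugRenderer

        x, y = np.meshgrid(np.linspace(0.0, 1.0, 20), np.linspace(0.0, 1.0, 20))
        z = np.sin(4.0 * x) * np.cos(4.0 * y)
        cont_gen = contour_generator(x, y, z)

        renderer = MplDebugRenderer(nrows=1, ncols=1)
        renderer.grid(x, y, ax=0)                                # light grid lines
        renderer.lines(cont_gen.lines(0.5), cont_gen.line_type)  # contour at z = 0.5, with debug arrows/points
        renderer.title("z = 0.5", ax=0)
        renderer.save("debug_contours.png")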
- """ - def __init__( - self, - nrows: int = 1, - ncols: int = 1, - figsize: tuple[float, float] = (9, 9), - show_frame: bool = True, - ) -> None: - super().__init__(nrows, ncols, figsize, show_frame) - - def _arrow( - self, - ax: Axes, - line_start: cpy.CoordinateArray, - line_end: cpy.CoordinateArray, - color: str, - alpha: float, - arrow_size: float, - ) -> None: - mid = 0.5*(line_start + line_end) - along = line_end - line_start - along /= np.sqrt(np.dot(along, along)) # Unit vector. - right = np.asarray((along[1], -along[0])) - arrow = np.stack(( - mid - (along*0.5 - right)*arrow_size, - mid + along*0.5*arrow_size, - mid - (along*0.5 + right)*arrow_size, - )) - ax.plot(arrow[:, 0], arrow[:, 1], "-", c=color, alpha=alpha) - - def _filled_to_lists_of_points_and_offsets( - self, - filled: cpy.FillReturn, - fill_type: FillType, - ) -> tuple[list[cpy.PointArray], list[cpy.OffsetArray]]: - if fill_type == FillType.OuterCode: - if TYPE_CHECKING: - filled = cast(cpy.FillReturn_OuterCode, filled) - all_points = filled[0] - all_offsets = [mpl_codes_to_offsets(codes) for codes in filled[1]] - elif fill_type == FillType.ChunkCombinedCode: - if TYPE_CHECKING: - filled = cast(cpy.FillReturn_ChunkCombinedCode, filled) - all_points = [points for points in filled[0] if points is not None] - all_offsets = [mpl_codes_to_offsets(codes) for codes in filled[1] if codes is not None] - elif fill_type == FillType.OuterOffset: - if TYPE_CHECKING: - filled = cast(cpy.FillReturn_OuterOffset, filled) - all_points = filled[0] - all_offsets = filled[1] - elif fill_type == FillType.ChunkCombinedOffset: - if TYPE_CHECKING: - filled = cast(cpy.FillReturn_ChunkCombinedOffset, filled) - all_points = [points for points in filled[0] if points is not None] - all_offsets = [offsets for offsets in filled[1] if offsets is not None] - elif fill_type == FillType.ChunkCombinedCodeOffset: - if TYPE_CHECKING: - filled = cast(cpy.FillReturn_ChunkCombinedCodeOffset, filled) - all_points = [] - all_offsets = [] - for points, codes, outer_offsets in zip(*filled): - if points is None: - continue - if TYPE_CHECKING: - assert codes is not None and outer_offsets is not None - all_points += np.split(points, outer_offsets[1:-1]) - all_codes = np.split(codes, outer_offsets[1:-1]) - all_offsets += [mpl_codes_to_offsets(codes) for codes in all_codes] - elif fill_type == FillType.ChunkCombinedOffsetOffset: - if TYPE_CHECKING: - filled = cast(cpy.FillReturn_ChunkCombinedOffsetOffset, filled) - all_points = [] - all_offsets = [] - for points, offsets, outer_offsets in zip(*filled): - if points is None: - continue - if TYPE_CHECKING: - assert offsets is not None and outer_offsets is not None - for i in range(len(outer_offsets)-1): - offs = offsets[outer_offsets[i]:outer_offsets[i+1]+1] - all_points.append(points[offs[0]:offs[-1]]) - all_offsets.append(offs - offs[0]) - else: - raise RuntimeError(f"Rendering FillType {fill_type} not implemented") - - return all_points, all_offsets - - def _lines_to_list_of_points( - self, lines: cpy.LineReturn, line_type: LineType, - ) -> list[cpy.PointArray]: - if line_type == LineType.Separate: - if TYPE_CHECKING: - lines = cast(cpy.LineReturn_Separate, lines) - all_lines = lines - elif line_type == LineType.SeparateCode: - if TYPE_CHECKING: - lines = cast(cpy.LineReturn_SeparateCode, lines) - all_lines = lines[0] - elif line_type == LineType.ChunkCombinedCode: - if TYPE_CHECKING: - lines = cast(cpy.LineReturn_ChunkCombinedCode, lines) - all_lines = [] - for points, codes in zip(*lines): - if points is not None: 
- if TYPE_CHECKING: - assert codes is not None - offsets = mpl_codes_to_offsets(codes) - for i in range(len(offsets)-1): - all_lines.append(points[offsets[i]:offsets[i+1]]) - elif line_type == LineType.ChunkCombinedOffset: - if TYPE_CHECKING: - lines = cast(cpy.LineReturn_ChunkCombinedOffset, lines) - all_lines = [] - for points, all_offsets in zip(*lines): - if points is not None: - if TYPE_CHECKING: - assert all_offsets is not None - for i in range(len(all_offsets)-1): - all_lines.append(points[all_offsets[i]:all_offsets[i+1]]) - else: - raise RuntimeError(f"Rendering LineType {line_type} not implemented") - - return all_lines - - def filled( - self, - filled: cpy.FillReturn, - fill_type: FillType, - ax: Axes | int = 0, - color: str = "C1", - alpha: float = 0.7, - line_color: str = "C0", - line_alpha: float = 0.7, - point_color: str = "C0", - start_point_color: str = "red", - arrow_size: float = 0.1, - ) -> None: - super().filled(filled, fill_type, ax, color, alpha) - - if line_color is None and point_color is None: - return - - ax = self._get_ax(ax) - all_points, all_offsets = self._filled_to_lists_of_points_and_offsets(filled, fill_type) - - # Lines. - if line_color is not None: - for points, offsets in zip(all_points, all_offsets): - for start, end in zip(offsets[:-1], offsets[1:]): - xys = points[start:end] - ax.plot(xys[:, 0], xys[:, 1], c=line_color, alpha=line_alpha) - - if arrow_size > 0.0: - n = len(xys) - for i in range(n-1): - self._arrow(ax, xys[i], xys[i+1], line_color, line_alpha, arrow_size) - - # Points. - if point_color is not None: - for points, offsets in zip(all_points, all_offsets): - mask = np.ones(offsets[-1], dtype=bool) - mask[offsets[1:]-1] = False # Exclude end points. - if start_point_color is not None: - start_indices = offsets[:-1] - mask[start_indices] = False # Exclude start points. 
- ax.plot( - points[:, 0][mask], points[:, 1][mask], "o", c=point_color, alpha=line_alpha) - - if start_point_color is not None: - ax.plot(points[:, 0][start_indices], points[:, 1][start_indices], "o", - c=start_point_color, alpha=line_alpha) - - def lines( - self, - lines: cpy.LineReturn, - line_type: LineType, - ax: Axes | int = 0, - color: str = "C0", - alpha: float = 1.0, - linewidth: float = 1, - point_color: str = "C0", - start_point_color: str = "red", - arrow_size: float = 0.1, - ) -> None: - super().lines(lines, line_type, ax, color, alpha, linewidth) - - if arrow_size == 0.0 and point_color is None: - return - - ax = self._get_ax(ax) - all_lines = self._lines_to_list_of_points(lines, line_type) - - if arrow_size > 0.0: - for line in all_lines: - for i in range(len(line)-1): - self._arrow(ax, line[i], line[i+1], color, alpha, arrow_size) - - if point_color is not None: - for line in all_lines: - start_index = 0 - end_index = len(line) - if start_point_color is not None: - ax.plot(line[0, 0], line[0, 1], "o", c=start_point_color, alpha=alpha) - start_index = 1 - if line[0][0] == line[-1][0] and line[0][1] == line[-1][1]: - end_index -= 1 - ax.plot(line[start_index:end_index, 0], line[start_index:end_index, 1], "o", - c=color, alpha=alpha) - - def point_numbers( - self, - x: ArrayLike, - y: ArrayLike, - z: ArrayLike, - ax: Axes | int = 0, - color: str = "red", - ) -> None: - ax = self._get_ax(ax) - x, y = self._grid_as_2d(x, y) - z = np.asarray(z) - ny, nx = z.shape - for j in range(ny): - for i in range(nx): - quad = i + j*nx - ax.text(x[j, i], y[j, i], str(quad), ha="right", va="top", color=color, - clip_on=True) - - def quad_numbers( - self, - x: ArrayLike, - y: ArrayLike, - z: ArrayLike, - ax: Axes | int = 0, - color: str = "blue", - ) -> None: - ax = self._get_ax(ax) - x, y = self._grid_as_2d(x, y) - z = np.asarray(z) - ny, nx = z.shape - for j in range(1, ny): - for i in range(1, nx): - quad = i + j*nx - xmid = x[j-1:j+1, i-1:i+1].mean() - ymid = y[j-1:j+1, i-1:i+1].mean() - ax.text(xmid, ymid, str(quad), ha="center", va="center", color=color, clip_on=True) - - def z_levels( - self, - x: ArrayLike, - y: ArrayLike, - z: ArrayLike, - lower_level: float, - upper_level: float | None = None, - ax: Axes | int = 0, - color: str = "green", - ) -> None: - ax = self._get_ax(ax) - x, y = self._grid_as_2d(x, y) - z = np.asarray(z) - ny, nx = z.shape - for j in range(ny): - for i in range(nx): - zz = z[j, i] - if upper_level is not None and zz > upper_level: - z_level = 2 - elif zz > lower_level: - z_level = 1 - else: - z_level = 0 - ax.text(x[j, i], y[j, i], z_level, ha="left", va="bottom", color=color, - clip_on=True) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/__init__.py deleted file mode 100644 index f970fbc009289782ee324839fe7261da5acb0bc2..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/__init__.py +++ /dev/null @@ -1,130 +0,0 @@ -__version__ = "0.13.0.dev" - -__all__ = [ - "Label", - "Instr", - "SetLineno", - "Bytecode", - "ConcreteInstr", - "ConcreteBytecode", - "ControlFlowGraph", - "CompilerFlags", - "Compare", -] - -from _pydevd_frame_eval.vendored.bytecode.flags import CompilerFlags -from _pydevd_frame_eval.vendored.bytecode.instr import ( - 
UNSET, - Label, - SetLineno, - Instr, - CellVar, - FreeVar, # noqa - Compare, -) -from _pydevd_frame_eval.vendored.bytecode.bytecode import ( - BaseBytecode, - _BaseBytecodeList, - _InstrList, - Bytecode, -) # noqa -from _pydevd_frame_eval.vendored.bytecode.concrete import ( - ConcreteInstr, - ConcreteBytecode, # noqa - # import needed to use it in bytecode.py - _ConvertBytecodeToConcrete, -) -from _pydevd_frame_eval.vendored.bytecode.cfg import BasicBlock, ControlFlowGraph # noqa -import sys - -def dump_bytecode(bytecode, *, lineno=False, stream=sys.stdout): - def format_line(index, line): - nonlocal cur_lineno, prev_lineno - if lineno: - if cur_lineno != prev_lineno: - line = "L.% 3s % 3s: %s" % (cur_lineno, index, line) - prev_lineno = cur_lineno - else: - line = " % 3s: %s" % (index, line) - else: - line = line - return line - - def format_instr(instr, labels=None): - text = instr.name - arg = instr._arg - if arg is not UNSET: - if isinstance(arg, Label): - try: - arg = "<%s>" % labels[arg] - except KeyError: - arg = "" - elif isinstance(arg, BasicBlock): - try: - arg = "<%s>" % labels[id(arg)] - except KeyError: - arg = "" - else: - arg = repr(arg) - text = "%s %s" % (text, arg) - return text - - indent = " " * 4 - - cur_lineno = bytecode.first_lineno - prev_lineno = None - - if isinstance(bytecode, ConcreteBytecode): - offset = 0 - for instr in bytecode: - fields = [] - if instr.lineno is not None: - cur_lineno = instr.lineno - if lineno: - fields.append(format_instr(instr)) - line = "".join(fields) - line = format_line(offset, line) - else: - fields.append("% 3s %s" % (offset, format_instr(instr))) - line = "".join(fields) - print(line, file=stream) - - offset += instr.size - elif isinstance(bytecode, Bytecode): - labels = {} - for index, instr in enumerate(bytecode): - if isinstance(instr, Label): - labels[instr] = "label_instr%s" % index - - for index, instr in enumerate(bytecode): - if isinstance(instr, Label): - label = labels[instr] - line = "%s:" % label - if index != 0: - print(file=stream) - else: - if instr.lineno is not None: - cur_lineno = instr.lineno - line = format_instr(instr, labels) - line = indent + format_line(index, line) - print(line, file=stream) - print(file=stream) - elif isinstance(bytecode, ControlFlowGraph): - labels = {} - for block_index, block in enumerate(bytecode, 1): - labels[id(block)] = "block%s" % block_index - - for block_index, block in enumerate(bytecode, 1): - print("%s:" % labels[id(block)], file=stream) - prev_lineno = None - for index, instr in enumerate(block): - if instr.lineno is not None: - cur_lineno = instr.lineno - line = format_instr(instr, labels) - line = indent + format_line(index, line) - print(line, file=stream) - if block.next_block is not None: - print(indent + "-> %s" % labels[id(block.next_block)], file=stream) - print(file=stream) - else: - raise TypeError("unknown bytecode class") diff --git a/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/torch_utils.py b/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/torch_utils.py deleted file mode 100644 index 5e614b369a631d8829b5e189647f675aea1d2f46..0000000000000000000000000000000000000000 --- a/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/torch_utils.py +++ /dev/null @@ -1,66 +0,0 @@ -import warnings - -import torch -from torch import Tensor - - -@torch.jit.script -def torch_linspace(start: Tensor, stop: Tensor, num: int) -> torch.Tensor: - """ - Copy-pasted from https://github.com/pytorch/pytorch/issues/61292 - Creates a tensor of shape [num, 
*start.shape] whose values are evenly spaced from start to end, inclusive. - Replicates but the multi-dimensional bahaviour of numpy.linspace in PyTorch. - """ - # create a tensor of 'num' steps from 0 to 1 - steps = torch.arange(num, dtype=torch.float32, device=start.device) / (num - 1) - - # reshape the 'steps' tensor to [-1, *([1]*start.ndim)] to allow for broadcastings - # - using 'steps.reshape([-1, *([1]*start.ndim)])' would be nice here but torchscript - # "cannot statically infer the expected size of a list in this contex", hence the code below - for i in range(start.ndim): - steps = steps.unsqueeze(-1) - - # the output starts at 'start' and increments until 'stop' in each dimension - out = start[None] + steps * (stop - start)[None] - - return out - - -def load_weights( - model: torch.nn.Module, checkpoint: dict, strict=True -) -> torch.nn.Module: - """This function is used instead of the one provided by pytorch lightning - because for unexplained reasons, the pytorch lightning load function did - not behave as intended: loading several times from the same checkpoint - resulted in different loaded weight values... - - Args: - model: a model in which new weights should be set - checkpoint: a loaded pytorch checkpoint (probably resulting from torch.load(filename)) - strict: Default to True, wether to fail if - - """ - if not strict: - model_dict = model.state_dict() - pretrained_dict = { - k: v for k, v in checkpoint["state_dict"].items() if k in model_dict - } - diff1 = checkpoint["state_dict"].keys() - model_dict.keys() - if diff1: - warnings.warn( - f"Found keys {diff1} in checkpoint without any match in the model, ignoring corresponding values." - ) - diff2 = model_dict.keys() - checkpoint["state_dict"].keys() - if diff2: - warnings.warn( - f"Missing keys {diff2} from the checkpoint, the corresponding weights will keep their initial values." - ) - pretrained_dict = { - k: v for k, v in checkpoint["state_dict"].items() if k in model_dict - } - model_dict.update(pretrained_dict) - else: - model_dict = checkpoint["state_dict"] - - model.load_state_dict(model_dict, strict=strict) - return model diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py deleted file mode 100644 index ddfcf7f72f31658d75c8128de0732fbbf0e12b15..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Wrappers to call pyproject.toml-based build backend hooks. 
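A minimal calling sketch (an illustration, not part of the original module docstring; it assumes a project directory containing a pyproject.toml that declares the standard setuptools backend, and an existing dist directory):

    from pyproject_hooks import BuildBackendHookCaller

    hooks = BuildBackendHookCaller(
        source_dir=".",                          # directory containing pyproject.toml
        build_backend="setuptools.build_meta",   # the [build-system] build-backend value
    )
    extra_requires = hooks.get_requires_for_build_wheel()  # build deps reported by the backend
    wheel_basename = hooks.build_wheel("dist")              # writes the wheel into dist/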
-""" - -from ._impl import ( - BackendInvalid, - BackendUnavailable, - BuildBackendHookCaller, - HookMissing, - UnsupportedOperation, - default_subprocess_runner, - quiet_subprocess_runner, -) - -__version__ = '1.0.0' -__all__ = [ - 'BackendUnavailable', - 'BackendInvalid', - 'HookMissing', - 'UnsupportedOperation', - 'default_subprocess_runner', - 'quiet_subprocess_runner', - 'BuildBackendHookCaller', -] diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_anchor_generator.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_anchor_generator.py deleted file mode 100644 index 13a808e587382216da6fe7ee957603f448172657..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_anchor_generator.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import unittest -import torch - -from detectron2.config import get_cfg -from detectron2.layers import ShapeSpec -from detectron2.modeling.anchor_generator import DefaultAnchorGenerator, RotatedAnchorGenerator - -logger = logging.getLogger(__name__) - - -class TestAnchorGenerator(unittest.TestCase): - def test_default_anchor_generator(self): - cfg = get_cfg() - cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] - cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]] - - anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)]) - - # only the last two dimensions of features matter here - num_images = 2 - features = {"stage3": torch.rand(num_images, 96, 1, 2)} - anchors = anchor_generator([features["stage3"]]) - expected_anchor_tensor = torch.tensor( - [ - [-32.0, -8.0, 32.0, 8.0], - [-16.0, -16.0, 16.0, 16.0], - [-8.0, -32.0, 8.0, 32.0], - [-64.0, -16.0, 64.0, 16.0], - [-32.0, -32.0, 32.0, 32.0], - [-16.0, -64.0, 16.0, 64.0], - [-28.0, -8.0, 36.0, 8.0], # -28.0 == -32.0 + STRIDE (4) - [-12.0, -16.0, 20.0, 16.0], - [-4.0, -32.0, 12.0, 32.0], - [-60.0, -16.0, 68.0, 16.0], - [-28.0, -32.0, 36.0, 32.0], - [-12.0, -64.0, 20.0, 64.0], - ] - ) - - self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor)) - - def test_default_anchor_generator_centered(self): - # test explicit args - anchor_generator = DefaultAnchorGenerator( - sizes=[32, 64], aspect_ratios=[0.25, 1, 4], strides=[4] - ) - - # only the last two dimensions of features matter here - num_images = 2 - features = {"stage3": torch.rand(num_images, 96, 1, 2)} - expected_anchor_tensor = torch.tensor( - [ - [-30.0, -6.0, 34.0, 10.0], - [-14.0, -14.0, 18.0, 18.0], - [-6.0, -30.0, 10.0, 34.0], - [-62.0, -14.0, 66.0, 18.0], - [-30.0, -30.0, 34.0, 34.0], - [-14.0, -62.0, 18.0, 66.0], - [-26.0, -6.0, 38.0, 10.0], - [-10.0, -14.0, 22.0, 18.0], - [-2.0, -30.0, 14.0, 34.0], - [-58.0, -14.0, 70.0, 18.0], - [-26.0, -30.0, 38.0, 34.0], - [-10.0, -62.0, 22.0, 66.0], - ] - ) - - anchors = anchor_generator([features["stage3"]]) - self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor)) - - anchors = torch.jit.script(anchor_generator)([features["stage3"]]) - self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor)) - - def test_rrpn_anchor_generator(self): - cfg = get_cfg() - cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] - cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]] - cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [0, 45] # test single list[float] - anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)]) - - # only the last two dimensions of 
features matter here - num_images = 2 - features = {"stage3": torch.rand(num_images, 96, 1, 2)} - anchors = anchor_generator([features["stage3"]]) - expected_anchor_tensor = torch.tensor( - [ - [0.0, 0.0, 64.0, 16.0, 0.0], - [0.0, 0.0, 64.0, 16.0, 45.0], - [0.0, 0.0, 32.0, 32.0, 0.0], - [0.0, 0.0, 32.0, 32.0, 45.0], - [0.0, 0.0, 16.0, 64.0, 0.0], - [0.0, 0.0, 16.0, 64.0, 45.0], - [0.0, 0.0, 128.0, 32.0, 0.0], - [0.0, 0.0, 128.0, 32.0, 45.0], - [0.0, 0.0, 64.0, 64.0, 0.0], - [0.0, 0.0, 64.0, 64.0, 45.0], - [0.0, 0.0, 32.0, 128.0, 0.0], - [0.0, 0.0, 32.0, 128.0, 45.0], - [4.0, 0.0, 64.0, 16.0, 0.0], # 4.0 == 0.0 + STRIDE (4) - [4.0, 0.0, 64.0, 16.0, 45.0], - [4.0, 0.0, 32.0, 32.0, 0.0], - [4.0, 0.0, 32.0, 32.0, 45.0], - [4.0, 0.0, 16.0, 64.0, 0.0], - [4.0, 0.0, 16.0, 64.0, 45.0], - [4.0, 0.0, 128.0, 32.0, 0.0], - [4.0, 0.0, 128.0, 32.0, 45.0], - [4.0, 0.0, 64.0, 64.0, 0.0], - [4.0, 0.0, 64.0, 64.0, 45.0], - [4.0, 0.0, 32.0, 128.0, 0.0], - [4.0, 0.0, 32.0, 128.0, 45.0], - ] - ) - - self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor)) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/ThirdEyeData/Network_Data_Anomaly/README.md b/spaces/ThirdEyeData/Network_Data_Anomaly/README.md deleted file mode 100644 index 2331ac816a9361eff4741f46123f44b7d0570710..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Network_Data_Anomaly/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Network Data Anomaly -emoji: 🐢 -colorFrom: yellow -colorTo: red -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/style.css b/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/style.css deleted file mode 100644 index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000 --- a/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/style.css +++ /dev/null @@ -1,3 +0,0 @@ -h1 { - text-align: center; -} diff --git a/spaces/TushDeMort/yolo/weights.py b/spaces/TushDeMort/yolo/weights.py deleted file mode 100644 index 98e3bb4253e7be0465048f9748b58edc6d1c5b4a..0000000000000000000000000000000000000000 --- a/spaces/TushDeMort/yolo/weights.py +++ /dev/null @@ -1,21 +0,0 @@ -import requests -import os.path - -def weights(): - - path = 'models/yolov7.pt' - - check_file = os.path.isfile(path) - - if check_file == True: - pass - else: - URL = "https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt" - response = requests.get(URL) - open(path, "wb").write(response.content) - -if __name__ == "__main__": - try: - weights() - except Exception as e: - print(f'error is {e}') diff --git a/spaces/TwoCH4/White-box-Cartoonization/wbc/cartoonize.py b/spaces/TwoCH4/White-box-Cartoonization/wbc/cartoonize.py deleted file mode 100644 index 25faf1ceb95aaed9a3f7a7982d17a03dc6bc32b1..0000000000000000000000000000000000000000 --- a/spaces/TwoCH4/White-box-Cartoonization/wbc/cartoonize.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import cv2 -import numpy as np -import tensorflow as tf -import wbc.network as network -import wbc.guided_filter as guided_filter -from tqdm import tqdm - - -def resize_crop(image): - h, w, c = np.shape(image) - if min(h, w) > 720: - if h > w: - h, w = int(720 * h / w), 720 - else: - h, w = 720, int(720 * w / h) - image = cv2.resize(image, (w, h), - interpolation=cv2.INTER_AREA) - h, w = (h // 8) * 8, (w // 8) * 8 - image = image[:h, :w, :] - 
return image - - -def cartoonize(load_folder, save_folder, model_path): - print(model_path) - input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(input_photo) - final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - - sess.run(tf.global_variables_initializer()) - saver.restore(sess, tf.train.latest_checkpoint(model_path)) - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = sess.run(final_out, feed_dict={input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - -class Cartoonize: - def __init__(self, model_path): - print(model_path) - self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(self.input_photo) - self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - self.sess = tf.Session(config=config) - - self.sess.run(tf.global_variables_initializer()) - saver.restore(self.sess, tf.train.latest_checkpoint(model_path)) - - def run(self, load_folder, save_folder): - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - def run_sigle(self, load_path, save_path): - try: - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - -if __name__ == '__main__': - model_path = 'saved_models' - load_folder = 'test_images' - save_folder = 'cartoonized_images' - if not os.path.exists(save_folder): - os.mkdir(save_folder) - cartoonize(load_folder, save_folder, model_path) diff --git a/spaces/Ukrania/RVC-Models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/spaces/Ukrania/RVC-Models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index 
ee3171bcb7c4a5066560723108b56e055f18be45..0000000000000000000000000000000000000000 --- a/spaces/Ukrania/RVC-Models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,90 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class DioF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/Vasanthgx/Cats_vs_Dogs_vasanth/README.md b/spaces/Vasanthgx/Cats_vs_Dogs_vasanth/README.md deleted file mode 100644 index f5e718271757ae76312a4976dfdc8b744fa240b9..0000000000000000000000000000000000000000 --- a/spaces/Vasanthgx/Cats_vs_Dogs_vasanth/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Cats Vs Dogs Vasanth -emoji: 🚀 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Vegecken/sovits4dzl/modules/mel_processing.py b/spaces/Vegecken/sovits4dzl/modules/mel_processing.py deleted file mode 100644 index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000 --- a/spaces/Vegecken/sovits4dzl/modules/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import 
os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, 
onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/VideoCrafter/VideoCrafter/demo_test.py b/spaces/VideoCrafter/VideoCrafter/demo_test.py deleted file mode 100644 index 759039753e6c8caa3eb5507690e68a07e50023c7..0000000000000000000000000000000000000000 --- a/spaces/VideoCrafter/VideoCrafter/demo_test.py +++ /dev/null @@ -1,16 +0,0 @@ -class Text2Video(): - def __init__(self, result_dir='./tmp/') -> None: - pass - - def get_prompt(self, input_text, steps=50, cfg_scale=15.0, eta=1.0, fps=16): - - return '01.mp4' - -class Image2Video: - def __init__(self, result_dir='./tmp/') -> None: - pass - - def get_image(self, input_image, input_prompt, i2v_steps=50, i2v_cfg_scale=15.0, i2v_eta=1.0, i2v_fps=16): - - return '01.mp4' - \ No newline at end of file diff --git a/spaces/Writer/token-counter/README.md b/spaces/Writer/token-counter/README.md deleted file mode 100644 index 45b4051524dd319c765367c7c1c118f11157bbaf..0000000000000000000000000000000000000000 --- a/spaces/Writer/token-counter/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Token Counter -emoji: 📈 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/general_optimizer.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/general_optimizer.py deleted file mode 100644 index f6a0487d582fe6264627d302d6580364affdf754..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/general_optimizer.py +++ /dev/null @@ -1,139 +0,0 @@ -from .torch_core import * -from torch.optim import Optimizer -import types - -__all__ = ['StatScope', 'Statistic', 'ConstStatistic', 'AvgStatistic', 'AvgSquare', 'GeneralOptimizer'] - -StatScope = Enum('StatScope', 'Global Group Layer Channel Weight') - -@dataclass -class Statistic(): - name:str - param:float=0.9 # e.g. for exp moving average - scope:StatScope=StatScope.Weight - init:float=0. 
# starting value - - @property - def buf(self): return f'{self.name}_buffer' - - def new_step(self): - "Set state when computing statistics for Global or Group" - raise NotImplementedError - - def accumulate(self, val): - "Add `val` to statistic" - raise NotImplementedError - - def update(self, state, param, val=None, step=None): - "Update state with accumlated, or `val` (if `Weight` or `Layer` scope)" - raise NotImplementedError - -class ConstStatistic(Statistic): - @property - def buf(self): return None - def new_step(self): pass - def accumulate(self): pass - def update(self, state, param, val=None, step=None): return param - -@dataclass -class CounterStat(Statistic): - def __post_init__(self): self.init,self._buf,self.name = 0,self.name,None - @property - def buf(self): return self._buf - def new_step(self): pass - def accumulate(self, val): pass - def update(self, state, param, val=None, step=None): return state + 1 - -@dataclass -class AvgStatistic(Statistic): - decay:bool=False - debias:bool=False - def new_step(self): self.val,self.count = 0.,0 - - def accumulate(self, val): - self.count += 1 - self.val += self._get_val1(val) - - def _get_val1(self, val): return val.mean() - def _get_val2(self, state, val, param): return state.add_(1-param, val) if self.decay else state.add_(val) - def _get_val3(self, state, val, param): - v = val.view(val.size(0), -1).mean(1) - return state.add_(1-param, v) if self.decay else state.add_(v) - - def update(self, state, param, val=None, step=None): - if self.scope == StatScope.Weight: - # `state` is a tensor - res = self._get_val2(state.mul_(param), val, param) - elif self.scope == StatScope.Channel: - # `state` is a tensor of size n_channels - res = self._get_val3(state.mul_(param), val, param) - # For everything else, `state` is a scalar - elif self.scope == StatScope.Layer: res = state*param + self._get_val1(val) * (1-param if self.decay else 1.) - elif self.count != 0: res = state*param + self.val/self.count * (1-param if self.decay else 1.) 
- else: return state - if self.debias and step is not None: res /= (1 - param ** step) - return res - -class AvgSquare(AvgStatistic): - - def __init__(self, name:str, param:float=0.9, scope=StatScope.Weight, init:float=0., decay:bool=True, debias:bool=False): - super().__init__(name, param=param, scope=scope, init=init, decay=decay, debias=debias) - - def _get_val1(self, val): return torch.norm(val).pow(2)/val.numel() - def _get_val2(self, state, val, param): - return state.addcmul_(1-param, val, val) if self.decay else state.addcmul_(val, val) - def _get_val3(self, state, val, param): - v = val.view(val.size(0), -1).mean(1) - return state.addcmul_(1-param, v, v) if self.decay else state.addcmul_(v, v) - -class GeneralOptimizer(Optimizer): - def __init__(self, params, stats=None, on_step:Callable=None): - defaults = {s.name:s.param for s in listify(stats) if s.name is not None} - super().__init__(params, defaults) - self.global_stats,self.group_stats,self.layer_stats,self.channel_stats,self.weight_stats = self._split_stats(stats) - self.init_stats() - if on_step is not None: self.on_step = types.MethodType(on_step, self) - - def step(self, closure=None): - self.update_stats() - for i,pg in enumerate(self.param_groups): - for p in pg['params']: - if p.grad is not None: self.on_step(p, pg, i) - - def on_step(self, p, group, group_idx): p.data.add_(-group['lr'], p.grad.data) - - def _split_stats(self, stats): - splits = [[stat for stat in listify(stats) if stat.scope==scope] for scope in StatScope] - for split,s in zip([splits[0], splits[1], splits[2]+splits[3]+splits[4]], StatScope): - if np.any([getattr(s, 'debias', False) for s in split]): split.insert(0, CounterStat('step', scope=s)) - return splits - - def _init_stats(self, stats, data=None): - return {stat.buf: stat.init if data is None - else torch.zeros_like(data) + stat.init for stat in stats if stat.buf is not None} - - def init_stats(self): - self.state['global'] = self._init_stats(self.global_stats) - for i,pg in enumerate(self.param_groups): - self.state[f'group{i}'] = self._init_stats(self.group_stats) - for p in pg['params']: - self.state[p] = self._init_stats(self.layer_stats) - self.state[p].update(self._init_stats(self.channel_stats, p.data.view(p.data.size(0), -1).mean(1))) - self.state[p].update(self._init_stats(self.weight_stats, p.data)) - - def _set_bufs(self, p, stats, pg, val=None): - d = self.state[p] - for stat in stats: - if stat.buf is not None: d[stat.buf] = stat.update(d[stat.buf], pg[stat.name], val=val, step=d.get('step', None)) - - def update_stats(self): - for stat in self.global_stats: stat.new_step() - for i,pg in enumerate(self.param_groups): - for stat in self.group_stats: stat.new_step() - for p in pg['params']: - if p.grad is not None: - for stat in self.global_stats + self.group_stats: stat.accumulate(p.grad.data) - self._set_bufs(p, self.layer_stats+self.channel_stats+self.weight_stats, pg, p.grad.data) - self._set_bufs(f'group{i}', self.group_stats, pg) - self._set_bufs('global', self.global_stats, self.param_groups[0]) - diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/torch_core.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/torch_core.py deleted file mode 100644 index 6b089e09e4e08c2b6d50b70ef3223fadae2f48cb..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/torch_core.py +++ /dev/null @@ -1,430 +0,0 @@ -"Utility functions to help deal with tensors" -from .imports.torch import * -from .core import * -from collections import OrderedDict -from 
torch.nn.parallel import DistributedDataParallel - -AffineMatrix = Tensor -BoolOrTensor = Union[bool,Tensor] -FloatOrTensor = Union[float,Tensor] -IntOrTensor = Union[int,Tensor] -ItemsList = Collection[Union[Tensor,ItemBase,'ItemsList',float,int]] -LambdaFunc = Callable[[Tensor],Tensor] -LayerFunc = Callable[[nn.Module],None] -ModuleList = Collection[nn.Module] -NPArray = np.ndarray -OptOptimizer = Optional[optim.Optimizer] -ParamList = Collection[nn.Parameter] -Rank0Tensor = NewType('OneEltTensor', Tensor) -SplitFunc = Callable[[nn.Module], List[nn.Module]] -SplitFuncOrIdxList = Union[Callable, Collection[ModuleList]] -TensorOrNumber = Union[Tensor,Number] -TensorOrNumList = Collection[TensorOrNumber] -TensorImage = Tensor -TensorImageSize = Tuple[int,int,int] -Tensors = Union[Tensor, Collection['Tensors']] -Weights = Dict[str,Tensor] - -AffineFunc = Callable[[KWArgs], AffineMatrix] -HookFunc = Callable[[nn.Module, Tensors, Tensors], Any] -LogitTensorImage = TensorImage -LossFunction = Callable[[Tensor, Tensor], Rank0Tensor] -MetricFunc = Callable[[Tensor,Tensor],TensorOrNumber] -MetricFuncList = Collection[MetricFunc] -MetricsList = Collection[TensorOrNumber] -OptLossFunc = Optional[LossFunction] -OptMetrics = Optional[MetricsList] -OptSplitFunc = Optional[SplitFunc] -PixelFunc = Callable[[TensorImage, ArgStar, KWArgs], TensorImage] - -LightingFunc = Callable[[LogitTensorImage, ArgStar, KWArgs], LogitTensorImage] - -fastai_types = { - AnnealFunc:'AnnealFunc', ArgStar:'ArgStar', BatchSamples:'BatchSamples', - FilePathList:'FilePathList', Floats:'Floats', ImgLabel:'ImgLabel', ImgLabels:'ImgLabels', KeyFunc:'KeyFunc', - KWArgs:'KWArgs', ListOrItem:'ListOrItem', ListRules:'ListRules', ListSizes:'ListSizes', - NPArrayableList:'NPArrayableList', NPArrayList:'NPArrayList', NPArrayMask:'NPArrayMask', NPImage:'NPImage', - OptDataFrame:'OptDataFrame', OptListOrItem:'OptListOrItem', OptRange:'OptRange', OptStrTuple:'OptStrTuple', - OptStats:'OptStats', PathOrStr:'PathOrStr', PBar:'PBar', Point:'Point', Points:'Points', Sizes:'Sizes', - SplitArrayList:'SplitArrayList', StartOptEnd:'StartOptEnd', StrList:'StrList', Tokens:'Tokens', - OptStrList:'OptStrList', AffineMatrix:'AffineMatrix', BoolOrTensor:'BoolOrTensor', FloatOrTensor:'FloatOrTensor', - IntOrTensor:'IntOrTensor', ItemsList:'ItemsList', LambdaFunc:'LambdaFunc', - LayerFunc:'LayerFunc', ModuleList:'ModuleList', OptOptimizer:'OptOptimizer', ParamList:'ParamList', - Rank0Tensor:'Rank0Tensor', SplitFunc:'SplitFunc', SplitFuncOrIdxList:'SplitFuncOrIdxList', - TensorOrNumber:'TensorOrNumber', TensorOrNumList:'TensorOrNumList', TensorImage:'TensorImage', - TensorImageSize:'TensorImageSize', Tensors:'Tensors', Weights:'Weights', AffineFunc:'AffineFunc', - HookFunc:'HookFunc', LogitTensorImage:'LogitTensorImage', LossFunction:'LossFunction', MetricFunc:'MetricFunc', - MetricFuncList:'MetricFuncList', MetricsList:'MetricsList', OptLossFunc:'OptLossFunc', OptMetrics:'OptMetrics', - OptSplitFunc:'OptSplitFunc', PixelFunc:'PixelFunc', LightingFunc:'LightingFunc', IntsOrStrs:'IntsOrStrs', - PathLikeOrBinaryStream:'PathLikeOrBinaryStream' -} - -bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d) -bias_types = (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d) -def is_pool_type(l:Callable): return re.search(r'Pool[123]d$', l.__class__.__name__) -no_wd_types = bn_types + (nn.LayerNorm,) -defaults.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') -AdamW = 
partial(optim.Adam, betas=(0.9,0.99)) - -#Monkey-patch `torch.cuda.set_device` so that it updates `defaults.device` -_old_torch_cuda_set_device = torch.cuda.set_device -def _new_torch_cuda_set_device(device): - _old_torch_cuda_set_device(device) - defaults.device = torch.device('cuda', device) if isinstance(device, int) else device -torch.cuda.set_device = _new_torch_cuda_set_device - -def tensor(x:Any, *rest)->Tensor: - "Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly." - if len(rest): x = (x,)+rest - # XXX: Pytorch bug in dataloader using num_workers>0; TODO: create repro and report - if is_listy(x) and len(x)==0: return tensor(0) - res = torch.tensor(x) if is_listy(x) else as_tensor(x) - if res.dtype is torch.int32: - warn('Tensor is int32: upgrading to int64; for better performance use int64 input') - return res.long() - return res - -class Module(nn.Module, metaclass=PrePostInitMeta): - "Same as `nn.Module`, but no need for subclasses to call `super().__init__`" - def __pre_init__(self): super().__init__() - def __init__(self): pass - -def np_address(x:np.ndarray)->int: - "Address of `x` in memory." - return x.__array_interface__['data'][0] - -def to_detach(b:Tensors, cpu:bool=True): - "Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`." - def _inner(x, cpu=True): - if not isinstance(x,Tensor): return x - x = x.detach() - return x.cpu() if cpu else x - return recurse(_inner, b, cpu=cpu) - -def to_data(b:ItemsList): - "Recursively map lists of items in `b ` to their wrapped data." - return recurse(lambda x: x.data if isinstance(x,ItemBase) else x, b) - -def to_cpu(b:ItemsList): - "Recursively map lists of tensors in `b ` to the cpu." - return recurse(lambda x: x.cpu() if isinstance(x,Tensor) else x, b) - -def to_half(b:Collection[Tensor])->Collection[Tensor]: - "Recursively map lists of tensors in `b ` to FP16." - return recurse(lambda x: x.half() if x.dtype not in [torch.int64, torch.int32, torch.int16] else x, b) - -def to_float(b:Collection[Tensor])->Collection[Tensor]: - "Recursively map lists of tensors in `b ` to FP16." - return recurse(lambda x: x.float() if x.dtype not in [torch.int64, torch.int32, torch.int16] else x, b) - -def to_device(b:Tensors, device:torch.device): - "Recursively put `b` on `device`." - device = ifnone(device, defaults.device) - return recurse(lambda x: x.to(device, non_blocking=True), b) - -def data_collate(batch:ItemsList)->Tensor: - "Convert `batch` items to tensor data." - return torch.utils.data.dataloader.default_collate(to_data(batch)) - -def requires_grad(m:nn.Module, b:Optional[bool]=None)->Optional[bool]: - "If `b` is not set return `requires_grad` of first param, else set `requires_grad` on all params as `b`" - ps = list(m.parameters()) - if not ps: return None - if b is None: return ps[0].requires_grad - for p in ps: p.requires_grad=b - -def trainable_params(m:nn.Module)->ParamList: - "Return list of trainable params in `m`." - res = filter(lambda p: p.requires_grad, m.parameters()) - return res - -def children(m:nn.Module)->ModuleList: - "Get children of `m`." - return list(m.children()) - -def num_children(m:nn.Module)->int: - "Get number of children modules in `m`." - return len(children(m)) - -def range_children(m:nn.Module)->Iterator[int]: - "Return iterator of len of children of `m`." - return range(num_children(m)) - -class ParameterModule(Module): - "Register a lone parameter `p` in a module." 
- def __init__(self, p:nn.Parameter): self.val = p - def forward(self, x): return x - -def children_and_parameters(m:nn.Module): - "Return the children of `m` and its direct parameters not registered in modules." - children = list(m.children()) - children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[]) - for p in m.parameters(): - if id(p) not in children_p: children.append(ParameterModule(p)) - return children - -def flatten_model(m:nn.Module): - if num_children(m): - mapped = map(flatten_model,children_and_parameters(m)) - return sum(mapped,[]) - else: - return [m] - -#flatten_model = lambda m: sum(map(flatten_model,children_and_parameters(m)),[]) if num_children(m) else [m] - -def first_layer(m:nn.Module)->nn.Module: - "Retrieve first layer in a module `m`." - return flatten_model(m)[0] - -def last_layer(m:nn.Module)->nn.Module: - "Retrieve last layer in a module `m`." - return flatten_model(m)[-1] - -def split_model_idx(model:nn.Module, idxs:Collection[int])->ModuleList: - "Split `model` according to the indexes in `idxs`." - layers = flatten_model(model) - if idxs[0] != 0: idxs = [0] + idxs - if idxs[-1] != len(layers): idxs.append(len(layers)) - return [nn.Sequential(*layers[i:j]) for i,j in zip(idxs[:-1],idxs[1:])] - -def split_model(model:nn.Module=None, splits:Collection[Union[nn.Module,ModuleList]]=None): - "Split `model` according to the layers in `splits`." - splits = listify(splits) - if isinstance(splits[0], nn.Module): - layers = flatten_model(model) - idxs = [layers.index(first_layer(s)) for s in splits] - return split_model_idx(model, idxs) - return [nn.Sequential(*s) for s in splits] - -def get_param_groups(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]: - return [sum([list(trainable_params(c)) for c in l.children()], []) for l in layer_groups] - -def split_no_wd_params(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]: - "Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest." - split_params = [] - for l in layer_groups: - l1,l2 = [],[] - for c in l.children(): - if isinstance(c, no_wd_types): l2 += list(trainable_params(c)) - elif isinstance(c, bias_types): - bias = c.bias if hasattr(c, 'bias') else None - l1 += [p for p in trainable_params(c) if not (p is bias)] - if bias is not None: l2.append(bias) - else: l1 += list(trainable_params(c)) - #Since we scan the children separately, we might get duplicates (tied weights). We need to preserve the order - #for the optimizer load of state_dict - l1,l2 = uniqueify(l1),uniqueify(l2) - split_params += [l1, l2] - return split_params - -def set_bn_eval(m:nn.Module)->None: - "Set bn layers in eval mode for all recursive children of `m`." - for l in m.children(): - if isinstance(l, bn_types) and not next(l.parameters()).requires_grad: - l.eval() - set_bn_eval(l) - -def batch_to_half(b:Collection[Tensor])->Collection[Tensor]: - "Set the input of batch `b` to half precision." - return [to_half(b[0]), b[1]] - -def bn2float(module:nn.Module)->nn.Module: - "If `module` is batchnorm don't use half precision." - if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): module.float() - for child in module.children(): bn2float(child) - return module - -def model2half(model:nn.Module)->nn.Module: - "Convert `model` to half precision except the batchnorm layers." - return bn2float(model.half()) - -def init_default(m:nn.Module, func:LayerFunc=nn.init.kaiming_normal_)->nn.Module: - "Initialize `m` weights with `func` and set `bias` to 0." 
- if func: - if hasattr(m, 'weight'): func(m.weight) - if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.) - return m - -def cond_init(m:nn.Module, init_func:LayerFunc): - "Initialize the non-batchnorm layers of `m` with `init_func`." - if (not isinstance(m, bn_types)) and requires_grad(m): init_default(m, init_func) - -def apply_leaf(m:nn.Module, f:LayerFunc): - "Apply `f` to children of `m`." - c = children(m) - if isinstance(m, nn.Module): f(m) - for l in c: apply_leaf(l,f) - -def apply_init(m, init_func:LayerFunc): - "Initialize all non-batchnorm layers of `m` with `init_func`." - apply_leaf(m, partial(cond_init, init_func=init_func)) - -def in_channels(m:nn.Module) -> List[int]: - "Return the shape of the first weight layer in `m`." - for l in flatten_model(m): - if hasattr(l, 'weight'): return l.weight.shape[1] - raise Exception('No weight layer') - -class ModelOnCPU(): - "A context manager to evaluate `model` on the CPU inside." - def __init__(self, model:nn.Module): self.model = model - def __enter__(self): - self.device = one_param(self.model).device - return self.model.cpu() - def __exit__(self, type, value, traceback): - self.model = self.model.to(self.device) - -class NoneReduceOnCPU(): - "A context manager to evaluate `loss_func` with none reduce and weights on the CPU inside." - def __init__(self, loss_func:LossFunction): - self.loss_func,self.device,self.old_red = loss_func,None,None - - def __enter__(self): - if hasattr(self.loss_func, 'weight') and self.loss_func.weight is not None: - self.device = self.loss_func.weight.device - self.loss_func.weight = self.loss_func.weight.cpu() - if hasattr(self.loss_func, 'reduction'): - self.old_red = getattr(self.loss_func, 'reduction') - setattr(self.loss_func, 'reduction', 'none') - return self.loss_func - else: return partial(self.loss_func, reduction='none') - - def __exit__(self, type, value, traceback): - if self.device is not None: self.loss_func.weight = self.loss_func.weight.to(self.device) - if self.old_red is not None: setattr(self.loss_func, 'reduction', self.old_red) - -def model_type(dtype): - "Return the torch type corresponding to `dtype`." - return (torch.float32 if np.issubdtype(dtype, np.floating) else - torch.int64 if np.issubdtype(dtype, np.integer) - else None) - -def np2model_tensor(a): - "Tranform numpy array `a` to a tensor of the same type." - dtype = model_type(a.dtype) - res = as_tensor(a) - if not dtype: return res - return res.type(dtype) - -def _pca(x, k=2): - "Compute PCA of `x` with `k` dimensions." - x = x-torch.mean(x,0) - U,S,V = torch.svd(x.t()) - return torch.mm(x,U[:,:k]) -torch.Tensor.pca = _pca - -def trange_of(x): - "Create a tensor from `range_of(x)`." - return torch.arange(len(x)) - -def to_np(x): - "Convert a tensor to a numpy array." - return x.data.cpu().numpy() - -# monkey patching to allow matplotlib to plot tensors -def tensor__array__(self, dtype=None): - res = to_np(self) - if dtype is None: return res - else: return res.astype(dtype, copy=False) -Tensor.__array__ = tensor__array__ -Tensor.ndim = property(lambda x: len(x.shape)) - -def grab_idx(x,i,batch_first:bool=True): - "Grab the `i`-th batch in `x`, `batch_first` stating the batch dimension." - if batch_first: return ([o[i].cpu() for o in x] if is_listy(x) else x[i].cpu()) - else: return ([o[:,i].cpu() for o in x] if is_listy(x) else x[:,i].cpu()) - -def logit(x:Tensor)->Tensor: - "Logit of `x`, clamped to avoid inf." 
- x = x.clamp(1e-7, 1-1e-7) - return -(1/x-1).log() - -def logit_(x:Tensor)->Tensor: - "Inplace logit of `x`, clamped to avoid inf" - x.clamp_(1e-7, 1-1e-7) - return (x.reciprocal_().sub_(1)).log_().neg_() - -def set_all_seed(seed:int)->None: - "Sets the seeds for all pseudo random generators in fastai lib" - np.random.seed(seed) - torch.manual_seed(seed) - random.seed(seed) - -def uniform(low:Number, high:Number=None, size:Optional[List[int]]=None)->FloatOrTensor: - "Draw 1 or shape=`size` random floats from uniform dist: min=`low`, max=`high`." - if high is None: high=low - return random.uniform(low,high) if size is None else torch.FloatTensor(*listify(size)).uniform_(low,high) - -def log_uniform(low, high, size:Optional[List[int]]=None)->FloatOrTensor: - "Draw 1 or shape=`size` random floats from uniform dist: min=log(`low`), max=log(`high`)." - res = uniform(log(low), log(high), size) - return exp(res) if size is None else res.exp_() - -def rand_bool(p:float, size:Optional[List[int]]=None)->BoolOrTensor: - "Draw 1 or shape=`size` random booleans (`True` occuring with probability `p`)." - return uniform(0,1,size)
<p - -def randint(low:int, high:int, size:Optional[List[int]]=None)->
IntOrTensor: - "Generate int or tensor `size` of ints between `low` and `high` (included)." - return random.randint(low,high) if size is None else torch.randint(low,high+1,size) - -def one_param(m: nn.Module)->Tensor: - "Return the first parameter of `m`." - return next(m.parameters()) - -def try_int(o:Any)->Any: - "Try to convert `o` to int, default to `o` if not possible." - # NB: single-item rank-1 array/tensor can be converted to int, but we don't want to do this - if isinstance(o, (np.ndarray,Tensor)): return o if o.ndim else int(o) - if isinstance(o, collections.abc.Sized) or getattr(o,'__array_interface__',False): return o - try: return int(o) - except: return o - -def get_model(model:nn.Module): - "Return the model maybe wrapped inside `model`." - return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model - -def flatten_check(out:Tensor, targ:Tensor) -> Tensor: - "Check that `out` and `targ` have the same number of elements and flatten them." - out,targ = out.contiguous().view(-1),targ.contiguous().view(-1) - assert len(out) == len(targ), f"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}." - return out,targ - -#Monkey-patch nn.DataParallel.reset -def _data_parallel_reset(self): - if hasattr(self.module, 'reset'): self.module.reset() -nn.DataParallel.reset = _data_parallel_reset - -def remove_module_load(state_dict): - """create new OrderedDict that does not contain `module.`""" - new_state_dict = OrderedDict() - for k, v in state_dict.items(): new_state_dict[k[7:]] = v - return new_state_dict - -def num_distrib(): - "Return the number of processes in distributed training (if applicable)." - return int(os.environ.get('WORLD_SIZE', 0)) - -def rank_distrib(): - "Return the distributed rank of this process (if applicable)." - return int(os.environ.get('RANK', 0)) - -def add_metrics(last_metrics:Collection[Rank0Tensor], mets:Union[Rank0Tensor, Collection[Rank0Tensor]]): - "Return a dictionary for updating `last_metrics` with `mets`." - last_metrics,mets = listify(last_metrics),listify(mets) - return {'last_metrics': last_metrics + mets} - -def try_save(state:Dict, path:Path=None, file:PathLikeOrBinaryStream=None): - target = open(path/file, 'wb') if is_pathlike(file) else file - try: torch.save(state, target) - except OSError as e: - raise Exception(f"{e}\n Can't write {path/file}. 
Pass an absolute writable pathlib obj `fname`.") - -def np_func(f): - "Convert a function taking and returning numpy arrays to one taking and returning tensors" - def _inner(*args, **kwargs): - nargs = [to_np(arg) if isinstance(arg,Tensor) else arg for arg in args] - return tensor(f(*nargs, **kwargs)) - functools.update_wrapper(_inner, f) - return _inner - diff --git a/spaces/Xenova/whisper-web/assets/index-6480d07e.js b/spaces/Xenova/whisper-web/assets/index-6480d07e.js deleted file mode 100644 index fb450a2873f2a1ddd53eba5b492578045c92096e..0000000000000000000000000000000000000000 --- a/spaces/Xenova/whisper-web/assets/index-6480d07e.js +++ /dev/null @@ -1,47 +0,0 @@ -function Jd(e,t){for(var n=0;nr[l]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const o of l)if(o.type==="childList")for(const i of o.addedNodes)i.tagName==="LINK"&&i.rel==="modulepreload"&&r(i)}).observe(document,{childList:!0,subtree:!0});function n(l){const o={};return l.integrity&&(o.integrity=l.integrity),l.referrerPolicy&&(o.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?o.credentials="include":l.crossOrigin==="anonymous"?o.credentials="omit":o.credentials="same-origin",o}function r(l){if(l.ep)return;l.ep=!0;const o=n(l);fetch(l.href,o)}})();function Zd(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var ec={exports:{}},lo={},tc={exports:{}},$={};/** - * @license React - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var jr=Symbol.for("react.element"),ep=Symbol.for("react.portal"),tp=Symbol.for("react.fragment"),np=Symbol.for("react.strict_mode"),rp=Symbol.for("react.profiler"),lp=Symbol.for("react.provider"),op=Symbol.for("react.context"),ip=Symbol.for("react.forward_ref"),up=Symbol.for("react.suspense"),sp=Symbol.for("react.memo"),ap=Symbol.for("react.lazy"),Ls=Symbol.iterator;function cp(e){return e===null||typeof e!="object"?null:(e=Ls&&e[Ls]||e["@@iterator"],typeof e=="function"?e:null)}var nc={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},rc=Object.assign,lc={};function Hn(e,t,n){this.props=e,this.context=t,this.refs=lc,this.updater=n||nc}Hn.prototype.isReactComponent={};Hn.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};Hn.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function oc(){}oc.prototype=Hn.prototype;function ku(e,t,n){this.props=e,this.context=t,this.refs=lc,this.updater=n||nc}var xu=ku.prototype=new oc;xu.constructor=ku;rc(xu,Hn.prototype);xu.isPureReactComponent=!0;var _s=Array.isArray,ic=Object.prototype.hasOwnProperty,Cu={current:null},uc={key:!0,ref:!0,__self:!0,__source:!0};function sc(e,t,n){var r,l={},o=null,i=null;if(t!=null)for(r in t.ref!==void 0&&(i=t.ref),t.key!==void 0&&(o=""+t.key),t)ic.call(t,r)&&!uc.hasOwnProperty(r)&&(l[r]=t[r]);var u=arguments.length-2;if(u===1)l.children=n;else if(1>>1,ee=N[j];if(0>>1;jl(b,A))tel(G,b)?(N[j]=G,N[te]=A,j=te):(N[j]=b,N[st]=A,j=st);else if(tel(G,A))N[j]=G,N[te]=A,j=te;else break e}}return F}function l(N,F){var A=N.sortIndex-F.sortIndex;return A!==0?A:N.id-F.id}if(typeof performance=="object"&&typeof performance.now=="function"){var o=performance;e.unstable_now=function(){return o.now()}}else{var i=Date,u=i.now();e.unstable_now=function(){return i.now()-u}}var s=[],a=[],c=1,d=null,h=3,y=!1,m=!1,v=!1,P=typeof setTimeout=="function"?setTimeout:null,p=typeof clearTimeout=="function"?clearTimeout:null,f=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function g(N){for(var F=n(a);F!==null;){if(F.callback===null)r(a);else if(F.startTime<=N)r(a),F.sortIndex=F.expirationTime,t(s,F);else break;F=n(a)}}function E(N){if(v=!1,g(N),!m)if(n(s)!==null)m=!0,ut(C);else{var F=n(a);F!==null&&bt(E,F.startTime-N)}}function C(N,F){m=!1,v&&(v=!1,p(_),_=-1),y=!0;var A=h;try{for(g(F),d=n(s);d!==null&&(!(d.expirationTime>F)||N&&!V());){var j=d.callback;if(typeof j=="function"){d.callback=null,h=d.priorityLevel;var ee=j(d.expirationTime<=F);F=e.unstable_now(),typeof ee=="function"?d.callback=ee:d===n(s)&&r(s),g(F)}else r(s);d=n(s)}if(d!==null)var Kt=!0;else{var st=n(a);st!==null&&bt(E,st.startTime-F),Kt=!1}return Kt}finally{d=null,h=A,y=!1}}var L=!1,T=null,_=-1,U=5,O=-1;function V(){return!(e.unstable_now()-ON||125j?(N.sortIndex=A,t(a,N),n(s)===null&&N===n(a)&&(v?(p(_),_=-1):v=!0,bt(E,A-j))):(N.sortIndex=ee,t(s,N),m||y||(m=!0,ut(C))),N},e.unstable_shouldYield=V,e.unstable_wrapCallback=function(N){var F=h;return function(){var A=h;h=F;try{return N.apply(this,arguments)}finally{h=A}}}})(dc);fc.exports=dc;var Ep=fc.exports;/** - * @license React - * 
react-dom.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var pc=w,Re=Ep;function k(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),wi=Object.prototype.hasOwnProperty,kp=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,Os={},Fs={};function xp(e){return wi.call(Fs,e)?!0:wi.call(Os,e)?!1:kp.test(e)?Fs[e]=!0:(Os[e]=!0,!1)}function Cp(e,t,n,r){if(n!==null&&n.type===0)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return r?!1:n!==null?!n.acceptsBooleans:(e=e.toLowerCase().slice(0,5),e!=="data-"&&e!=="aria-");default:return!1}}function Tp(e,t,n,r){if(t===null||typeof t>"u"||Cp(e,t,n,r))return!0;if(r)return!1;if(n!==null)switch(n.type){case 3:return!t;case 4:return t===!1;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}function Se(e,t,n,r,l,o,i){this.acceptsBooleans=t===2||t===3||t===4,this.attributeName=r,this.attributeNamespace=l,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=o,this.removeEmptyString=i}var fe={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){fe[e]=new Se(e,0,!1,e,null,!1,!1)});[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];fe[t]=new Se(t,1,!1,e[1],null,!1,!1)});["contentEditable","draggable","spellCheck","value"].forEach(function(e){fe[e]=new Se(e,2,!1,e.toLowerCase(),null,!1,!1)});["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){fe[e]=new Se(e,2,!1,e,null,!1,!1)});"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){fe[e]=new Se(e,3,!1,e.toLowerCase(),null,!1,!1)});["checked","multiple","muted","selected"].forEach(function(e){fe[e]=new Se(e,3,!0,e,null,!1,!1)});["capture","download"].forEach(function(e){fe[e]=new Se(e,4,!1,e,null,!1,!1)});["cols","rows","size","span"].forEach(function(e){fe[e]=new Se(e,6,!1,e,null,!1,!1)});["rowSpan","start"].forEach(function(e){fe[e]=new Se(e,5,!1,e.toLowerCase(),null,!1,!1)});var Pu=/[\-:]([a-z])/g;function Lu(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color 
stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(Pu,Lu);fe[t]=new Se(t,1,!1,e,null,!1,!1)});"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(Pu,Lu);fe[t]=new Se(t,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)});["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(Pu,Lu);fe[t]=new Se(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)});["tabIndex","crossOrigin"].forEach(function(e){fe[e]=new Se(e,1,!1,e.toLowerCase(),null,!1,!1)});fe.xlinkHref=new Se("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1);["src","href","action","formAction"].forEach(function(e){fe[e]=new Se(e,1,!1,e.toLowerCase(),null,!0,!0)});function _u(e,t,n,r){var l=fe.hasOwnProperty(t)?fe[t]:null;(l!==null?l.type!==0:r||!(2u||l[i]!==o[u]){var s=` -`+l[i].replace(" at new "," at ");return e.displayName&&s.includes("")&&(s=s.replace("",e.displayName)),s}while(1<=i&&0<=u);break}}}finally{Ao=!1,Error.prepareStackTrace=n}return(e=e?e.displayName||e.name:"")?ir(e):""}function Np(e){switch(e.tag){case 5:return ir(e.type);case 16:return ir("Lazy");case 13:return ir("Suspense");case 19:return ir("SuspenseList");case 0:case 2:case 15:return e=$o(e.type,!1),e;case 11:return e=$o(e.type.render,!1),e;case 1:return e=$o(e.type,!0),e;default:return""}}function xi(e){if(e==null)return null;if(typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case vn:return"Fragment";case gn:return"Portal";case Si:return"Profiler";case Ru:return"StrictMode";case Ei:return"Suspense";case ki:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case gc:return(e.displayName||"Context")+".Consumer";case hc:return(e._context.displayName||"Context")+".Provider";case Ou:var t=e.render;return e=e.displayName,e||(e=t.displayName||t.name||"",e=e!==""?"ForwardRef("+e+")":"ForwardRef"),e;case Fu:return t=e.displayName||null,t!==null?t:xi(e.type)||"Memo";case Tt:t=e._payload,e=e._init;try{return xi(e(t))}catch{}}return null}function Pp(e){var t=e.type;switch(e.tag){case 24:return"Cache";case 9:return(t.displayName||"Context")+".Consumer";case 10:return(t._context.displayName||"Context")+".Provider";case 18:return"DehydratedFragment";case 11:return e=t.render,e=e.displayName||e.name||"",t.displayName||(e!==""?"ForwardRef("+e+")":"ForwardRef");case 7:return"Fragment";case 5:return t;case 4:return"Portal";case 3:return"Root";case 6:return"Text";case 16:return xi(t);case 8:return t===Ru?"StrictMode":"Mode";case 22:return"Offscreen";case 12:return"Profiler";case 21:return"Scope";case 13:return"Suspense";case 19:return"SuspenseList";case 25:return"TracingMarker";case 1:case 0:case 17:case 2:case 14:case 15:if(typeof t=="function")return t.displayName||t.name||null;if(typeof t=="string")return t}return null}function Bt(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return e;default:return""}}function yc(e){var t=e.type;return(e=e.nodeName)&&e.toLowerCase()==="input"&&(t==="checkbox"||t==="radio")}function Lp(e){var 
t=yc(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&typeof n<"u"&&typeof n.get=="function"&&typeof n.set=="function"){var l=n.get,o=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return l.call(this)},set:function(i){r=""+i,o.call(this,i)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(i){r=""+i},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}function Xr(e){e._valueTracker||(e._valueTracker=Lp(e))}function wc(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=yc(e)?e.checked?"true":"false":e.value),e=r,e!==n?(t.setValue(e),!0):!1}function Rl(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}function Ci(e,t){var n=t.checked;return X({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:n??e._wrapperState.initialChecked})}function $s(e,t){var n=t.defaultValue==null?"":t.defaultValue,r=t.checked!=null?t.checked:t.defaultChecked;n=Bt(t.value!=null?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:t.type==="checkbox"||t.type==="radio"?t.checked!=null:t.value!=null}}function Sc(e,t){t=t.checked,t!=null&&_u(e,"checked",t,!1)}function Ti(e,t){Sc(e,t);var n=Bt(t.value),r=t.type;if(n!=null)r==="number"?(n===0&&e.value===""||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if(r==="submit"||r==="reset"){e.removeAttribute("value");return}t.hasOwnProperty("value")?Ni(e,t.type,n):t.hasOwnProperty("defaultValue")&&Ni(e,t.type,Bt(t.defaultValue)),t.checked==null&&t.defaultChecked!=null&&(e.defaultChecked=!!t.defaultChecked)}function Ds(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!(r!=="submit"&&r!=="reset"||t.value!==void 0&&t.value!==null))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}n=e.name,n!==""&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,n!==""&&(e.name=n)}function Ni(e,t,n){(t!=="number"||Rl(e.ownerDocument)!==e)&&(n==null?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}var ur=Array.isArray;function Ln(e,t,n,r){if(e=e.options,t){t={};for(var l=0;l"+t.valueOf().toString()+"",t=Jr.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}});function Er(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&n.nodeType===3){n.nodeValue=t;return}}e.textContent=t}var fr={animationIterationCount:!0,aspectRatio:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},_p=["Webkit","ms","Moz","O"];Object.keys(fr).forEach(function(e){_p.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),fr[t]=fr[e]})});function Cc(e,t,n){return t==null||typeof t=="boolean"||t===""?"":n||typeof 
t!="number"||t===0||fr.hasOwnProperty(e)&&fr[e]?(""+t).trim():t+"px"}function Tc(e,t){e=e.style;for(var n in t)if(t.hasOwnProperty(n)){var r=n.indexOf("--")===0,l=Cc(n,t[n],r);n==="float"&&(n="cssFloat"),r?e.setProperty(n,l):e[n]=l}}var Rp=X({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function _i(e,t){if(t){if(Rp[e]&&(t.children!=null||t.dangerouslySetInnerHTML!=null))throw Error(k(137,e));if(t.dangerouslySetInnerHTML!=null){if(t.children!=null)throw Error(k(60));if(typeof t.dangerouslySetInnerHTML!="object"||!("__html"in t.dangerouslySetInnerHTML))throw Error(k(61))}if(t.style!=null&&typeof t.style!="object")throw Error(k(62))}}function Ri(e,t){if(e.indexOf("-")===-1)return typeof t.is=="string";switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var Oi=null;function Au(e){return e=e.target||e.srcElement||window,e.correspondingUseElement&&(e=e.correspondingUseElement),e.nodeType===3?e.parentNode:e}var Fi=null,_n=null,Rn=null;function js(e){if(e=Br(e)){if(typeof Fi!="function")throw Error(k(280));var t=e.stateNode;t&&(t=ao(t),Fi(e.stateNode,e.type,t))}}function Nc(e){_n?Rn?Rn.push(e):Rn=[e]:_n=e}function Pc(){if(_n){var e=_n,t=Rn;if(Rn=_n=null,js(e),t)for(e=0;e>>=0,e===0?32:31-(Bp(e)/Hp|0)|0}var Zr=64,el=4194304;function sr(e){switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return e&4194240;case 4194304:case 8388608:case 16777216:case 33554432:case 67108864:return e&130023424;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 1073741824;default:return e}}function $l(e,t){var n=e.pendingLanes;if(n===0)return 0;var r=0,l=e.suspendedLanes,o=e.pingedLanes,i=n&268435455;if(i!==0){var u=i&~l;u!==0?r=sr(u):(o&=i,o!==0&&(r=sr(o)))}else i=n&~l,i!==0?r=sr(i):o!==0&&(r=sr(o));if(r===0)return 0;if(t!==0&&t!==r&&!(t&l)&&(l=r&-r,o=t&-t,l>=o||l===16&&(o&4194240)!==0))return t;if(r&4&&(r|=n&16),t=e.entangledLanes,t!==0)for(e=e.entanglements,t&=r;0n;n++)t.push(e);return t}function Ur(e,t,n){e.pendingLanes|=t,t!==536870912&&(e.suspendedLanes=0,e.pingedLanes=0),e=e.eventTimes,t=31-Ge(t),e[t]=n}function bp(e,t){var n=e.pendingLanes&~t;e.pendingLanes=t,e.suspendedLanes=0,e.pingedLanes=0,e.expiredLanes&=t,e.mutableReadLanes&=t,e.entangledLanes&=t,t=e.entanglements;var r=e.eventTimes;for(e=e.expirationTimes;0=pr),Ks=String.fromCharCode(32),Gs=!1;function Kc(e,t){switch(e){case"keyup":return Sm.indexOf(t.keyCode)!==-1;case"keydown":return t.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Gc(e){return e=e.detail,typeof e=="object"&&"data"in e?e.data:null}var yn=!1;function km(e,t){switch(e){case"compositionend":return Gc(t);case"keypress":return t.which!==32?null:(Gs=!0,Ks);case"textInput":return e=t.data,e===Ks&&Gs?null:e;default:return null}}function xm(e,t){if(yn)return e==="compositionend"||!Bu&&Kc(e,t)?(e=Qc(),gl=ju=Ot=null,yn=!1,e):null;switch(e){case"paste":return 
null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1=t)return{node:n,offset:t-e};e=r}e:{for(;n;){if(n.nextSibling){n=n.nextSibling;break e}n=n.parentNode}n=void 0}n=Js(n)}}function Jc(e,t){return e&&t?e===t?!0:e&&e.nodeType===3?!1:t&&t.nodeType===3?Jc(e,t.parentNode):"contains"in e?e.contains(t):e.compareDocumentPosition?!!(e.compareDocumentPosition(t)&16):!1:!1}function Zc(){for(var e=window,t=Rl();t instanceof e.HTMLIFrameElement;){try{var n=typeof t.contentWindow.location.href=="string"}catch{n=!1}if(n)e=t.contentWindow;else break;t=Rl(e.document)}return t}function Hu(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&(t==="input"&&(e.type==="text"||e.type==="search"||e.type==="tel"||e.type==="url"||e.type==="password")||t==="textarea"||e.contentEditable==="true")}function Fm(e){var t=Zc(),n=e.focusedElem,r=e.selectionRange;if(t!==n&&n&&n.ownerDocument&&Jc(n.ownerDocument.documentElement,n)){if(r!==null&&Hu(n)){if(t=r.start,e=r.end,e===void 0&&(e=t),"selectionStart"in n)n.selectionStart=t,n.selectionEnd=Math.min(e,n.value.length);else if(e=(t=n.ownerDocument||document)&&t.defaultView||window,e.getSelection){e=e.getSelection();var l=n.textContent.length,o=Math.min(r.start,l);r=r.end===void 0?o:Math.min(r.end,l),!e.extend&&o>r&&(l=r,r=o,o=l),l=Zs(n,o);var i=Zs(n,r);l&&i&&(e.rangeCount!==1||e.anchorNode!==l.node||e.anchorOffset!==l.offset||e.focusNode!==i.node||e.focusOffset!==i.offset)&&(t=t.createRange(),t.setStart(l.node,l.offset),e.removeAllRanges(),o>r?(e.addRange(t),e.extend(i.node,i.offset)):(t.setEnd(i.node,i.offset),e.addRange(t)))}}for(t=[],e=n;e=e.parentNode;)e.nodeType===1&&t.push({element:e,left:e.scrollLeft,top:e.scrollTop});for(typeof n.focus=="function"&&n.focus(),n=0;n=document.documentMode,wn=null,ji=null,hr=null,Ui=!1;function ea(e,t,n){var r=n.window===n?n.document:n.nodeType===9?n:n.ownerDocument;Ui||wn==null||wn!==Rl(r)||(r=wn,"selectionStart"in r&&Hu(r)?r={start:r.selectionStart,end:r.selectionEnd}:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection(),r={anchorNode:r.anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset}),hr&&Pr(hr,r)||(hr=r,r=Ml(ji,"onSelect"),0kn||(e.current=Qi[kn],Qi[kn]=null,kn--)}function H(e,t){kn++,Qi[kn]=e.current,e.current=t}var Ht={},ge=Wt(Ht),xe=Wt(!1),un=Ht;function Dn(e,t){var n=e.type.contextTypes;if(!n)return Ht;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var l={},o;for(o in n)l[o]=t[o];return r&&(e=e.stateNode,e.__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=l),l}function Ce(e){return e=e.childContextTypes,e!=null}function Ul(){Q(xe),Q(ge)}function ua(e,t,n){if(ge.current!==Ht)throw Error(k(168));H(ge,t),H(xe,n)}function af(e,t,n){var r=e.stateNode;if(t=t.childContextTypes,typeof r.getChildContext!="function")return n;r=r.getChildContext();for(var l in r)if(!(l in t))throw Error(k(108,Pp(e)||"Unknown",l));return X({},n,r)}function Il(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||Ht,un=ge.current,H(ge,e),H(xe,xe.current),!0}function sa(e,t,n){var r=e.stateNode;if(!r)throw Error(k(169));n?(e=af(e,t,un),r.__reactInternalMemoizedMergedChildContext=e,Q(xe),Q(ge),H(ge,e)):Q(xe),H(xe,n)}var ct=null,co=!1,Go=!1;function cf(e){ct===null?ct=[e]:ct.push(e)}function Wm(e){co=!0,cf(e)}function Qt(){if(!Go&&ct!==null){Go=!0;var e=0,t=I;try{var 
n=ct;for(I=1;e>=i,l-=i,ft=1<<32-Ge(t)+l|n<_?(U=T,T=null):U=T.sibling;var O=h(p,T,g[_],E);if(O===null){T===null&&(T=U);break}e&&T&&O.alternate===null&&t(p,T),f=o(O,f,_),L===null?C=O:L.sibling=O,L=O,T=U}if(_===g.length)return n(p,T),K&&qt(p,_),C;if(T===null){for(;__?(U=T,T=null):U=T.sibling;var V=h(p,T,O.value,E);if(V===null){T===null&&(T=U);break}e&&T&&V.alternate===null&&t(p,T),f=o(V,f,_),L===null?C=V:L.sibling=V,L=V,T=U}if(O.done)return n(p,T),K&&qt(p,_),C;if(T===null){for(;!O.done;_++,O=g.next())O=d(p,O.value,E),O!==null&&(f=o(O,f,_),L===null?C=O:L.sibling=O,L=O);return K&&qt(p,_),C}for(T=r(p,T);!O.done;_++,O=g.next())O=y(T,p,_,O.value,E),O!==null&&(e&&O.alternate!==null&&T.delete(O.key===null?_:O.key),f=o(O,f,_),L===null?C=O:L.sibling=O,L=O);return e&&T.forEach(function(He){return t(p,He)}),K&&qt(p,_),C}function P(p,f,g,E){if(typeof g=="object"&&g!==null&&g.type===vn&&g.key===null&&(g=g.props.children),typeof g=="object"&&g!==null){switch(g.$$typeof){case Yr:e:{for(var C=g.key,L=f;L!==null;){if(L.key===C){if(C=g.type,C===vn){if(L.tag===7){n(p,L.sibling),f=l(L,g.props.children),f.return=p,p=f;break e}}else if(L.elementType===C||typeof C=="object"&&C!==null&&C.$$typeof===Tt&&ha(C)===L.type){n(p,L.sibling),f=l(L,g.props),f.ref=er(p,L,g),f.return=p,p=f;break e}n(p,L);break}else t(p,L);L=L.sibling}g.type===vn?(f=rn(g.props.children,p.mode,E,g.key),f.return=p,p=f):(E=Cl(g.type,g.key,g.props,null,p.mode,E),E.ref=er(p,f,g),E.return=p,p=E)}return i(p);case gn:e:{for(L=g.key;f!==null;){if(f.key===L)if(f.tag===4&&f.stateNode.containerInfo===g.containerInfo&&f.stateNode.implementation===g.implementation){n(p,f.sibling),f=l(f,g.children||[]),f.return=p,p=f;break e}else{n(p,f);break}else t(p,f);f=f.sibling}f=ni(g,p.mode,E),f.return=p,p=f}return i(p);case Tt:return L=g._init,P(p,f,L(g._payload),E)}if(ur(g))return m(p,f,g,E);if(qn(g))return v(p,f,g,E);ul(p,g)}return typeof g=="string"&&g!==""||typeof g=="number"?(g=""+g,f!==null&&f.tag===6?(n(p,f.sibling),f=l(f,g),f.return=p,p=f):(n(p,f),f=ti(g,p.mode,E),f.return=p,p=f),i(p)):n(p,f)}return P}var Mn=yf(!0),wf=yf(!1),Hr={},rt=Wt(Hr),Or=Wt(Hr),Fr=Wt(Hr);function en(e){if(e===Hr)throw Error(k(174));return e}function Xu(e,t){switch(H(Fr,t),H(Or,e),H(rt,Hr),e=t.nodeType,e){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:Li(null,"");break;default:e=e===8?t.parentNode:t,t=e.namespaceURI||null,e=e.tagName,t=Li(t,e)}Q(rt),H(rt,t)}function jn(){Q(rt),Q(Or),Q(Fr)}function Sf(e){en(Fr.current);var t=en(rt.current),n=Li(t,e.type);t!==n&&(H(Or,e),H(rt,n))}function Ju(e){Or.current===e&&(Q(rt),Q(Or))}var q=Wt(0);function bl(e){for(var t=e;t!==null;){if(t.tag===13){var n=t.memoizedState;if(n!==null&&(n=n.dehydrated,n===null||n.data==="$?"||n.data==="$!"))return t}else if(t.tag===19&&t.memoizedProps.revealOrder!==void 0){if(t.flags&128)return t}else if(t.child!==null){t.child.return=t,t=t.child;continue}if(t===e)break;for(;t.sibling===null;){if(t.return===null||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}return null}var qo=[];function Zu(){for(var e=0;en?n:4,e(!0);var r=Yo.transition;Yo.transition={};try{e(!1),t()}finally{I=n,Yo.transition=r}}function zf(){return Ie().memoizedState}function Gm(e,t,n){var r=Ut(e);if(n={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null},Mf(e))jf(t,n);else if(n=mf(e,t,n,r),n!==null){var l=ye();qe(n,e,r,l),Uf(n,t,r)}}function qm(e,t,n){var r=Ut(e),l={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null};if(Mf(e))jf(t,l);else{var 
o=e.alternate;if(e.lanes===0&&(o===null||o.lanes===0)&&(o=t.lastRenderedReducer,o!==null))try{var i=t.lastRenderedState,u=o(i,n);if(l.hasEagerState=!0,l.eagerState=u,Ye(u,i)){var s=t.interleaved;s===null?(l.next=l,qu(t)):(l.next=s.next,s.next=l),t.interleaved=l;return}}catch{}finally{}n=mf(e,t,l,r),n!==null&&(l=ye(),qe(n,e,r,l),Uf(n,t,r))}}function Mf(e){var t=e.alternate;return e===Y||t!==null&&t===Y}function jf(e,t){gr=Kl=!0;var n=e.pending;n===null?t.next=t:(t.next=n.next,n.next=t),e.pending=t}function Uf(e,t,n){if(n&4194240){var r=t.lanes;r&=e.pendingLanes,n|=r,t.lanes=n,Du(e,n)}}var Gl={readContext:Ue,useCallback:de,useContext:de,useEffect:de,useImperativeHandle:de,useInsertionEffect:de,useLayoutEffect:de,useMemo:de,useReducer:de,useRef:de,useState:de,useDebugValue:de,useDeferredValue:de,useTransition:de,useMutableSource:de,useSyncExternalStore:de,useId:de,unstable_isNewReconciler:!1},Ym={readContext:Ue,useCallback:function(e,t){return Ze().memoizedState=[e,t===void 0?null:t],e},useContext:Ue,useEffect:va,useImperativeHandle:function(e,t,n){return n=n!=null?n.concat([e]):null,Sl(4194308,4,Of.bind(null,t,e),n)},useLayoutEffect:function(e,t){return Sl(4194308,4,e,t)},useInsertionEffect:function(e,t){return Sl(4,2,e,t)},useMemo:function(e,t){var n=Ze();return t=t===void 0?null:t,e=e(),n.memoizedState=[e,t],e},useReducer:function(e,t,n){var r=Ze();return t=n!==void 0?n(t):t,r.memoizedState=r.baseState=t,e={pending:null,interleaved:null,lanes:0,dispatch:null,lastRenderedReducer:e,lastRenderedState:t},r.queue=e,e=e.dispatch=Gm.bind(null,Y,e),[r.memoizedState,e]},useRef:function(e){var t=Ze();return e={current:e},t.memoizedState=e},useState:ga,useDebugValue:ls,useDeferredValue:function(e){return Ze().memoizedState=e},useTransition:function(){var e=ga(!1),t=e[0];return e=Km.bind(null,e[1]),Ze().memoizedState=e,[t,e]},useMutableSource:function(){},useSyncExternalStore:function(e,t,n){var r=Y,l=Ze();if(K){if(n===void 0)throw Error(k(407));n=n()}else{if(n=t(),se===null)throw Error(k(349));an&30||xf(r,t,n)}l.memoizedState=n;var o={value:n,getSnapshot:t};return l.queue=o,va(Tf.bind(null,r,o,e),[e]),r.flags|=2048,Dr(9,Cf.bind(null,r,o,n,t),void 0,null),n},useId:function(){var e=Ze(),t=se.identifierPrefix;if(K){var n=dt,r=ft;n=(r&~(1<<32-Ge(r)-1)).toString(32)+n,t=":"+t+"R"+n,n=Ar++,0<\/script>",e=e.removeChild(e.firstChild)):typeof r.is=="string"?e=i.createElement(n,{is:r.is}):(e=i.createElement(n),n==="select"&&(i=e,r.multiple?i.multiple=!0:r.size&&(i.size=r.size))):e=i.createElementNS(e,n),e[et]=t,e[Rr]=r,Gf(e,t,!1,!1),t.stateNode=e;e:{switch(i=Ri(n,r),n){case"dialog":W("cancel",e),W("close",e),l=r;break;case"iframe":case"object":case"embed":W("load",e),l=r;break;case"video":case"audio":for(l=0;lIn&&(t.flags|=128,r=!0,tr(o,!1),t.lanes=4194304)}else{if(!r)if(e=bl(i),e!==null){if(t.flags|=128,r=!0,n=e.updateQueue,n!==null&&(t.updateQueue=n,t.flags|=4),tr(o,!0),o.tail===null&&o.tailMode==="hidden"&&!i.alternate&&!K)return pe(t),null}else 2*Z()-o.renderingStartTime>In&&n!==1073741824&&(t.flags|=128,r=!0,tr(o,!1),t.lanes=4194304);o.isBackwards?(i.sibling=t.child,t.child=i):(n=o.last,n!==null?n.sibling=i:t.child=i,o.last=i)}return o.tail!==null?(t=o.tail,o.rendering=t,o.tail=t.sibling,o.renderingStartTime=Z(),t.sibling=null,n=q.current,H(q,r?n&1|2:n&1),t):(pe(t),null);case 22:case 23:return cs(),r=t.memoizedState!==null,e!==null&&e.memoizedState!==null!==r&&(t.flags|=8192),r&&t.mode&1?Ne&1073741824&&(pe(t),t.subtreeFlags&6&&(t.flags|=8192)):pe(t),null;case 24:return null;case 25:return null}throw 
Error(k(156,t.tag))}function lh(e,t){switch(Wu(t),t.tag){case 1:return Ce(t.type)&&Ul(),e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 3:return jn(),Q(xe),Q(ge),Zu(),e=t.flags,e&65536&&!(e&128)?(t.flags=e&-65537|128,t):null;case 5:return Ju(t),null;case 13:if(Q(q),e=t.memoizedState,e!==null&&e.dehydrated!==null){if(t.alternate===null)throw Error(k(340));zn()}return e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 19:return Q(q),null;case 4:return jn(),null;case 10:return Gu(t.type._context),null;case 22:case 23:return cs(),null;case 24:return null;default:return null}}var al=!1,me=!1,oh=typeof WeakSet=="function"?WeakSet:Set,R=null;function Nn(e,t){var n=e.ref;if(n!==null)if(typeof n=="function")try{n(null)}catch(r){J(e,t,r)}else n.current=null}function ru(e,t,n){try{n()}catch(r){J(e,t,r)}}var Na=!1;function ih(e,t){if(Ii=Dl,e=Zc(),Hu(e)){if("selectionStart"in e)var n={start:e.selectionStart,end:e.selectionEnd};else e:{n=(n=e.ownerDocument)&&n.defaultView||window;var r=n.getSelection&&n.getSelection();if(r&&r.rangeCount!==0){n=r.anchorNode;var l=r.anchorOffset,o=r.focusNode;r=r.focusOffset;try{n.nodeType,o.nodeType}catch{n=null;break e}var i=0,u=-1,s=-1,a=0,c=0,d=e,h=null;t:for(;;){for(var y;d!==n||l!==0&&d.nodeType!==3||(u=i+l),d!==o||r!==0&&d.nodeType!==3||(s=i+r),d.nodeType===3&&(i+=d.nodeValue.length),(y=d.firstChild)!==null;)h=d,d=y;for(;;){if(d===e)break t;if(h===n&&++a===l&&(u=i),h===o&&++c===r&&(s=i),(y=d.nextSibling)!==null)break;d=h,h=d.parentNode}d=y}n=u===-1||s===-1?null:{start:u,end:s}}else n=null}n=n||{start:0,end:0}}else n=null;for(Bi={focusedElem:e,selectionRange:n},Dl=!1,R=t;R!==null;)if(t=R,e=t.child,(t.subtreeFlags&1028)!==0&&e!==null)e.return=t,R=e;else for(;R!==null;){t=R;try{var m=t.alternate;if(t.flags&1024)switch(t.tag){case 0:case 11:case 15:break;case 1:if(m!==null){var v=m.memoizedProps,P=m.memoizedState,p=t.stateNode,f=p.getSnapshotBeforeUpdate(t.elementType===t.type?v:Qe(t.type,v),P);p.__reactInternalSnapshotBeforeUpdate=f}break;case 3:var g=t.stateNode.containerInfo;g.nodeType===1?g.textContent="":g.nodeType===9&&g.documentElement&&g.removeChild(g.documentElement);break;case 5:case 6:case 4:case 17:break;default:throw Error(k(163))}}catch(E){J(t,t.return,E)}if(e=t.sibling,e!==null){e.return=t.return,R=e;break}R=t.return}return m=Na,Na=!1,m}function vr(e,t,n){var r=t.updateQueue;if(r=r!==null?r.lastEffect:null,r!==null){var l=r=r.next;do{if((l.tag&e)===e){var o=l.destroy;l.destroy=void 0,o!==void 0&&ru(t,n,o)}l=l.next}while(l!==r)}}function mo(e,t){if(t=t.updateQueue,t=t!==null?t.lastEffect:null,t!==null){var n=t=t.next;do{if((n.tag&e)===e){var r=n.create;n.destroy=r()}n=n.next}while(n!==t)}}function lu(e){var t=e.ref;if(t!==null){var n=e.stateNode;switch(e.tag){case 5:e=n;break;default:e=n}typeof t=="function"?t(e):t.current=e}}function Xf(e){var t=e.alternate;t!==null&&(e.alternate=null,Xf(t)),e.child=null,e.deletions=null,e.sibling=null,e.tag===5&&(t=e.stateNode,t!==null&&(delete t[et],delete t[Rr],delete t[Wi],delete t[Hm],delete t[Vm])),e.stateNode=null,e.return=null,e.dependencies=null,e.memoizedProps=null,e.memoizedState=null,e.pendingProps=null,e.stateNode=null,e.updateQueue=null}function Jf(e){return e.tag===5||e.tag===3||e.tag===4}function Pa(e){e:for(;;){for(;e.sibling===null;){if(e.return===null||Jf(e.return))return null;e=e.return}for(e.sibling.return=e.return,e=e.sibling;e.tag!==5&&e.tag!==6&&e.tag!==18;){if(e.flags&2||e.child===null||e.tag===4)continue e;e.child.return=e,e=e.child}if(!(e.flags&2))return e.stateNode}}function 
ou(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.nodeType===8?n.parentNode.insertBefore(e,t):n.insertBefore(e,t):(n.nodeType===8?(t=n.parentNode,t.insertBefore(e,n)):(t=n,t.appendChild(e)),n=n._reactRootContainer,n!=null||t.onclick!==null||(t.onclick=jl));else if(r!==4&&(e=e.child,e!==null))for(ou(e,t,n),e=e.sibling;e!==null;)ou(e,t,n),e=e.sibling}function iu(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.insertBefore(e,t):n.appendChild(e);else if(r!==4&&(e=e.child,e!==null))for(iu(e,t,n),e=e.sibling;e!==null;)iu(e,t,n),e=e.sibling}var ae=null,be=!1;function xt(e,t,n){for(n=n.child;n!==null;)Zf(e,t,n),n=n.sibling}function Zf(e,t,n){if(nt&&typeof nt.onCommitFiberUnmount=="function")try{nt.onCommitFiberUnmount(oo,n)}catch{}switch(n.tag){case 5:me||Nn(n,t);case 6:var r=ae,l=be;ae=null,xt(e,t,n),ae=r,be=l,ae!==null&&(be?(e=ae,n=n.stateNode,e.nodeType===8?e.parentNode.removeChild(n):e.removeChild(n)):ae.removeChild(n.stateNode));break;case 18:ae!==null&&(be?(e=ae,n=n.stateNode,e.nodeType===8?Ko(e.parentNode,n):e.nodeType===1&&Ko(e,n),Tr(e)):Ko(ae,n.stateNode));break;case 4:r=ae,l=be,ae=n.stateNode.containerInfo,be=!0,xt(e,t,n),ae=r,be=l;break;case 0:case 11:case 14:case 15:if(!me&&(r=n.updateQueue,r!==null&&(r=r.lastEffect,r!==null))){l=r=r.next;do{var o=l,i=o.destroy;o=o.tag,i!==void 0&&(o&2||o&4)&&ru(n,t,i),l=l.next}while(l!==r)}xt(e,t,n);break;case 1:if(!me&&(Nn(n,t),r=n.stateNode,typeof r.componentWillUnmount=="function"))try{r.props=n.memoizedProps,r.state=n.memoizedState,r.componentWillUnmount()}catch(u){J(n,t,u)}xt(e,t,n);break;case 21:xt(e,t,n);break;case 22:n.mode&1?(me=(r=me)||n.memoizedState!==null,xt(e,t,n),me=r):xt(e,t,n);break;default:xt(e,t,n)}}function La(e){var t=e.updateQueue;if(t!==null){e.updateQueue=null;var n=e.stateNode;n===null&&(n=e.stateNode=new oh),t.forEach(function(r){var l=hh.bind(null,e,r);n.has(r)||(n.add(r),r.then(l,l))})}}function We(e,t){var n=t.deletions;if(n!==null)for(var r=0;rl&&(l=i),r&=~o}if(r=l,r=Z()-r,r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*sh(r/1960))-r,10e?16:e,Ft===null)var r=!1;else{if(e=Ft,Ft=null,Xl=0,M&6)throw Error(k(331));var l=M;for(M|=4,R=e.current;R!==null;){var o=R,i=o.child;if(R.flags&16){var u=o.deletions;if(u!==null){for(var s=0;sZ()-ss?nn(e,0):us|=n),Te(e,t)}function ud(e,t){t===0&&(e.mode&1?(t=el,el<<=1,!(el&130023424)&&(el=4194304)):t=1);var n=ye();e=St(e,t),e!==null&&(Ur(e,t,n),Te(e,n))}function mh(e){var t=e.memoizedState,n=0;t!==null&&(n=t.retryLane),ud(e,n)}function hh(e,t){var n=0;switch(e.tag){case 13:var r=e.stateNode,l=e.memoizedState;l!==null&&(n=l.retryLane);break;case 19:r=e.stateNode;break;default:throw Error(k(314))}r!==null&&r.delete(t),ud(e,n)}var sd;sd=function(e,t,n){if(e!==null)if(e.memoizedProps!==t.pendingProps||xe.current)ke=!0;else{if(!(e.lanes&n)&&!(t.flags&128))return ke=!1,nh(e,t,n);ke=!!(e.flags&131072)}else ke=!1,K&&t.flags&1048576&&ff(t,Hl,t.index);switch(t.lanes=0,t.tag){case 2:var r=t.type;El(e,t),e=t.pendingProps;var l=Dn(t,ge.current);Fn(t,n),l=ts(null,t,r,e,l,n);var o=ns();return t.flags|=1,typeof l=="object"&&l!==null&&typeof l.render=="function"&&l.$$typeof===void 0?(t.tag=1,t.memoizedState=null,t.updateQueue=null,Ce(r)?(o=!0,Il(t)):o=!1,t.memoizedState=l.state!==null&&l.state!==void 0?l.state:null,Yu(t),l.updater=fo,t.stateNode=l,l._reactInternals=t,Yi(t,r,e,n),t=Zi(null,t,r,!0,o,n)):(t.tag=0,K&&o&&Vu(t),ve(null,t,l,n),t=t.child),t;case 
16:r=t.elementType;e:{switch(El(e,t),e=t.pendingProps,l=r._init,r=l(r._payload),t.type=r,l=t.tag=vh(r),e=Qe(r,e),l){case 0:t=Ji(null,t,r,e,n);break e;case 1:t=xa(null,t,r,e,n);break e;case 11:t=Ea(null,t,r,e,n);break e;case 14:t=ka(null,t,r,Qe(r.type,e),n);break e}throw Error(k(306,r,""))}return t;case 0:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Qe(r,l),Ji(e,t,r,l,n);case 1:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Qe(r,l),xa(e,t,r,l,n);case 3:e:{if(Qf(t),e===null)throw Error(k(387));r=t.pendingProps,o=t.memoizedState,l=o.element,hf(e,t),Ql(t,r,null,n);var i=t.memoizedState;if(r=i.element,o.isDehydrated)if(o={element:r,isDehydrated:!1,cache:i.cache,pendingSuspenseBoundaries:i.pendingSuspenseBoundaries,transitions:i.transitions},t.updateQueue.baseState=o,t.memoizedState=o,t.flags&256){l=Un(Error(k(423)),t),t=Ca(e,t,r,n,l);break e}else if(r!==l){l=Un(Error(k(424)),t),t=Ca(e,t,r,n,l);break e}else for(Le=zt(t.stateNode.containerInfo.firstChild),_e=t,K=!0,Ke=null,n=wf(t,null,r,n),t.child=n;n;)n.flags=n.flags&-3|4096,n=n.sibling;else{if(zn(),r===l){t=Et(e,t,n);break e}ve(e,t,r,n)}t=t.child}return t;case 5:return Sf(t),e===null&&Ki(t),r=t.type,l=t.pendingProps,o=e!==null?e.memoizedProps:null,i=l.children,Hi(r,l)?i=null:o!==null&&Hi(r,o)&&(t.flags|=32),Wf(e,t),ve(e,t,i,n),t.child;case 6:return e===null&&Ki(t),null;case 13:return bf(e,t,n);case 4:return Xu(t,t.stateNode.containerInfo),r=t.pendingProps,e===null?t.child=Mn(t,null,r,n):ve(e,t,r,n),t.child;case 11:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Qe(r,l),Ea(e,t,r,l,n);case 7:return ve(e,t,t.pendingProps,n),t.child;case 8:return ve(e,t,t.pendingProps.children,n),t.child;case 12:return ve(e,t,t.pendingProps.children,n),t.child;case 10:e:{if(r=t.type._context,l=t.pendingProps,o=t.memoizedProps,i=l.value,H(Vl,r._currentValue),r._currentValue=i,o!==null)if(Ye(o.value,i)){if(o.children===l.children&&!xe.current){t=Et(e,t,n);break e}}else for(o=t.child,o!==null&&(o.return=t);o!==null;){var u=o.dependencies;if(u!==null){i=o.child;for(var s=u.firstContext;s!==null;){if(s.context===r){if(o.tag===1){s=mt(-1,n&-n),s.tag=2;var a=o.updateQueue;if(a!==null){a=a.shared;var c=a.pending;c===null?s.next=s:(s.next=c.next,c.next=s),a.pending=s}}o.lanes|=n,s=o.alternate,s!==null&&(s.lanes|=n),Gi(o.return,n,t),u.lanes|=n;break}s=s.next}}else if(o.tag===10)i=o.type===t.type?null:o.child;else if(o.tag===18){if(i=o.return,i===null)throw Error(k(341));i.lanes|=n,u=i.alternate,u!==null&&(u.lanes|=n),Gi(i,n,t),i=o.sibling}else i=o.child;if(i!==null)i.return=o;else for(i=o;i!==null;){if(i===t){i=null;break}if(o=i.sibling,o!==null){o.return=i.return,i=o;break}i=i.return}o=i}ve(e,t,l.children,n),t=t.child}return t;case 9:return l=t.type,r=t.pendingProps.children,Fn(t,n),l=Ue(l),r=r(l),t.flags|=1,ve(e,t,r,n),t.child;case 14:return r=t.type,l=Qe(r,t.pendingProps),l=Qe(r.type,l),ka(e,t,r,l,n);case 15:return Hf(e,t,t.type,t.pendingProps,n);case 17:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Qe(r,l),El(e,t),t.tag=1,Ce(r)?(e=!0,Il(t)):e=!1,Fn(t,n),vf(t,r,l),Yi(t,r,l,n),Zi(null,t,r,!0,e,n);case 19:return Kf(e,t,n);case 22:return Vf(e,t,n)}throw Error(k(156,t.tag))};function ad(e,t){return $c(e,t)}function 
gh(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=t,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function ze(e,t,n,r){return new gh(e,t,n,r)}function ds(e){return e=e.prototype,!(!e||!e.isReactComponent)}function vh(e){if(typeof e=="function")return ds(e)?1:0;if(e!=null){if(e=e.$$typeof,e===Ou)return 11;if(e===Fu)return 14}return 2}function It(e,t){var n=e.alternate;return n===null?(n=ze(e.tag,t,e.key,e.mode),n.elementType=e.elementType,n.type=e.type,n.stateNode=e.stateNode,n.alternate=e,e.alternate=n):(n.pendingProps=t,n.type=e.type,n.flags=0,n.subtreeFlags=0,n.deletions=null),n.flags=e.flags&14680064,n.childLanes=e.childLanes,n.lanes=e.lanes,n.child=e.child,n.memoizedProps=e.memoizedProps,n.memoizedState=e.memoizedState,n.updateQueue=e.updateQueue,t=e.dependencies,n.dependencies=t===null?null:{lanes:t.lanes,firstContext:t.firstContext},n.sibling=e.sibling,n.index=e.index,n.ref=e.ref,n}function Cl(e,t,n,r,l,o){var i=2;if(r=e,typeof e=="function")ds(e)&&(i=1);else if(typeof e=="string")i=5;else e:switch(e){case vn:return rn(n.children,l,o,t);case Ru:i=8,l|=8;break;case Si:return e=ze(12,n,t,l|2),e.elementType=Si,e.lanes=o,e;case Ei:return e=ze(13,n,t,l),e.elementType=Ei,e.lanes=o,e;case ki:return e=ze(19,n,t,l),e.elementType=ki,e.lanes=o,e;case vc:return go(n,l,o,t);default:if(typeof e=="object"&&e!==null)switch(e.$$typeof){case hc:i=10;break e;case gc:i=9;break e;case Ou:i=11;break e;case Fu:i=14;break e;case Tt:i=16,r=null;break e}throw Error(k(130,e==null?e:typeof e,""))}return t=ze(i,n,t,l),t.elementType=e,t.type=r,t.lanes=o,t}function rn(e,t,n,r){return e=ze(7,e,r,t),e.lanes=n,e}function go(e,t,n,r){return e=ze(22,e,r,t),e.elementType=vc,e.lanes=n,e.stateNode={isHidden:!1},e}function ti(e,t,n){return e=ze(6,e,null,t),e.lanes=n,e}function ni(e,t,n){return t=ze(4,e.children!==null?e.children:[],e.key,t),t.lanes=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function yh(e,t,n,r,l){this.tag=t,this.containerInfo=e,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=zo(0),this.expirationTimes=zo(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=zo(0),this.identifierPrefix=r,this.onRecoverableError=l,this.mutableSourceEagerHydrationData=null}function ps(e,t,n,r,l,o,i,u,s){return e=new yh(e,t,n,u,s),t===1?(t=1,o===!0&&(t|=8)):t=0,o=ze(3,null,null,t),e.current=o,o.stateNode=e,o.memoizedState={element:r,isDehydrated:n,cache:null,transitions:null,pendingSuspenseBoundaries:null},Yu(o),e}function wh(e,t,n){var r=3"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(pd)}catch(e){console.error(e)}}pd(),cc.exports=Oe;var md=cc.exports,za=md;yi.createRoot=za.createRoot,yi.hydrateRoot=za.hydrateRoot;function hd(e,t){return function(){return e.apply(t,arguments)}}const{toString:Ch}=Object.prototype,{getPrototypeOf:vs}=Object,Eo=(e=>t=>{const n=Ch.call(t);return e[n]||(e[n]=n.slice(8,-1).toLowerCase())})(Object.create(null)),lt=e=>(e=e.toLowerCase(),t=>Eo(t)===e),ko=e=>t=>typeof 
t===e,{isArray:Qn}=Array,Mr=ko("undefined");function Th(e){return e!==null&&!Mr(e)&&e.constructor!==null&&!Mr(e.constructor)&&je(e.constructor.isBuffer)&&e.constructor.isBuffer(e)}const gd=lt("ArrayBuffer");function Nh(e){let t;return typeof ArrayBuffer<"u"&&ArrayBuffer.isView?t=ArrayBuffer.isView(e):t=e&&e.buffer&&gd(e.buffer),t}const Ph=ko("string"),je=ko("function"),vd=ko("number"),xo=e=>e!==null&&typeof e=="object",Lh=e=>e===!0||e===!1,Tl=e=>{if(Eo(e)!=="object")return!1;const t=vs(e);return(t===null||t===Object.prototype||Object.getPrototypeOf(t)===null)&&!(Symbol.toStringTag in e)&&!(Symbol.iterator in e)},_h=lt("Date"),Rh=lt("File"),Oh=lt("Blob"),Fh=lt("FileList"),Ah=e=>xo(e)&&je(e.pipe),$h=e=>{let t;return e&&(typeof FormData=="function"&&e instanceof FormData||je(e.append)&&((t=Eo(e))==="formdata"||t==="object"&&je(e.toString)&&e.toString()==="[object FormData]"))},Dh=lt("URLSearchParams"),zh=e=>e.trim?e.trim():e.replace(/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,"");function Vr(e,t,{allOwnKeys:n=!1}={}){if(e===null||typeof e>"u")return;let r,l;if(typeof e!="object"&&(e=[e]),Qn(e))for(r=0,l=e.length;r0;)if(l=n[r],t===l.toLowerCase())return l;return null}const wd=(()=>typeof globalThis<"u"?globalThis:typeof self<"u"?self:typeof window<"u"?window:global)(),Sd=e=>!Mr(e)&&e!==wd;function fu(){const{caseless:e}=Sd(this)&&this||{},t={},n=(r,l)=>{const o=e&&yd(t,l)||l;Tl(t[o])&&Tl(r)?t[o]=fu(t[o],r):Tl(r)?t[o]=fu({},r):Qn(r)?t[o]=r.slice():t[o]=r};for(let r=0,l=arguments.length;r(Vr(t,(l,o)=>{n&&je(l)?e[o]=hd(l,n):e[o]=l},{allOwnKeys:r}),e),jh=e=>(e.charCodeAt(0)===65279&&(e=e.slice(1)),e),Uh=(e,t,n,r)=>{e.prototype=Object.create(t.prototype,r),e.prototype.constructor=e,Object.defineProperty(e,"super",{value:t.prototype}),n&&Object.assign(e.prototype,n)},Ih=(e,t,n,r)=>{let l,o,i;const u={};if(t=t||{},e==null)return t;do{for(l=Object.getOwnPropertyNames(e),o=l.length;o-- >0;)i=l[o],(!r||r(i,e,t))&&!u[i]&&(t[i]=e[i],u[i]=!0);e=n!==!1&&vs(e)}while(e&&(!n||n(e,t))&&e!==Object.prototype);return t},Bh=(e,t,n)=>{e=String(e),(n===void 0||n>e.length)&&(n=e.length),n-=t.length;const r=e.indexOf(t,n);return r!==-1&&r===n},Hh=e=>{if(!e)return null;if(Qn(e))return e;let t=e.length;if(!vd(t))return null;const n=new Array(t);for(;t-- >0;)n[t]=e[t];return n},Vh=(e=>t=>e&&t instanceof e)(typeof Uint8Array<"u"&&vs(Uint8Array)),Wh=(e,t)=>{const r=(e&&e[Symbol.iterator]).call(e);let l;for(;(l=r.next())&&!l.done;){const o=l.value;t.call(e,o[0],o[1])}},Qh=(e,t)=>{let n;const r=[];for(;(n=e.exec(t))!==null;)r.push(n);return r},bh=lt("HTMLFormElement"),Kh=e=>e.toLowerCase().replace(/[-_\s]([a-z\d])(\w*)/g,function(n,r,l){return r.toUpperCase()+l}),Ma=(({hasOwnProperty:e})=>(t,n)=>e.call(t,n))(Object.prototype),Gh=lt("RegExp"),Ed=(e,t)=>{const n=Object.getOwnPropertyDescriptors(e),r={};Vr(n,(l,o)=>{t(l,o,e)!==!1&&(r[o]=l)}),Object.defineProperties(e,r)},qh=e=>{Ed(e,(t,n)=>{if(je(e)&&["arguments","caller","callee"].indexOf(n)!==-1)return!1;const r=e[n];if(je(r)){if(t.enumerable=!1,"writable"in t){t.writable=!1;return}t.set||(t.set=()=>{throw Error("Can not rewrite read-only method '"+n+"'")})}})},Yh=(e,t)=>{const n={},r=l=>{l.forEach(o=>{n[o]=!0})};return Qn(e)?r(e):r(String(e).split(t)),n},Xh=()=>{},Jh=(e,t)=>(e=+e,Number.isFinite(e)?e:t),ri="abcdefghijklmnopqrstuvwxyz",ja="0123456789",kd={DIGIT:ja,ALPHA:ri,ALPHA_DIGIT:ri+ri.toUpperCase()+ja},Zh=(e=16,t=kd.ALPHA_DIGIT)=>{let n="";const{length:r}=t;for(;e--;)n+=t[Math.random()*r|0];return n};function 
e0(e){return!!(e&&je(e.append)&&e[Symbol.toStringTag]==="FormData"&&e[Symbol.iterator])}const t0=e=>{const t=new Array(10),n=(r,l)=>{if(xo(r)){if(t.indexOf(r)>=0)return;if(!("toJSON"in r)){t[l]=r;const o=Qn(r)?[]:{};return Vr(r,(i,u)=>{const s=n(i,l+1);!Mr(s)&&(o[u]=s)}),t[l]=void 0,o}}return r};return n(e,0)},n0=lt("AsyncFunction"),r0=e=>e&&(xo(e)||je(e))&&je(e.then)&&je(e.catch),S={isArray:Qn,isArrayBuffer:gd,isBuffer:Th,isFormData:$h,isArrayBufferView:Nh,isString:Ph,isNumber:vd,isBoolean:Lh,isObject:xo,isPlainObject:Tl,isUndefined:Mr,isDate:_h,isFile:Rh,isBlob:Oh,isRegExp:Gh,isFunction:je,isStream:Ah,isURLSearchParams:Dh,isTypedArray:Vh,isFileList:Fh,forEach:Vr,merge:fu,extend:Mh,trim:zh,stripBOM:jh,inherits:Uh,toFlatObject:Ih,kindOf:Eo,kindOfTest:lt,endsWith:Bh,toArray:Hh,forEachEntry:Wh,matchAll:Qh,isHTMLForm:bh,hasOwnProperty:Ma,hasOwnProp:Ma,reduceDescriptors:Ed,freezeMethods:qh,toObjectSet:Yh,toCamelCase:Kh,noop:Xh,toFiniteNumber:Jh,findKey:yd,global:wd,isContextDefined:Sd,ALPHABET:kd,generateString:Zh,isSpecCompliantForm:e0,toJSONObject:t0,isAsyncFn:n0,isThenable:r0};function z(e,t,n,r,l){Error.call(this),Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=new Error().stack,this.message=e,this.name="AxiosError",t&&(this.code=t),n&&(this.config=n),r&&(this.request=r),l&&(this.response=l)}S.inherits(z,Error,{toJSON:function(){return{message:this.message,name:this.name,description:this.description,number:this.number,fileName:this.fileName,lineNumber:this.lineNumber,columnNumber:this.columnNumber,stack:this.stack,config:S.toJSONObject(this.config),code:this.code,status:this.response&&this.response.status?this.response.status:null}}});const xd=z.prototype,Cd={};["ERR_BAD_OPTION_VALUE","ERR_BAD_OPTION","ECONNABORTED","ETIMEDOUT","ERR_NETWORK","ERR_FR_TOO_MANY_REDIRECTS","ERR_DEPRECATED","ERR_BAD_RESPONSE","ERR_BAD_REQUEST","ERR_CANCELED","ERR_NOT_SUPPORT","ERR_INVALID_URL"].forEach(e=>{Cd[e]={value:e}});Object.defineProperties(z,Cd);Object.defineProperty(xd,"isAxiosError",{value:!0});z.from=(e,t,n,r,l,o)=>{const i=Object.create(xd);return S.toFlatObject(e,i,function(s){return s!==Error.prototype},u=>u!=="isAxiosError"),z.call(i,e.message,t,n,r,l),i.cause=e,i.name=e.name,o&&Object.assign(i,o),i};const l0=null;function du(e){return S.isPlainObject(e)||S.isArray(e)}function Td(e){return S.endsWith(e,"[]")?e.slice(0,-2):e}function Ua(e,t,n){return e?e.concat(t).map(function(l,o){return l=Td(l),!n&&o?"["+l+"]":l}).join(n?".":""):t}function o0(e){return S.isArray(e)&&!e.some(du)}const i0=S.toFlatObject(S,{},null,function(t){return/^is[A-Z]/.test(t)});function Co(e,t,n){if(!S.isObject(e))throw new TypeError("target must be an object");t=t||new FormData,n=S.toFlatObject(n,{metaTokens:!0,dots:!1,indexes:!1},!1,function(v,P){return!S.isUndefined(P[v])});const r=n.metaTokens,l=n.visitor||c,o=n.dots,i=n.indexes,s=(n.Blob||typeof Blob<"u"&&Blob)&&S.isSpecCompliantForm(t);if(!S.isFunction(l))throw new TypeError("visitor must be a function");function a(m){if(m===null)return"";if(S.isDate(m))return m.toISOString();if(!s&&S.isBlob(m))throw new z("Blob is not supported. 
Use a Buffer instead.");return S.isArrayBuffer(m)||S.isTypedArray(m)?s&&typeof Blob=="function"?new Blob([m]):Buffer.from(m):m}function c(m,v,P){let p=m;if(m&&!P&&typeof m=="object"){if(S.endsWith(v,"{}"))v=r?v:v.slice(0,-2),m=JSON.stringify(m);else if(S.isArray(m)&&o0(m)||(S.isFileList(m)||S.endsWith(v,"[]"))&&(p=S.toArray(m)))return v=Td(v),p.forEach(function(g,E){!(S.isUndefined(g)||g===null)&&t.append(i===!0?Ua([v],E,o):i===null?v:v+"[]",a(g))}),!1}return du(m)?!0:(t.append(Ua(P,v,o),a(m)),!1)}const d=[],h=Object.assign(i0,{defaultVisitor:c,convertValue:a,isVisitable:du});function y(m,v){if(!S.isUndefined(m)){if(d.indexOf(m)!==-1)throw Error("Circular reference detected in "+v.join("."));d.push(m),S.forEach(m,function(p,f){(!(S.isUndefined(p)||p===null)&&l.call(t,p,S.isString(f)?f.trim():f,v,h))===!0&&y(p,v?v.concat(f):[f])}),d.pop()}}if(!S.isObject(e))throw new TypeError("data must be an object");return y(e),t}function Ia(e){const t={"!":"%21","'":"%27","(":"%28",")":"%29","~":"%7E","%20":"+","%00":"\0"};return encodeURIComponent(e).replace(/[!'()~]|%20|%00/g,function(r){return t[r]})}function ys(e,t){this._pairs=[],e&&Co(e,this,t)}const Nd=ys.prototype;Nd.append=function(t,n){this._pairs.push([t,n])};Nd.toString=function(t){const n=t?function(r){return t.call(this,r,Ia)}:Ia;return this._pairs.map(function(l){return n(l[0])+"="+n(l[1])},"").join("&")};function u0(e){return encodeURIComponent(e).replace(/%3A/gi,":").replace(/%24/g,"$").replace(/%2C/gi,",").replace(/%20/g,"+").replace(/%5B/gi,"[").replace(/%5D/gi,"]")}function Pd(e,t,n){if(!t)return e;const r=n&&n.encode||u0,l=n&&n.serialize;let o;if(l?o=l(t,n):o=S.isURLSearchParams(t)?t.toString():new ys(t,n).toString(r),o){const i=e.indexOf("#");i!==-1&&(e=e.slice(0,i)),e+=(e.indexOf("?")===-1?"?":"&")+o}return e}class s0{constructor(){this.handlers=[]}use(t,n,r){return this.handlers.push({fulfilled:t,rejected:n,synchronous:r?r.synchronous:!1,runWhen:r?r.runWhen:null}),this.handlers.length-1}eject(t){this.handlers[t]&&(this.handlers[t]=null)}clear(){this.handlers&&(this.handlers=[])}forEach(t){S.forEach(this.handlers,function(r){r!==null&&t(r)})}}const Ba=s0,Ld={silentJSONParsing:!0,forcedJSONParsing:!0,clarifyTimeoutError:!1},a0=typeof URLSearchParams<"u"?URLSearchParams:ys,c0=typeof FormData<"u"?FormData:null,f0=typeof Blob<"u"?Blob:null,d0=(()=>{let e;return typeof navigator<"u"&&((e=navigator.product)==="ReactNative"||e==="NativeScript"||e==="NS")?!1:typeof window<"u"&&typeof document<"u"})(),p0=(()=>typeof WorkerGlobalScope<"u"&&self instanceof WorkerGlobalScope&&typeof self.importScripts=="function")(),tt={isBrowser:!0,classes:{URLSearchParams:a0,FormData:c0,Blob:f0},isStandardBrowserEnv:d0,isStandardBrowserWebWorkerEnv:p0,protocols:["http","https","file","blob","url","data"]};function m0(e,t){return Co(e,new tt.classes.URLSearchParams,Object.assign({visitor:function(n,r,l,o){return tt.isNode&&S.isBuffer(n)?(this.append(r,n.toString("base64")),!1):o.defaultVisitor.apply(this,arguments)}},t))}function h0(e){return S.matchAll(/\w+|\[(\w*)]/g,e).map(t=>t[0]==="[]"?"":t[1]||t[0])}function g0(e){const t={},n=Object.keys(e);let r;const l=n.length;let o;for(r=0;r=n.length;return i=!i&&S.isArray(l)?l.length:i,s?(S.hasOwnProp(l,i)?l[i]=[l[i],r]:l[i]=r,!u):((!l[i]||!S.isObject(l[i]))&&(l[i]=[]),t(n,r,l[i],o)&&S.isArray(l[i])&&(l[i]=g0(l[i])),!u)}if(S.isFormData(e)&&S.isFunction(e.entries)){const n={};return S.forEachEntry(e,(r,l)=>{t(h0(r),l,n,0)}),n}return null}const v0={"Content-Type":void 0};function 
y0(e,t,n){if(S.isString(e))try{return(t||JSON.parse)(e),S.trim(e)}catch(r){if(r.name!=="SyntaxError")throw r}return(n||JSON.stringify)(e)}const To={transitional:Ld,adapter:["xhr","http"],transformRequest:[function(t,n){const r=n.getContentType()||"",l=r.indexOf("application/json")>-1,o=S.isObject(t);if(o&&S.isHTMLForm(t)&&(t=new FormData(t)),S.isFormData(t))return l&&l?JSON.stringify(_d(t)):t;if(S.isArrayBuffer(t)||S.isBuffer(t)||S.isStream(t)||S.isFile(t)||S.isBlob(t))return t;if(S.isArrayBufferView(t))return t.buffer;if(S.isURLSearchParams(t))return n.setContentType("application/x-www-form-urlencoded;charset=utf-8",!1),t.toString();let u;if(o){if(r.indexOf("application/x-www-form-urlencoded")>-1)return m0(t,this.formSerializer).toString();if((u=S.isFileList(t))||r.indexOf("multipart/form-data")>-1){const s=this.env&&this.env.FormData;return Co(u?{"files[]":t}:t,s&&new s,this.formSerializer)}}return o||l?(n.setContentType("application/json",!1),y0(t)):t}],transformResponse:[function(t){const n=this.transitional||To.transitional,r=n&&n.forcedJSONParsing,l=this.responseType==="json";if(t&&S.isString(t)&&(r&&!this.responseType||l)){const i=!(n&&n.silentJSONParsing)&&l;try{return JSON.parse(t)}catch(u){if(i)throw u.name==="SyntaxError"?z.from(u,z.ERR_BAD_RESPONSE,this,null,this.response):u}}return t}],timeout:0,xsrfCookieName:"XSRF-TOKEN",xsrfHeaderName:"X-XSRF-TOKEN",maxContentLength:-1,maxBodyLength:-1,env:{FormData:tt.classes.FormData,Blob:tt.classes.Blob},validateStatus:function(t){return t>=200&&t<300},headers:{common:{Accept:"application/json, text/plain, */*"}}};S.forEach(["delete","get","head"],function(t){To.headers[t]={}});S.forEach(["post","put","patch"],function(t){To.headers[t]=S.merge(v0)});const ws=To,w0=S.toObjectSet(["age","authorization","content-length","content-type","etag","expires","from","host","if-modified-since","if-unmodified-since","last-modified","location","max-forwards","proxy-authorization","referer","retry-after","user-agent"]),S0=e=>{const t={};let n,r,l;return e&&e.split(` -`).forEach(function(i){l=i.indexOf(":"),n=i.substring(0,l).trim().toLowerCase(),r=i.substring(l+1).trim(),!(!n||t[n]&&w0[n])&&(n==="set-cookie"?t[n]?t[n].push(r):t[n]=[r]:t[n]=t[n]?t[n]+", "+r:r)}),t},Ha=Symbol("internals");function rr(e){return e&&String(e).trim().toLowerCase()}function Nl(e){return e===!1||e==null?e:S.isArray(e)?e.map(Nl):String(e)}function E0(e){const t=Object.create(null),n=/([^\s,;=]+)\s*(?:=\s*([^,;]+))?/g;let r;for(;r=n.exec(e);)t[r[1]]=r[2];return t}const k0=e=>/^[-_a-zA-Z0-9^`|~,!#$%&'*+.]+$/.test(e.trim());function li(e,t,n,r,l){if(S.isFunction(r))return r.call(this,t,n);if(l&&(t=n),!!S.isString(t)){if(S.isString(r))return t.indexOf(r)!==-1;if(S.isRegExp(r))return r.test(t)}}function x0(e){return e.trim().toLowerCase().replace(/([a-z\d])(\w*)/g,(t,n,r)=>n.toUpperCase()+r)}function C0(e,t){const n=S.toCamelCase(" "+t);["get","set","has"].forEach(r=>{Object.defineProperty(e,r+n,{value:function(l,o,i){return this[r].call(this,t,l,o,i)},configurable:!0})})}class No{constructor(t){t&&this.set(t)}set(t,n,r){const l=this;function o(u,s,a){const c=rr(s);if(!c)throw new Error("header name must be a non-empty string");const d=S.findKey(l,c);(!d||l[d]===void 0||a===!0||a===void 0&&l[d]!==!1)&&(l[d||s]=Nl(u))}const i=(u,s)=>S.forEach(u,(a,c)=>o(a,c,s));return S.isPlainObject(t)||t instanceof this.constructor?i(t,n):S.isString(t)&&(t=t.trim())&&!k0(t)?i(S0(t),n):t!=null&&o(n,t,r),this}get(t,n){if(t=rr(t),t){const r=S.findKey(this,t);if(r){const l=this[r];if(!n)return 
l;if(n===!0)return E0(l);if(S.isFunction(n))return n.call(this,l,r);if(S.isRegExp(n))return n.exec(l);throw new TypeError("parser must be boolean|regexp|function")}}}has(t,n){if(t=rr(t),t){const r=S.findKey(this,t);return!!(r&&this[r]!==void 0&&(!n||li(this,this[r],r,n)))}return!1}delete(t,n){const r=this;let l=!1;function o(i){if(i=rr(i),i){const u=S.findKey(r,i);u&&(!n||li(r,r[u],u,n))&&(delete r[u],l=!0)}}return S.isArray(t)?t.forEach(o):o(t),l}clear(t){const n=Object.keys(this);let r=n.length,l=!1;for(;r--;){const o=n[r];(!t||li(this,this[o],o,t,!0))&&(delete this[o],l=!0)}return l}normalize(t){const n=this,r={};return S.forEach(this,(l,o)=>{const i=S.findKey(r,o);if(i){n[i]=Nl(l),delete n[o];return}const u=t?x0(o):String(o).trim();u!==o&&delete n[o],n[u]=Nl(l),r[u]=!0}),this}concat(...t){return this.constructor.concat(this,...t)}toJSON(t){const n=Object.create(null);return S.forEach(this,(r,l)=>{r!=null&&r!==!1&&(n[l]=t&&S.isArray(r)?r.join(", "):r)}),n}[Symbol.iterator](){return Object.entries(this.toJSON())[Symbol.iterator]()}toString(){return Object.entries(this.toJSON()).map(([t,n])=>t+": "+n).join(` -`)}get[Symbol.toStringTag](){return"AxiosHeaders"}static from(t){return t instanceof this?t:new this(t)}static concat(t,...n){const r=new this(t);return n.forEach(l=>r.set(l)),r}static accessor(t){const r=(this[Ha]=this[Ha]={accessors:{}}).accessors,l=this.prototype;function o(i){const u=rr(i);r[u]||(C0(l,i),r[u]=!0)}return S.isArray(t)?t.forEach(o):o(t),this}}No.accessor(["Content-Type","Content-Length","Accept","Accept-Encoding","User-Agent","Authorization"]);S.freezeMethods(No.prototype);S.freezeMethods(No);const ht=No;function oi(e,t){const n=this||ws,r=t||n,l=ht.from(r.headers);let o=r.data;return S.forEach(e,function(u){o=u.call(n,o,l.normalize(),t?t.status:void 0)}),l.normalize(),o}function Rd(e){return!!(e&&e.__CANCEL__)}function Wr(e,t,n){z.call(this,e??"canceled",z.ERR_CANCELED,t,n),this.name="CanceledError"}S.inherits(Wr,z,{__CANCEL__:!0});function T0(e,t,n){const r=n.config.validateStatus;!n.status||!r||r(n.status)?e(n):t(new z("Request failed with status code "+n.status,[z.ERR_BAD_REQUEST,z.ERR_BAD_RESPONSE][Math.floor(n.status/100)-4],n.config,n.request,n))}const N0=tt.isStandardBrowserEnv?function(){return{write:function(n,r,l,o,i,u){const s=[];s.push(n+"="+encodeURIComponent(r)),S.isNumber(l)&&s.push("expires="+new Date(l).toGMTString()),S.isString(o)&&s.push("path="+o),S.isString(i)&&s.push("domain="+i),u===!0&&s.push("secure"),document.cookie=s.join("; ")},read:function(n){const r=document.cookie.match(new RegExp("(^|;\\s*)("+n+")=([^;]*)"));return r?decodeURIComponent(r[3]):null},remove:function(n){this.write(n,"",Date.now()-864e5)}}}():function(){return{write:function(){},read:function(){return null},remove:function(){}}}();function P0(e){return/^([a-z][a-z\d+\-.]*:)?\/\//i.test(e)}function L0(e,t){return t?e.replace(/\/+$/,"")+"/"+t.replace(/^\/+/,""):e}function Od(e,t){return e&&!P0(t)?L0(e,t):t}const _0=tt.isStandardBrowserEnv?function(){const t=/(msie|trident)/i.test(navigator.userAgent),n=document.createElement("a");let r;function l(o){let i=o;return t&&(n.setAttribute("href",i),i=n.href),n.setAttribute("href",i),{href:n.href,protocol:n.protocol?n.protocol.replace(/:$/,""):"",host:n.host,search:n.search?n.search.replace(/^\?/,""):"",hash:n.hash?n.hash.replace(/^#/,""):"",hostname:n.hostname,port:n.port,pathname:n.pathname.charAt(0)==="/"?n.pathname:"/"+n.pathname}}return r=l(window.location.href),function(i){const u=S.isString(i)?l(i):i;return 
u.protocol===r.protocol&&u.host===r.host}}():function(){return function(){return!0}}();function R0(e){const t=/^([-+\w]{1,25})(:?\/\/|:)/.exec(e);return t&&t[1]||""}function O0(e,t){e=e||10;const n=new Array(e),r=new Array(e);let l=0,o=0,i;return t=t!==void 0?t:1e3,function(s){const a=Date.now(),c=r[o];i||(i=a),n[l]=s,r[l]=a;let d=o,h=0;for(;d!==l;)h+=n[d++],d=d%e;if(l=(l+1)%e,l===o&&(o=(o+1)%e),a-i{const o=l.loaded,i=l.lengthComputable?l.total:void 0,u=o-n,s=r(u),a=o<=i;n=o;const c={loaded:o,total:i,progress:i?o/i:void 0,bytes:u,rate:s||void 0,estimated:s&&i&&a?(i-o)/s:void 0,event:l};c[t?"download":"upload"]=!0,e(c)}}const F0=typeof XMLHttpRequest<"u",A0=F0&&function(e){return new Promise(function(n,r){let l=e.data;const o=ht.from(e.headers).normalize(),i=e.responseType;let u;function s(){e.cancelToken&&e.cancelToken.unsubscribe(u),e.signal&&e.signal.removeEventListener("abort",u)}S.isFormData(l)&&(tt.isStandardBrowserEnv||tt.isStandardBrowserWebWorkerEnv?o.setContentType(!1):o.setContentType("multipart/form-data;",!1));let a=new XMLHttpRequest;if(e.auth){const y=e.auth.username||"",m=e.auth.password?unescape(encodeURIComponent(e.auth.password)):"";o.set("Authorization","Basic "+btoa(y+":"+m))}const c=Od(e.baseURL,e.url);a.open(e.method.toUpperCase(),Pd(c,e.params,e.paramsSerializer),!0),a.timeout=e.timeout;function d(){if(!a)return;const y=ht.from("getAllResponseHeaders"in a&&a.getAllResponseHeaders()),v={data:!i||i==="text"||i==="json"?a.responseText:a.response,status:a.status,statusText:a.statusText,headers:y,config:e,request:a};T0(function(p){n(p),s()},function(p){r(p),s()},v),a=null}if("onloadend"in a?a.onloadend=d:a.onreadystatechange=function(){!a||a.readyState!==4||a.status===0&&!(a.responseURL&&a.responseURL.indexOf("file:")===0)||setTimeout(d)},a.onabort=function(){a&&(r(new z("Request aborted",z.ECONNABORTED,e,a)),a=null)},a.onerror=function(){r(new z("Network Error",z.ERR_NETWORK,e,a)),a=null},a.ontimeout=function(){let m=e.timeout?"timeout of "+e.timeout+"ms exceeded":"timeout exceeded";const v=e.transitional||Ld;e.timeoutErrorMessage&&(m=e.timeoutErrorMessage),r(new z(m,v.clarifyTimeoutError?z.ETIMEDOUT:z.ECONNABORTED,e,a)),a=null},tt.isStandardBrowserEnv){const y=(e.withCredentials||_0(c))&&e.xsrfCookieName&&N0.read(e.xsrfCookieName);y&&o.set(e.xsrfHeaderName,y)}l===void 0&&o.setContentType(null),"setRequestHeader"in a&&S.forEach(o.toJSON(),function(m,v){a.setRequestHeader(v,m)}),S.isUndefined(e.withCredentials)||(a.withCredentials=!!e.withCredentials),i&&i!=="json"&&(a.responseType=e.responseType),typeof e.onDownloadProgress=="function"&&a.addEventListener("progress",Va(e.onDownloadProgress,!0)),typeof e.onUploadProgress=="function"&&a.upload&&a.upload.addEventListener("progress",Va(e.onUploadProgress)),(e.cancelToken||e.signal)&&(u=y=>{a&&(r(!y||y.type?new Wr(null,e,a):y),a.abort(),a=null)},e.cancelToken&&e.cancelToken.subscribe(u),e.signal&&(e.signal.aborted?u():e.signal.addEventListener("abort",u)));const h=R0(c);if(h&&tt.protocols.indexOf(h)===-1){r(new z("Unsupported protocol "+h+":",z.ERR_BAD_REQUEST,e));return}a.send(l||null)})},Pl={http:l0,xhr:A0};S.forEach(Pl,(e,t)=>{if(e){try{Object.defineProperty(e,"name",{value:t})}catch{}Object.defineProperty(e,"adapterName",{value:t})}});const $0={getAdapter:e=>{e=S.isArray(e)?e:[e];const{length:t}=e;let n,r;for(let l=0;le instanceof ht?e.toJSON():e;function Bn(e,t){t=t||{};const n={};function r(a,c,d){return 
S.isPlainObject(a)&&S.isPlainObject(c)?S.merge.call({caseless:d},a,c):S.isPlainObject(c)?S.merge({},c):S.isArray(c)?c.slice():c}function l(a,c,d){if(S.isUndefined(c)){if(!S.isUndefined(a))return r(void 0,a,d)}else return r(a,c,d)}function o(a,c){if(!S.isUndefined(c))return r(void 0,c)}function i(a,c){if(S.isUndefined(c)){if(!S.isUndefined(a))return r(void 0,a)}else return r(void 0,c)}function u(a,c,d){if(d in t)return r(a,c);if(d in e)return r(void 0,a)}const s={url:o,method:o,data:o,baseURL:i,transformRequest:i,transformResponse:i,paramsSerializer:i,timeout:i,timeoutMessage:i,withCredentials:i,adapter:i,responseType:i,xsrfCookieName:i,xsrfHeaderName:i,onUploadProgress:i,onDownloadProgress:i,decompress:i,maxContentLength:i,maxBodyLength:i,beforeRedirect:i,transport:i,httpAgent:i,httpsAgent:i,cancelToken:i,socketPath:i,responseEncoding:i,validateStatus:u,headers:(a,c)=>l(Qa(a),Qa(c),!0)};return S.forEach(Object.keys(Object.assign({},e,t)),function(c){const d=s[c]||l,h=d(e[c],t[c],c);S.isUndefined(h)&&d!==u||(n[c]=h)}),n}const Fd="1.4.0",Ss={};["object","boolean","number","function","string","symbol"].forEach((e,t)=>{Ss[e]=function(r){return typeof r===e||"a"+(t<1?"n ":" ")+e}});const ba={};Ss.transitional=function(t,n,r){function l(o,i){return"[Axios v"+Fd+"] Transitional option '"+o+"'"+i+(r?". "+r:"")}return(o,i,u)=>{if(t===!1)throw new z(l(i," has been removed"+(n?" in "+n:"")),z.ERR_DEPRECATED);return n&&!ba[i]&&(ba[i]=!0,console.warn(l(i," has been deprecated since v"+n+" and will be removed in the near future"))),t?t(o,i,u):!0}};function D0(e,t,n){if(typeof e!="object")throw new z("options must be an object",z.ERR_BAD_OPTION_VALUE);const r=Object.keys(e);let l=r.length;for(;l-- >0;){const o=r[l],i=t[o];if(i){const u=e[o],s=u===void 0||i(u,o,e);if(s!==!0)throw new z("option "+o+" must be "+s,z.ERR_BAD_OPTION_VALUE);continue}if(n!==!0)throw new z("Unknown option "+o,z.ERR_BAD_OPTION)}}const pu={assertOptions:D0,validators:Ss},Ct=pu.validators;class eo{constructor(t){this.defaults=t,this.interceptors={request:new Ba,response:new Ba}}request(t,n){typeof t=="string"?(n=n||{},n.url=t):n=t||{},n=Bn(this.defaults,n);const{transitional:r,paramsSerializer:l,headers:o}=n;r!==void 0&&pu.assertOptions(r,{silentJSONParsing:Ct.transitional(Ct.boolean),forcedJSONParsing:Ct.transitional(Ct.boolean),clarifyTimeoutError:Ct.transitional(Ct.boolean)},!1),l!=null&&(S.isFunction(l)?n.paramsSerializer={serialize:l}:pu.assertOptions(l,{encode:Ct.function,serialize:Ct.function},!0)),n.method=(n.method||this.defaults.method||"get").toLowerCase();let i;i=o&&S.merge(o.common,o[n.method]),i&&S.forEach(["delete","get","head","post","put","patch","common"],m=>{delete o[m]}),n.headers=ht.concat(i,o);const u=[];let s=!0;this.interceptors.request.forEach(function(v){typeof v.runWhen=="function"&&v.runWhen(n)===!1||(s=s&&v.synchronous,u.unshift(v.fulfilled,v.rejected))});const a=[];this.interceptors.response.forEach(function(v){a.push(v.fulfilled,v.rejected)});let c,d=0,h;if(!s){const m=[Wa.bind(this),void 0];for(m.unshift.apply(m,u),m.push.apply(m,a),h=m.length,c=Promise.resolve(n);d{if(!r._listeners)return;let o=r._listeners.length;for(;o-- >0;)r._listeners[o](l);r._listeners=null}),this.promise.then=l=>{let o;const i=new Promise(u=>{r.subscribe(u),o=u}).then(l);return i.cancel=function(){r.unsubscribe(o)},i},t(function(o,i,u){r.reason||(r.reason=new Wr(o,i,u),n(r.reason))})}throwIfRequested(){if(this.reason)throw 
this.reason}subscribe(t){if(this.reason){t(this.reason);return}this._listeners?this._listeners.push(t):this._listeners=[t]}unsubscribe(t){if(!this._listeners)return;const n=this._listeners.indexOf(t);n!==-1&&this._listeners.splice(n,1)}static source(){let t;return{token:new Es(function(l){t=l}),cancel:t}}}const z0=Es;function M0(e){return function(n){return e.apply(null,n)}}function j0(e){return S.isObject(e)&&e.isAxiosError===!0}const mu={Continue:100,SwitchingProtocols:101,Processing:102,EarlyHints:103,Ok:200,Created:201,Accepted:202,NonAuthoritativeInformation:203,NoContent:204,ResetContent:205,PartialContent:206,MultiStatus:207,AlreadyReported:208,ImUsed:226,MultipleChoices:300,MovedPermanently:301,Found:302,SeeOther:303,NotModified:304,UseProxy:305,Unused:306,TemporaryRedirect:307,PermanentRedirect:308,BadRequest:400,Unauthorized:401,PaymentRequired:402,Forbidden:403,NotFound:404,MethodNotAllowed:405,NotAcceptable:406,ProxyAuthenticationRequired:407,RequestTimeout:408,Conflict:409,Gone:410,LengthRequired:411,PreconditionFailed:412,PayloadTooLarge:413,UriTooLong:414,UnsupportedMediaType:415,RangeNotSatisfiable:416,ExpectationFailed:417,ImATeapot:418,MisdirectedRequest:421,UnprocessableEntity:422,Locked:423,FailedDependency:424,TooEarly:425,UpgradeRequired:426,PreconditionRequired:428,TooManyRequests:429,RequestHeaderFieldsTooLarge:431,UnavailableForLegalReasons:451,InternalServerError:500,NotImplemented:501,BadGateway:502,ServiceUnavailable:503,GatewayTimeout:504,HttpVersionNotSupported:505,VariantAlsoNegotiates:506,InsufficientStorage:507,LoopDetected:508,NotExtended:510,NetworkAuthenticationRequired:511};Object.entries(mu).forEach(([e,t])=>{mu[t]=e});const U0=mu;function Ad(e){const t=new Ll(e),n=hd(Ll.prototype.request,t);return S.extend(n,Ll.prototype,t,{allOwnKeys:!0}),S.extend(n,t,null,{allOwnKeys:!0}),n.create=function(l){return Ad(Bn(e,l))},n}const oe=Ad(ws);oe.Axios=Ll;oe.CanceledError=Wr;oe.CancelToken=z0;oe.isCancel=Rd;oe.VERSION=Fd;oe.toFormData=Co;oe.AxiosError=z;oe.Cancel=oe.CanceledError;oe.all=function(t){return Promise.all(t)};oe.spread=M0;oe.isAxiosError=j0;oe.mergeConfig=Bn;oe.AxiosHeaders=ht;oe.formToJSON=e=>_d(S.isHTMLForm(e)?new FormData(e):e);oe.HttpStatusCode=U0;oe.default=oe;const I0=oe;var B0=Object.defineProperty,H0=(e,t,n)=>t in e?B0(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n,ui=(e,t,n)=>(H0(e,typeof t!="symbol"?t+"":t,n),n);let V0=class{constructor(){ui(this,"current",this.detect()),ui(this,"handoffState","pending"),ui(this,"currentId",0)}set(t){this.current!==t&&(this.handoffState="pending",this.currentId=0,this.current=t)}reset(){this.set(this.detect())}nextId(){return++this.currentId}get isServer(){return this.current==="server"}get isClient(){return this.current==="client"}detect(){return typeof window>"u"||typeof document>"u"?"server":"client"}handoff(){this.handoffState==="pending"&&(this.handoffState="complete")}get isHandoffComplete(){return this.handoffState==="complete"}},gt=new V0,ot=(e,t)=>{gt.isServer?w.useEffect(e,t):w.useLayoutEffect(e,t)};function vt(e){let t=w.useRef(e);return ot(()=>{t.current=e},[e]),t}function Qr(e){typeof queueMicrotask=="function"?queueMicrotask(e):Promise.resolve().then(e).catch(t=>setTimeout(()=>{throw t}))}function bn(){let e=[],t={addEventListener(n,r,l,o){return n.addEventListener(r,l,o),t.add(()=>n.removeEventListener(r,l,o))},requestAnimationFrame(...n){let r=requestAnimationFrame(...n);return t.add(()=>cancelAnimationFrame(r))},nextFrame(...n){return 
t.requestAnimationFrame(()=>t.requestAnimationFrame(...n))},setTimeout(...n){let r=setTimeout(...n);return t.add(()=>clearTimeout(r))},microTask(...n){let r={current:!0};return Qr(()=>{r.current&&n[0]()}),t.add(()=>{r.current=!1})},style(n,r,l){let o=n.style.getPropertyValue(r);return Object.assign(n.style,{[r]:l}),this.add(()=>{Object.assign(n.style,{[r]:o})})},group(n){let r=bn();return n(r),this.add(()=>r.dispose())},add(n){return e.push(n),()=>{let r=e.indexOf(n);if(r>=0)for(let l of e.splice(r,1))l()}},dispose(){for(let n of e.splice(0))n()}};return t}function ks(){let[e]=w.useState(bn);return w.useEffect(()=>()=>e.dispose(),[e]),e}let ue=function(e){let t=vt(e);return D.useCallback((...n)=>t.current(...n),[t])};function Kn(){let[e,t]=w.useState(gt.isHandoffComplete);return e&>.isHandoffComplete===!1&&t(!1),w.useEffect(()=>{e!==!0&&t(!0)},[e]),w.useEffect(()=>gt.handoff(),[]),e}var Ka;let Gn=(Ka=D.useId)!=null?Ka:function(){let e=Kn(),[t,n]=D.useState(e?()=>gt.nextId():null);return ot(()=>{t===null&&n(gt.nextId())},[t]),t!=null?""+t:void 0};function he(e,t,...n){if(e in t){let l=t[e];return typeof l=="function"?l(...n):l}let r=new Error(`Tried to handle "${e}" but there is no handler defined. Only defined handlers are: ${Object.keys(t).map(l=>`"${l}"`).join(", ")}.`);throw Error.captureStackTrace&&Error.captureStackTrace(r,he),r}function $d(e){return gt.isServer?null:e instanceof Node?e.ownerDocument:e!=null&&e.hasOwnProperty("current")&&e.current instanceof Node?e.current.ownerDocument:document}let hu=["[contentEditable=true]","[tabindex]","a[href]","area[href]","button:not([disabled])","iframe","input:not([disabled])","select:not([disabled])","textarea:not([disabled])"].map(e=>`${e}:not([tabindex='-1'])`).join(",");var Xt=(e=>(e[e.First=1]="First",e[e.Previous=2]="Previous",e[e.Next=4]="Next",e[e.Last=8]="Last",e[e.WrapAround=16]="WrapAround",e[e.NoScroll=32]="NoScroll",e))(Xt||{}),Dd=(e=>(e[e.Error=0]="Error",e[e.Overflow=1]="Overflow",e[e.Success=2]="Success",e[e.Underflow=3]="Underflow",e))(Dd||{}),W0=(e=>(e[e.Previous=-1]="Previous",e[e.Next=1]="Next",e))(W0||{});function Q0(e=document.body){return e==null?[]:Array.from(e.querySelectorAll(hu)).sort((t,n)=>Math.sign((t.tabIndex||Number.MAX_SAFE_INTEGER)-(n.tabIndex||Number.MAX_SAFE_INTEGER)))}var zd=(e=>(e[e.Strict=0]="Strict",e[e.Loose=1]="Loose",e))(zd||{});function b0(e,t=0){var n;return e===((n=$d(e))==null?void 0:n.body)?!1:he(t,{[0](){return e.matches(hu)},[1](){let r=e;for(;r!==null;){if(r.matches(hu))return!0;r=r.parentElement}return!1}})}var K0=(e=>(e[e.Keyboard=0]="Keyboard",e[e.Mouse=1]="Mouse",e))(K0||{});typeof window<"u"&&typeof document<"u"&&(document.addEventListener("keydown",e=>{e.metaKey||e.altKey||e.ctrlKey||(document.documentElement.dataset.headlessuiFocusVisible="")},!0),document.addEventListener("click",e=>{e.detail===1?delete document.documentElement.dataset.headlessuiFocusVisible:e.detail===0&&(document.documentElement.dataset.headlessuiFocusVisible="")},!0));function ln(e){e==null||e.focus({preventScroll:!0})}let G0=["textarea","input"].join(",");function q0(e){var t,n;return(n=(t=e==null?void 0:e.matches)==null?void 0:t.call(e,G0))!=null?n:!1}function Y0(e,t=n=>n){return e.slice().sort((n,r)=>{let l=t(n),o=t(r);if(l===null||o===null)return 0;let i=l.compareDocumentPosition(o);return i&Node.DOCUMENT_POSITION_FOLLOWING?-1:i&Node.DOCUMENT_POSITION_PRECEDING?1:0})}function _l(e,t,{sorted:n=!0,relativeTo:r=null,skipElements:l=[]}={}){let 
o=Array.isArray(e)?e.length>0?e[0].ownerDocument:document:e.ownerDocument,i=Array.isArray(e)?n?Y0(e):e:Q0(e);l.length>0&&i.length>1&&(i=i.filter(y=>!l.includes(y))),r=r??o.activeElement;let u=(()=>{if(t&5)return 1;if(t&10)return-1;throw new Error("Missing Focus.First, Focus.Previous, Focus.Next or Focus.Last")})(),s=(()=>{if(t&1)return 0;if(t&2)return Math.max(0,i.indexOf(r))-1;if(t&4)return Math.max(0,i.indexOf(r))+1;if(t&8)return i.length-1;throw new Error("Missing Focus.First, Focus.Previous, Focus.Next or Focus.Last")})(),a=t&32?{preventScroll:!0}:{},c=0,d=i.length,h;do{if(c>=d||c+d<=0)return 0;let y=s+c;if(t&16)y=(y+d)%d;else{if(y<0)return 3;if(y>=d)return 1}h=i[y],h==null||h.focus(a),c+=u}while(h!==o.activeElement);return t&6&&q0(h)&&h.select(),2}function si(e,t,n){let r=vt(t);w.useEffect(()=>{function l(o){r.current(o)}return document.addEventListener(e,l,n),()=>document.removeEventListener(e,l,n)},[e,n])}function X0(e,t,n=!0){let r=w.useRef(!1);w.useEffect(()=>{requestAnimationFrame(()=>{r.current=n})},[n]);function l(i,u){if(!r.current||i.defaultPrevented)return;let s=function c(d){return typeof d=="function"?c(d()):Array.isArray(d)||d instanceof Set?d:[d]}(e),a=u(i);if(a!==null&&a.getRootNode().contains(a)){for(let c of s){if(c===null)continue;let d=c instanceof HTMLElement?c:c.current;if(d!=null&&d.contains(a)||i.composed&&i.composedPath().includes(d))return}return!b0(a,zd.Loose)&&a.tabIndex!==-1&&i.preventDefault(),t(i,a)}}let o=w.useRef(null);si("mousedown",i=>{var u,s;r.current&&(o.current=((s=(u=i.composedPath)==null?void 0:u.call(i))==null?void 0:s[0])||i.target)},!0),si("click",i=>{o.current&&(l(i,()=>o.current),o.current=null)},!0),si("blur",i=>l(i,()=>window.document.activeElement instanceof HTMLIFrameElement?window.document.activeElement:null),!0)}let Md=Symbol();function J0(e,t=!0){return Object.assign(e,{[Md]:t})}function Xe(...e){let t=w.useRef(e);w.useEffect(()=>{t.current=e},[e]);let n=ue(r=>{for(let l of t.current)l!=null&&(typeof l=="function"?l(r):l.current=r)});return e.every(r=>r==null||(r==null?void 0:r[Md]))?void 0:n}function gu(...e){return e.filter(Boolean).join(" ")}var to=(e=>(e[e.None=0]="None",e[e.RenderStrategy=1]="RenderStrategy",e[e.Static=2]="Static",e))(to||{}),pt=(e=>(e[e.Unmount=0]="Unmount",e[e.Hidden=1]="Hidden",e))(pt||{});function Be({ourProps:e,theirProps:t,slot:n,defaultTag:r,features:l,visible:o=!0,name:i}){let u=jd(t,e);if(o)return dl(u,n,r,i);let s=l??0;if(s&2){let{static:a=!1,...c}=u;if(a)return dl(c,n,r,i)}if(s&1){let{unmount:a=!0,...c}=u;return he(a?0:1,{[0](){return null},[1](){return dl({...c,hidden:!0,style:{display:"none"}},n,r,i)}})}return dl(u,n,r,i)}function dl(e,t={},n,r){let{as:l=n,children:o,refName:i="ref",...u}=ai(e,["unmount","static"]),s=e.ref!==void 0?{[i]:e.ref}:{},a=typeof o=="function"?o(t):o;"className"in u&&u.className&&typeof u.className=="function"&&(u.className=u.className(t));let c={};if(t){let d=!1,h=[];for(let[y,m]of Object.entries(t))typeof m=="boolean"&&(d=!0),m===!0&&h.push(y);d&&(c["data-headlessui-state"]=h.join(" "))}if(l===w.Fragment&&Object.keys(Ga(u)).length>0){if(!w.isValidElement(a)||Array.isArray(a)&&a.length>1)throw new Error(['Passing props on "Fragment"!',"",`The current component <${r} /> is rendering a "Fragment".`,"However we need to passthrough the following props:",Object.keys(u).map(m=>` - ${m}`).join(` -`),"","You can apply a few solutions:",['Add an `as="..."` prop, to ensure that we render an actual element instead of a "Fragment".',"Render a single element as the child so that we can 
forward the props onto that element."].map(m=>` - ${m}`).join(` -`)].join(` -`));let d=a.props,h=typeof(d==null?void 0:d.className)=="function"?(...m)=>gu(d==null?void 0:d.className(...m),u.className):gu(d==null?void 0:d.className,u.className),y=h?{className:h}:{};return w.cloneElement(a,Object.assign({},jd(a.props,Ga(ai(u,["ref"]))),c,s,Z0(a.ref,s.ref),y))}return w.createElement(l,Object.assign({},ai(u,["ref"]),l!==w.Fragment&&s,l!==w.Fragment&&c),a)}function Z0(...e){return{ref:e.every(t=>t==null)?void 0:t=>{for(let n of e)n!=null&&(typeof n=="function"?n(t):n.current=t)}}}function jd(...e){if(e.length===0)return{};if(e.length===1)return e[0];let t={},n={};for(let r of e)for(let l in r)l.startsWith("on")&&typeof r[l]=="function"?(n[l]!=null||(n[l]=[]),n[l].push(r[l])):t[l]=r[l];if(t.disabled||t["aria-disabled"])return Object.assign(t,Object.fromEntries(Object.keys(n).map(r=>[r,void 0])));for(let r in n)Object.assign(t,{[r](l,...o){let i=n[r];for(let u of i){if((l instanceof Event||(l==null?void 0:l.nativeEvent)instanceof Event)&&l.defaultPrevented)return;u(l,...o)}}});return t}function Ae(e){var t;return Object.assign(w.forwardRef(e),{displayName:(t=e.displayName)!=null?t:e.name})}function Ga(e){let t=Object.assign({},e);for(let n in t)t[n]===void 0&&delete t[n];return t}function ai(e,t=[]){let n=Object.assign({},e);for(let r of t)r in n&&delete n[r];return n}function e1(e){let t=e.parentElement,n=null;for(;t&&!(t instanceof HTMLFieldSetElement);)t instanceof HTMLLegendElement&&(n=t),t=t.parentElement;let r=(t==null?void 0:t.getAttribute("disabled"))==="";return r&&t1(n)?!1:r}function t1(e){if(!e)return!1;let t=e.previousElementSibling;for(;t!==null;){if(t instanceof HTMLLegendElement)return!1;t=t.previousElementSibling}return!0}let n1="div";var no=(e=>(e[e.None=1]="None",e[e.Focusable=2]="Focusable",e[e.Hidden=4]="Hidden",e))(no||{});function r1(e,t){let{features:n=1,...r}=e,l={ref:t,"aria-hidden":(n&2)===2?!0:void 0,style:{position:"fixed",top:1,left:1,width:1,height:0,padding:0,margin:-1,overflow:"hidden",clip:"rect(0, 0, 0, 0)",whiteSpace:"nowrap",borderWidth:"0",...(n&4)===4&&(n&2)!==2&&{display:"none"}}};return Be({ourProps:l,theirProps:r,slot:{},defaultTag:n1,name:"Hidden"})}let vu=Ae(r1),xs=w.createContext(null);xs.displayName="OpenClosedContext";var Pe=(e=>(e[e.Open=1]="Open",e[e.Closed=2]="Closed",e[e.Closing=4]="Closing",e[e.Opening=8]="Opening",e))(Pe||{});function Cs(){return w.useContext(xs)}function l1({value:e,children:t}){return D.createElement(xs.Provider,{value:e},t)}var Ud=(e=>(e.Space=" ",e.Enter="Enter",e.Escape="Escape",e.Backspace="Backspace",e.Delete="Delete",e.ArrowLeft="ArrowLeft",e.ArrowUp="ArrowUp",e.ArrowRight="ArrowRight",e.ArrowDown="ArrowDown",e.Home="Home",e.End="End",e.PageUp="PageUp",e.PageDown="PageDown",e.Tab="Tab",e))(Ud||{});function Ts(e,t){let n=w.useRef([]),r=ue(e);w.useEffect(()=>{let l=[...n.current];for(let[o,i]of t.entries())if(n.current[o]!==i){let u=r(t,l);return n.current=t,u}},[r,...t])}function o1(){return/iPhone/gi.test(window.navigator.platform)||/Mac/gi.test(window.navigator.platform)&&window.navigator.maxTouchPoints>0}function i1(e,t,n){let r=vt(t);w.useEffect(()=>{function l(o){r.current(o)}return window.addEventListener(e,l,n),()=>window.removeEventListener(e,l,n)},[e,n])}var cr=(e=>(e[e.Forwards=0]="Forwards",e[e.Backwards=1]="Backwards",e))(cr||{});function u1(){let e=w.useRef(0);return i1("keydown",t=>{t.key==="Tab"&&(e.current=t.shiftKey?1:0)},!0),e}function br(){let e=w.useRef(!1);return 
ot(()=>(e.current=!0,()=>{e.current=!1}),[]),e}function Po(...e){return w.useMemo(()=>$d(...e),[...e])}function Id(e,t,n,r){let l=vt(n);w.useEffect(()=>{e=e??window;function o(i){l.current(i)}return e.addEventListener(t,o,r),()=>e.removeEventListener(t,o,r)},[e,t,r])}function s1(e){function t(){document.readyState!=="loading"&&(e(),document.removeEventListener("DOMContentLoaded",t))}typeof window<"u"&&typeof document<"u"&&(document.addEventListener("DOMContentLoaded",t),t())}function Bd(e){if(!e)return new Set;if(typeof e=="function")return new Set(e());let t=new Set;for(let n of e.current)n.current instanceof HTMLElement&&t.add(n.current);return t}let a1="div";var Hd=(e=>(e[e.None=1]="None",e[e.InitialFocus=2]="InitialFocus",e[e.TabLock=4]="TabLock",e[e.FocusLock=8]="FocusLock",e[e.RestoreFocus=16]="RestoreFocus",e[e.All=30]="All",e))(Hd||{});function c1(e,t){let n=w.useRef(null),r=Xe(n,t),{initialFocus:l,containers:o,features:i=30,...u}=e;Kn()||(i=1);let s=Po(n);p1({ownerDocument:s},!!(i&16));let a=m1({ownerDocument:s,container:n,initialFocus:l},!!(i&2));h1({ownerDocument:s,container:n,containers:o,previousActiveElement:a},!!(i&8));let c=u1(),d=ue(v=>{let P=n.current;P&&(p=>p())(()=>{he(c.current,{[cr.Forwards]:()=>{_l(P,Xt.First,{skipElements:[v.relatedTarget]})},[cr.Backwards]:()=>{_l(P,Xt.Last,{skipElements:[v.relatedTarget]})}})})}),h=ks(),y=w.useRef(!1),m={ref:r,onKeyDown(v){v.key=="Tab"&&(y.current=!0,h.requestAnimationFrame(()=>{y.current=!1}))},onBlur(v){let P=Bd(o);n.current instanceof HTMLElement&&P.add(n.current);let p=v.relatedTarget;p instanceof HTMLElement&&p.dataset.headlessuiFocusGuard!=="true"&&(Vd(P,p)||(y.current?_l(n.current,he(c.current,{[cr.Forwards]:()=>Xt.Next,[cr.Backwards]:()=>Xt.Previous})|Xt.WrapAround,{relativeTo:v.target}):v.target instanceof HTMLElement&&ln(v.target)))}};return D.createElement(D.Fragment,null,!!(i&4)&&D.createElement(vu,{as:"button",type:"button","data-headlessui-focus-guard":!0,onFocus:d,features:no.Focusable}),Be({ourProps:m,theirProps:u,defaultTag:a1,name:"FocusTrap"}),!!(i&4)&&D.createElement(vu,{as:"button",type:"button","data-headlessui-focus-guard":!0,onFocus:d,features:no.Focusable}))}let f1=Ae(c1),lr=Object.assign(f1,{features:Hd}),_t=[];s1(()=>{function e(t){t.target instanceof HTMLElement&&t.target!==document.body&&_t[0]!==t.target&&(_t.unshift(t.target),_t=_t.filter(n=>n!=null&&n.isConnected),_t.splice(10))}window.addEventListener("click",e,{capture:!0}),window.addEventListener("mousedown",e,{capture:!0}),window.addEventListener("focus",e,{capture:!0}),document.body.addEventListener("click",e,{capture:!0}),document.body.addEventListener("mousedown",e,{capture:!0}),document.body.addEventListener("focus",e,{capture:!0})});function d1(e=!0){let t=w.useRef(_t.slice());return Ts(([n],[r])=>{r===!0&&n===!1&&Qr(()=>{t.current.splice(0)}),r===!1&&n===!0&&(t.current=_t.slice())},[e,_t,t]),ue(()=>{var n;return(n=t.current.find(r=>r!=null&&r.isConnected))!=null?n:null})}function p1({ownerDocument:e},t){let n=d1(t);Ts(()=>{t||(e==null?void 0:e.activeElement)===(e==null?void 0:e.body)&&ln(n())},[t]);let r=w.useRef(!1);w.useEffect(()=>(r.current=!1,()=>{r.current=!0,Qr(()=>{r.current&&ln(n())})}),[])}function m1({ownerDocument:e,container:t,initialFocus:n},r){let l=w.useRef(null),o=br();return Ts(()=>{if(!r)return;let i=t.current;i&&Qr(()=>{if(!o.current)return;let u=e==null?void 0:e.activeElement;if(n!=null&&n.current){if((n==null?void 0:n.current)===u){l.current=u;return}}else 
if(i.contains(u)){l.current=u;return}n!=null&&n.current?ln(n.current):_l(i,Xt.First)===Dd.Error&&console.warn("There are no focusable elements inside the "),l.current=e==null?void 0:e.activeElement})},[r]),l}function h1({ownerDocument:e,container:t,containers:n,previousActiveElement:r},l){let o=br();Id(e==null?void 0:e.defaultView,"focus",i=>{if(!l||!o.current)return;let u=Bd(n);t.current instanceof HTMLElement&&u.add(t.current);let s=r.current;if(!s)return;let a=i.target;a&&a instanceof HTMLElement?Vd(u,a)?(r.current=a,ln(a)):(i.preventDefault(),i.stopPropagation(),ln(s)):ln(r.current)},!0)}function Vd(e,t){for(let n of e)if(n.contains(t))return!0;return!1}let Wd=w.createContext(!1);function g1(){return w.useContext(Wd)}function yu(e){return D.createElement(Wd.Provider,{value:e.force},e.children)}function v1(e){let t=g1(),n=w.useContext(Qd),r=Po(e),[l,o]=w.useState(()=>{if(!t&&n!==null||gt.isServer)return null;let i=r==null?void 0:r.getElementById("headlessui-portal-root");if(i)return i;if(r===null)return null;let u=r.createElement("div");return u.setAttribute("id","headlessui-portal-root"),r.body.appendChild(u)});return w.useEffect(()=>{l!==null&&(r!=null&&r.body.contains(l)||r==null||r.body.appendChild(l))},[l,r]),w.useEffect(()=>{t||n!==null&&o(n.current)},[n,o,t]),l}let y1=w.Fragment;function w1(e,t){let n=e,r=w.useRef(null),l=Xe(J0(c=>{r.current=c}),t),o=Po(r),i=v1(r),[u]=w.useState(()=>{var c;return gt.isServer?null:(c=o==null?void 0:o.createElement("div"))!=null?c:null}),s=Kn(),a=w.useRef(!1);return ot(()=>{if(a.current=!1,!(!i||!u))return i.contains(u)||(u.setAttribute("data-headlessui-portal",""),i.appendChild(u)),()=>{a.current=!0,Qr(()=>{var c;a.current&&(!i||!u||(u instanceof Node&&i.contains(u)&&i.removeChild(u),i.childNodes.length<=0&&((c=i.parentElement)==null||c.removeChild(i))))})}},[i,u]),s?!i||!u?null:md.createPortal(Be({ourProps:{ref:l},theirProps:n,defaultTag:y1,name:"Portal"}),u):null}let S1=w.Fragment,Qd=w.createContext(null);function E1(e,t){let{target:n,...r}=e,l={ref:Xe(t)};return D.createElement(Qd.Provider,{value:n},Be({ourProps:l,theirProps:r,defaultTag:S1,name:"Popover.Group"}))}let k1=Ae(w1),x1=Ae(E1),wu=Object.assign(k1,{Group:x1}),bd=w.createContext(null);function Kd(){let e=w.useContext(bd);if(e===null){let t=new Error("You used a component, but it is not inside a relevant parent.");throw Error.captureStackTrace&&Error.captureStackTrace(t,Kd),t}return e}function C1(){let[e,t]=w.useState([]);return[e.length>0?e.join(" "):void 0,w.useMemo(()=>function(n){let r=ue(o=>(t(i=>[...i,o]),()=>t(i=>{let u=i.slice(),s=u.indexOf(o);return s!==-1&&u.splice(s,1),u}))),l=w.useMemo(()=>({register:r,slot:n.slot,name:n.name,props:n.props}),[r,n.slot,n.name,n.props]);return D.createElement(bd.Provider,{value:l},n.children)},[t])]}let T1="p";function N1(e,t){let n=Gn(),{id:r=`headlessui-description-${n}`,...l}=e,o=Kd(),i=Xe(t);ot(()=>o.register(r),[r,o.register]);let u={ref:i,...o.props,id:r};return Be({ourProps:u,theirProps:l,slot:o.slot||{},defaultTag:T1,name:o.name||"Description"})}let P1=Ae(N1),L1=Object.assign(P1,{}),Ns=w.createContext(()=>{});Ns.displayName="StackContext";var Su=(e=>(e[e.Add=0]="Add",e[e.Remove=1]="Remove",e))(Su||{});function _1(){return w.useContext(Ns)}function R1({children:e,onUpdate:t,type:n,element:r,enabled:l}){let o=_1(),i=ue((...u)=>{t==null||t(...u),o(...u)});return ot(()=>{let u=l===void 0||l===!0;return u&&i(0,n,r),()=>{u&&i(1,n,r)}},[i,n,r,l]),D.createElement(Ns.Provider,{value:i},e)}function O1(e,t){return 
e===t&&(e!==0||1/e===1/t)||e!==e&&t!==t}const F1=typeof Object.is=="function"?Object.is:O1,{useState:A1,useEffect:$1,useLayoutEffect:D1,useDebugValue:z1}=vi;function M1(e,t,n){const r=t(),[{inst:l},o]=A1({inst:{value:r,getSnapshot:t}});return D1(()=>{l.value=r,l.getSnapshot=t,ci(l)&&o({inst:l})},[e,r,t]),$1(()=>(ci(l)&&o({inst:l}),e(()=>{ci(l)&&o({inst:l})})),[e]),z1(r),r}function ci(e){const t=e.getSnapshot,n=e.value;try{const r=t();return!F1(n,r)}catch{return!0}}function j1(e,t,n){return t()}const U1=typeof window<"u"&&typeof window.document<"u"&&typeof window.document.createElement<"u",I1=!U1,B1=I1?j1:M1,H1="useSyncExternalStore"in vi?(e=>e.useSyncExternalStore)(vi):B1;function V1(e){return H1(e.subscribe,e.getSnapshot,e.getSnapshot)}function W1(e,t){let n=e(),r=new Set;return{getSnapshot(){return n},subscribe(l){return r.add(l),()=>r.delete(l)},dispatch(l,...o){let i=t[l].call(n,...o);i&&(n=i,r.forEach(u=>u()))}}}function Q1(){let e;return{before({doc:t}){var n;let r=t.documentElement;e=((n=t.defaultView)!=null?n:window).innerWidth-r.clientWidth},after({doc:t,d:n}){let r=t.documentElement,l=r.clientWidth-r.offsetWidth,o=e-l;n.style(r,"paddingRight",`${o}px`)}}}function b1(){if(!o1())return{};let e;return{before(){e=window.pageYOffset},after({doc:t,d:n,meta:r}){function l(i){return r.containers.flatMap(u=>u()).some(u=>u.contains(i))}n.style(t.body,"marginTop",`-${e}px`),window.scrollTo(0,0);let o=null;n.addEventListener(t,"click",i=>{if(i.target instanceof HTMLElement)try{let u=i.target.closest("a");if(!u)return;let{hash:s}=new URL(u.href),a=t.querySelector(s);a&&!l(a)&&(o=a)}catch{}},!0),n.addEventListener(t,"touchmove",i=>{i.target instanceof HTMLElement&&!l(i.target)&&i.preventDefault()},{passive:!1}),n.add(()=>{window.scrollTo(0,window.pageYOffset+e),o&&o.isConnected&&(o.scrollIntoView({block:"nearest"}),o=null)})}}}function K1(){return{before({doc:e,d:t}){t.style(e.documentElement,"overflow","hidden")}}}function G1(e){let t={};for(let n of e)Object.assign(t,n(t));return t}let tn=W1(()=>new Map,{PUSH(e,t){var n;let r=(n=this.get(e))!=null?n:{doc:e,count:0,d:bn(),meta:new Set};return r.count++,r.meta.add(t),this.set(e,r),this},POP(e,t){let n=this.get(e);return n&&(n.count--,n.meta.delete(t)),this},SCROLL_PREVENT({doc:e,d:t,meta:n}){let r={doc:e,d:t,meta:G1(n)},l=[b1(),Q1(),K1()];l.forEach(({before:o})=>o==null?void 0:o(r)),l.forEach(({after:o})=>o==null?void 0:o(r))},SCROLL_ALLOW({d:e}){e.dispose()},TEARDOWN({doc:e}){this.delete(e)}});tn.subscribe(()=>{let e=tn.getSnapshot(),t=new Map;for(let[n]of e)t.set(n,n.documentElement.style.overflow);for(let n of e.values()){let r=t.get(n.doc)==="hidden",l=n.count!==0;(l&&!r||!l&&r)&&tn.dispatch(n.count>0?"SCROLL_PREVENT":"SCROLL_ALLOW",n),n.count===0&&tn.dispatch("TEARDOWN",n)}});function q1(e,t,n){let r=V1(tn),l=e?r.get(e):void 0,o=l?l.count>0:!1;return ot(()=>{if(!(!e||!t))return tn.dispatch("PUSH",e,n),()=>tn.dispatch("POP",e,n)},[t,e]),o}let fi=new Map,or=new Map;function qa(e,t=!0){ot(()=>{var n;if(!t)return;let r=typeof e=="function"?e():e.current;if(!r)return;function l(){var i;if(!r)return;let u=(i=or.get(r))!=null?i:1;if(u===1?or.delete(r):or.set(r,u-1),u!==1)return;let s=fi.get(r);s&&(s["aria-hidden"]===null?r.removeAttribute("aria-hidden"):r.setAttribute("aria-hidden",s["aria-hidden"]),r.inert=s.inert,fi.delete(r))}let o=(n=or.get(r))!=null?n:0;return or.set(r,o+1),o!==0||(fi.set(r,{"aria-hidden":r.getAttribute("aria-hidden"),inert:r.inert}),r.setAttribute("aria-hidden","true"),r.inert=!0),l},[e,t])}var 
Y1=(e=>(e[e.Open=0]="Open",e[e.Closed=1]="Closed",e))(Y1||{}),X1=(e=>(e[e.SetTitleId=0]="SetTitleId",e))(X1||{});let J1={[0](e,t){return e.titleId===t.id?e:{...e,titleId:t.id}}},ro=w.createContext(null);ro.displayName="DialogContext";function Kr(e){let t=w.useContext(ro);if(t===null){let n=new Error(`<${e} /> is missing a parent
<Dialog />
component.`);throw Error.captureStackTrace&&Error.captureStackTrace(n,Kr),n}return t}function Z1(e,t,n=()=>[document.body]){q1(e,t,r=>{var l;return{containers:[...(l=r.containers)!=null?l:[],n]}})}function eg(e,t){return he(t.type,J1,e,t)}let tg="div",ng=to.RenderStrategy|to.Static;function rg(e,t){let n=Gn(),{id:r=`headlessui-dialog-${n}`,open:l,onClose:o,initialFocus:i,__demoMode:u=!1,...s}=e,[a,c]=w.useState(0),d=Cs();l===void 0&&d!==null&&(l=(d&Pe.Open)===Pe.Open);let h=w.useRef(null),y=Xe(h,t),m=w.useRef(null),v=Po(h),P=e.hasOwnProperty("open")||d!==null,p=e.hasOwnProperty("onClose");if(!P&&!p)throw new Error("You have to provide an `open` and an `onClose` prop to the `Dialog` component.");if(!P)throw new Error("You provided an `onClose` prop to the `Dialog`, but forgot an `open` prop.");if(!p)throw new Error("You provided an `open` prop to the `Dialog`, but forgot an `onClose` prop.");if(typeof l!="boolean")throw new Error(`You provided an \`open\` prop to the \`Dialog\`, but the value is not a boolean. Received: ${l}`);if(typeof o!="function")throw new Error(`You provided an \`onClose\` prop to the \`Dialog\`, but the value is not a function. Received: ${o}`);let f=l?0:1,[g,E]=w.useReducer(eg,{titleId:null,descriptionId:null,panelRef:w.createRef()}),C=ue(()=>o(!1)),L=ue(b=>E({type:0,id:b})),T=Kn()?u?!1:f===0:!1,_=a>1,U=w.useContext(ro)!==null,O=_?"parent":"leaf",V=d!==null?(d&Pe.Closing)===Pe.Closing:!1,He=(()=>U||V?!1:T)(),Ve=w.useCallback(()=>{var b,te;return(te=Array.from((b=v==null?void 0:v.querySelectorAll("body > *"))!=null?b:[]).find(G=>G.id==="headlessui-portal-root"?!1:G.contains(m.current)&&G instanceof HTMLElement))!=null?te:null},[m]);qa(Ve,He);let mn=(()=>_?!0:T)(),it=w.useCallback(()=>{var b,te;return(te=Array.from((b=v==null?void 0:v.querySelectorAll("[data-headlessui-portal]"))!=null?b:[]).find(G=>G.contains(m.current)&&G instanceof HTMLElement))!=null?te:null},[m]);qa(it,mn);let ut=ue(()=>{var b,te;return[...Array.from((b=v==null?void 0:v.querySelectorAll("html > *, body > *, [data-headlessui-portal]"))!=null?b:[]).filter(G=>!(G===document.body||G===document.head||!(G instanceof HTMLElement)||G.contains(m.current)||g.panelRef.current&&G.contains(g.panelRef.current))),(te=g.panelRef.current)!=null?te:h.current]}),bt=(()=>!(!T||_))();X0(()=>ut(),C,bt);let N=(()=>!(_||f!==0))();Id(v==null?void 0:v.defaultView,"keydown",b=>{N&&(b.defaultPrevented||b.key===Ud.Escape&&(b.preventDefault(),b.stopPropagation(),C()))});let F=(()=>!(V||f!==0||U))();Z1(v,F,ut),w.useEffect(()=>{if(f!==0||!h.current)return;let b=new ResizeObserver(te=>{for(let G of te){let Gr=G.target.getBoundingClientRect();Gr.x===0&&Gr.y===0&&Gr.width===0&&Gr.height===0&&C()}});return b.observe(h.current),()=>b.disconnect()},[f,h,C]);let[A,j]=C1(),ee=w.useMemo(()=>[{dialogState:f,close:C,setTitleId:L},g],[f,g,C,L]),Kt=w.useMemo(()=>({open:f===0}),[f]),st={ref:y,id:r,role:"dialog","aria-modal":f===0?!0:void 0,"aria-labelledby":g.titleId,"aria-describedby":A};return 
D.createElement(R1,{type:"Dialog",enabled:f===0,element:h,onUpdate:ue((b,te)=>{te==="Dialog"&&he(b,{[Su.Add]:()=>c(G=>G+1),[Su.Remove]:()=>c(G=>G-1)})})},D.createElement(yu,{force:!0},D.createElement(wu,null,D.createElement(ro.Provider,{value:ee},D.createElement(wu.Group,{target:h},D.createElement(yu,{force:!1},D.createElement(j,{slot:Kt,name:"Dialog.Description"},D.createElement(lr,{initialFocus:i,containers:ut,features:T?he(O,{parent:lr.features.RestoreFocus,leaf:lr.features.All&~lr.features.FocusLock}):lr.features.None},Be({ourProps:st,theirProps:s,slot:Kt,defaultTag:tg,features:ng,visible:f===0,name:"Dialog"})))))))),D.createElement(vu,{features:no.Hidden,ref:m}))}let lg="div";function og(e,t){let n=Gn(),{id:r=`headlessui-dialog-overlay-${n}`,...l}=e,[{dialogState:o,close:i}]=Kr("Dialog.Overlay"),u=Xe(t),s=ue(c=>{if(c.target===c.currentTarget){if(e1(c.currentTarget))return c.preventDefault();c.preventDefault(),c.stopPropagation(),i()}}),a=w.useMemo(()=>({open:o===0}),[o]);return Be({ourProps:{ref:u,id:r,"aria-hidden":!0,onClick:s},theirProps:l,slot:a,defaultTag:lg,name:"Dialog.Overlay"})}let ig="div";function ug(e,t){let n=Gn(),{id:r=`headlessui-dialog-backdrop-${n}`,...l}=e,[{dialogState:o},i]=Kr("Dialog.Backdrop"),u=Xe(t);w.useEffect(()=>{if(i.panelRef.current===null)throw new Error("A component is being used, but a component is missing.")},[i.panelRef]);let s=w.useMemo(()=>({open:o===0}),[o]);return D.createElement(yu,{force:!0},D.createElement(wu,null,Be({ourProps:{ref:u,id:r,"aria-hidden":!0},theirProps:l,slot:s,defaultTag:ig,name:"Dialog.Backdrop"})))}let sg="div";function ag(e,t){let n=Gn(),{id:r=`headlessui-dialog-panel-${n}`,...l}=e,[{dialogState:o},i]=Kr("Dialog.Panel"),u=Xe(t,i.panelRef),s=w.useMemo(()=>({open:o===0}),[o]),a=ue(c=>{c.stopPropagation()});return Be({ourProps:{ref:u,id:r,onClick:a},theirProps:l,slot:s,defaultTag:sg,name:"Dialog.Panel"})}let cg="h2";function fg(e,t){let n=Gn(),{id:r=`headlessui-dialog-title-${n}`,...l}=e,[{dialogState:o,setTitleId:i}]=Kr("Dialog.Title"),u=Xe(t);w.useEffect(()=>(i(r),()=>i(null)),[r,i]);let s=w.useMemo(()=>({open:o===0}),[o]);return Be({ourProps:{ref:u,id:r},theirProps:l,slot:s,defaultTag:cg,name:"Dialog.Title"})}let dg=Ae(rg),pg=Ae(ug),mg=Ae(ag),hg=Ae(og),gg=Ae(fg),di=Object.assign(dg,{Backdrop:pg,Panel:mg,Overlay:hg,Title:gg,Description:L1});function vg(e=0){let[t,n]=w.useState(e),r=br(),l=w.useCallback(s=>{r.current&&n(a=>a|s)},[t,r]),o=w.useCallback(s=>!!(t&s),[t]),i=w.useCallback(s=>{r.current&&n(a=>a&~s)},[n,r]),u=w.useCallback(s=>{r.current&&n(a=>a^s)},[n]);return{flags:t,addFlag:l,hasFlag:o,removeFlag:i,toggleFlag:u}}function yg(e){let t={called:!1};return(...n)=>{if(!t.called)return t.called=!0,e(...n)}}function pi(e,...t){e&&t.length>0&&e.classList.add(...t)}function mi(e,...t){e&&t.length>0&&e.classList.remove(...t)}function wg(e,t){let n=bn();if(!e)return n.dispose;let{transitionDuration:r,transitionDelay:l}=getComputedStyle(e),[o,i]=[r,l].map(s=>{let[a=0]=s.split(",").filter(Boolean).map(c=>c.includes("ms")?parseFloat(c):parseFloat(c)*1e3).sort((c,d)=>d-c);return a}),u=o+i;if(u!==0){n.group(a=>{a.setTimeout(()=>{t(),a.dispose()},u),a.addEventListener(e,"transitionrun",c=>{c.target===c.currentTarget&&a.dispose()})});let s=n.addEventListener(e,"transitionend",a=>{a.target===a.currentTarget&&(t(),s())})}else t();return n.add(()=>t()),n.dispose}function Sg(e,t,n,r){let l=n?"enter":"leave",o=bn(),i=r!==void 0?yg(r):()=>{};l==="enter"&&(e.removeAttribute("hidden"),e.style.display="");let 
u=he(l,{enter:()=>t.enter,leave:()=>t.leave}),s=he(l,{enter:()=>t.enterTo,leave:()=>t.leaveTo}),a=he(l,{enter:()=>t.enterFrom,leave:()=>t.leaveFrom});return mi(e,...t.enter,...t.enterTo,...t.enterFrom,...t.leave,...t.leaveFrom,...t.leaveTo,...t.entered),pi(e,...u,...a),o.nextFrame(()=>{mi(e,...a),pi(e,...s),wg(e,()=>(mi(e,...u),pi(e,...t.entered),i()))}),o.dispose}function Eg({container:e,direction:t,classes:n,onStart:r,onStop:l}){let o=br(),i=ks(),u=vt(t);ot(()=>{let s=bn();i.add(s.dispose);let a=e.current;if(a&&u.current!=="idle"&&o.current)return s.dispose(),r.current(u.current),s.add(Sg(a,n.current,u.current==="enter",()=>{s.dispose(),l.current(u.current)})),s.dispose},[t])}function Gt(e=""){return e.split(" ").filter(t=>t.trim().length>1)}let Lo=w.createContext(null);Lo.displayName="TransitionContext";var kg=(e=>(e.Visible="visible",e.Hidden="hidden",e))(kg||{});function xg(){let e=w.useContext(Lo);if(e===null)throw new Error("A is used but it is missing a parent or .");return e}function Cg(){let e=w.useContext(_o);if(e===null)throw new Error("A is used but it is missing a parent or .");return e}let _o=w.createContext(null);_o.displayName="NestingContext";function Ro(e){return"children"in e?Ro(e.children):e.current.filter(({el:t})=>t.current!==null).filter(({state:t})=>t==="visible").length>0}function Gd(e,t){let n=vt(e),r=w.useRef([]),l=br(),o=ks(),i=ue((y,m=pt.Hidden)=>{let v=r.current.findIndex(({el:P})=>P===y);v!==-1&&(he(m,{[pt.Unmount](){r.current.splice(v,1)},[pt.Hidden](){r.current[v].state="hidden"}}),o.microTask(()=>{var P;!Ro(r)&&l.current&&((P=n.current)==null||P.call(n))}))}),u=ue(y=>{let m=r.current.find(({el:v})=>v===y);return m?m.state!=="visible"&&(m.state="visible"):r.current.push({el:y,state:"visible"}),()=>i(y,pt.Unmount)}),s=w.useRef([]),a=w.useRef(Promise.resolve()),c=w.useRef({enter:[],leave:[],idle:[]}),d=ue((y,m,v)=>{s.current.splice(0),t&&(t.chains.current[m]=t.chains.current[m].filter(([P])=>P!==y)),t==null||t.chains.current[m].push([y,new Promise(P=>{s.current.push(P)})]),t==null||t.chains.current[m].push([y,new Promise(P=>{Promise.all(c.current[m].map(([p,f])=>f)).then(()=>P())})]),m==="enter"?a.current=a.current.then(()=>t==null?void 0:t.wait.current).then(()=>v(m)):v(m)}),h=ue((y,m,v)=>{Promise.all(c.current[m].splice(0).map(([P,p])=>p)).then(()=>{var P;(P=s.current.shift())==null||P()}).then(()=>v(m))});return w.useMemo(()=>({children:r,register:u,unregister:i,onStart:d,onStop:h,wait:a,chains:c}),[u,i,r,d,h,c,a])}function Tg(){}let Ng=["beforeEnter","afterEnter","beforeLeave","afterLeave"];function Ya(e){var t;let n={};for(let r of Ng)n[r]=(t=e[r])!=null?t:Tg;return n}function Pg(e){let t=w.useRef(Ya(e));return w.useEffect(()=>{t.current=Ya(e)},[e]),t}let Lg="div",qd=to.RenderStrategy;function _g(e,t){let{beforeEnter:n,afterEnter:r,beforeLeave:l,afterLeave:o,enter:i,enterFrom:u,enterTo:s,entered:a,leave:c,leaveFrom:d,leaveTo:h,...y}=e,m=w.useRef(null),v=Xe(m,t),P=y.unmount?pt.Unmount:pt.Hidden,{show:p,appear:f,initial:g}=xg(),[E,C]=w.useState(p?"visible":"hidden"),L=Cg(),{register:T,unregister:_}=L,U=w.useRef(null);w.useEffect(()=>T(m),[T,m]),w.useEffect(()=>{if(P===pt.Hidden&&m.current){if(p&&E!=="visible"){C("visible");return}return he(E,{hidden:()=>_(m),visible:()=>T(m)})}},[E,m,T,_,p,P]);let O=vt({enter:Gt(i),enterFrom:Gt(u),enterTo:Gt(s),entered:Gt(a),leave:Gt(c),leaveFrom:Gt(d),leaveTo:Gt(h)}),V=Pg({beforeEnter:n,afterEnter:r,beforeLeave:l,afterLeave:o}),He=Kn();w.useEffect(()=>{if(He&&E==="visible"&&m.current===null)throw new Error("Did you 
forget to passthrough the `ref` to the actual DOM node?")},[m,E,He]);let Ve=g&&!f,mn=(()=>!He||Ve||U.current===p?"idle":p?"enter":"leave")(),it=vg(0),ut=ue(j=>he(j,{enter:()=>{it.addFlag(Pe.Opening),V.current.beforeEnter()},leave:()=>{it.addFlag(Pe.Closing),V.current.beforeLeave()},idle:()=>{}})),bt=ue(j=>he(j,{enter:()=>{it.removeFlag(Pe.Opening),V.current.afterEnter()},leave:()=>{it.removeFlag(Pe.Closing),V.current.afterLeave()},idle:()=>{}})),N=Gd(()=>{C("hidden"),_(m)},L);Eg({container:m,classes:O,direction:mn,onStart:vt(j=>{N.onStart(m,j,ut)}),onStop:vt(j=>{N.onStop(m,j,bt),j==="leave"&&!Ro(N)&&(C("hidden"),_(m))})}),w.useEffect(()=>{Ve&&(P===pt.Hidden?U.current=null:U.current=p)},[p,Ve,E]);let F=y,A={ref:v};return f&&p&&(F={...F,className:gu(y.className,...O.current.enter,...O.current.enterFrom)}),D.createElement(_o.Provider,{value:N},D.createElement(l1,{value:he(E,{visible:Pe.Open,hidden:Pe.Closed})|it.flags},Be({ourProps:A,theirProps:F,defaultTag:Lg,features:qd,visible:E==="visible",name:"Transition.Child"})))}function Rg(e,t){let{show:n,appear:r=!1,unmount:l,...o}=e,i=w.useRef(null),u=Xe(i,t);Kn();let s=Cs();if(n===void 0&&s!==null&&(n=(s&Pe.Open)===Pe.Open),![!0,!1].includes(n))throw new Error("A is used but it is missing a `show={true | false}` prop.");let[a,c]=w.useState(n?"visible":"hidden"),d=Gd(()=>{c("hidden")}),[h,y]=w.useState(!0),m=w.useRef([n]);ot(()=>{h!==!1&&m.current[m.current.length-1]!==n&&(m.current.push(n),y(!1))},[m,n]);let v=w.useMemo(()=>({show:n,appear:r,initial:h}),[n,r,h]);w.useEffect(()=>{if(n)c("visible");else if(!Ro(d))c("hidden");else{let p=i.current;if(!p)return;let f=p.getBoundingClientRect();f.x===0&&f.y===0&&f.width===0&&f.height===0&&c("hidden")}},[n,d]);let P={unmount:l};return D.createElement(_o.Provider,{value:d},D.createElement(Lo.Provider,{value:v},Be({ourProps:{...P,as:w.Fragment,children:D.createElement(Yd,{ref:u,...P,...o})},theirProps:{},defaultTag:w.Fragment,features:qd,visible:a==="visible",name:"Transition"})))}function Og(e,t){let n=w.useContext(Lo)!==null,r=Cs()!==null;return D.createElement(D.Fragment,null,!n&&r?D.createElement(Eu,{ref:t,...e}):D.createElement(Yd,{ref:t,...e}))}let Eu=Ae(Rg),Yd=Ae(_g),Fg=Ae(Og),hi=Object.assign(Eu,{Child:Fg,Root:Eu});function Xd({show:e,onClose:t,onSubmit:n,title:r,content:l,submitText:o}){return x(hi,{appear:!0,show:e,as:w.Fragment,children:B(di,{as:"div",className:"relative z-10",onClose:t,children:[x(hi.Child,{as:w.Fragment,enter:"ease-out duration-300",enterFrom:"opacity-0",enterTo:"opacity-100",leave:"ease-in duration-200",leaveFrom:"opacity-100",leaveTo:"opacity-0",children:x("div",{className:"fixed inset-0 bg-black bg-opacity-25"})}),x("div",{className:"fixed inset-0 overflow-y-auto",children:x("div",{className:"flex min-h-full items-center justify-center p-4 text-center",children:x(hi.Child,{as:w.Fragment,enter:"ease-out duration-300",enterFrom:"opacity-0 scale-95",enterTo:"opacity-100 scale-100",leave:"ease-in duration-200",leaveFrom:"opacity-100 scale-100",leaveTo:"opacity-0 scale-95",children:B(di.Panel,{className:"w-full max-w-md transform overflow-hidden rounded-2xl bg-white p-6 text-left align-middle shadow-xl transition-all",children:[x(di.Title,{as:"h3",className:"text-lg font-medium leading-6 text-gray-900",children:r}),x("div",{className:"mt-3 text-sm text-gray-500",children:l}),B("div",{className:"mt-4 flex flex-row-reverse",children:[o&&x("button",{type:"button",className:"inline-flex ml-4 justify-center rounded-md border border-transparent bg-indigo-600 px-4 py-2 text-sm 
font-medium text-indigo-100 hover:bg-indigo-500 focus:outline-none focus-visible:ring-2 focus-visible:ring-indigo-500 focus-visible:ring-offset-2 transition-all duration-300",onClick:n,children:o}),x("button",{type:"button",className:"inline-flex justify-center rounded-md border border-transparent bg-indigo-100 px-4 py-2 text-sm font-medium text-indigo-900 hover:bg-indigo-200 focus:outline-none focus-visible:ring-2 focus-visible:ring-indigo-500 focus-visible:ring-offset-2 transition-all duration-300",onClick:t,children:"Close"})]})]})})})})]})})}function Ag(e){return x("div",{children:x("input",{...e,type:"url",className:"my-2 bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500",placeholder:"www.example.com",required:!0})})}function $g(e){const t=w.useRef(null),n=w.useRef(null);return w.useEffect(()=>{t.current&&n.current&&(n.current.src=e.audioUrl,t.current.load())},[e.audioUrl]),x("div",{className:"flex relative z-10 p-4 w-full",children:x("audio",{ref:t,controls:!0,className:"w-full h-14 rounded-lg bg-white shadow-xl shadow-black/5 ring-1 ring-slate-700/10",children:x("source",{ref:n,type:"audio/wav"})})})}function Dg(e){const{isModelLoading:t,isTranscribing:n,onClick:r,...l}=e;return x("button",{...l,onClick:o=>{r&&!n&&!t&&r(o)},disabled:n,className:"text-white bg-blue-700 hover:bg-blue-800 focus:ring-4 focus:ring-blue-300 font-medium rounded-lg text-sm px-5 py-2.5 text-center mr-2 dark:bg-blue-600 dark:hover:bg-blue-700 dark:focus:ring-blue-800 inline-flex items-center",children:t?x(Xa,{text:"Loading model..."}):n?x(Xa,{text:"Transcribing..."}):"Transcribe Audio"})}function Xa(e){return B("div",{role:"status",children:[B("svg",{"aria-hidden":"true",role:"status",className:"inline w-4 h-4 mr-3 text-white animate-spin",viewBox:"0 0 100 101",fill:"none",xmlns:"http://www.w3.org/2000/svg",children:[x("path",{d:"M100 50.5908C100 78.2051 77.6142 100.591 50 100.591C22.3858 100.591 0 78.2051 0 50.5908C0 22.9766 22.3858 0.59082 50 0.59082C77.6142 0.59082 100 22.9766 100 50.5908ZM9.08144 50.5908C9.08144 73.1895 27.4013 91.5094 50 91.5094C72.5987 91.5094 90.9186 73.1895 90.9186 50.5908C90.9186 27.9921 72.5987 9.67226 50 9.67226C27.4013 9.67226 9.08144 27.9921 9.08144 50.5908Z",fill:"#E5E7EB"}),x("path",{d:"M93.9676 39.0409C96.393 38.4038 97.8624 35.9116 97.0079 33.5539C95.2932 28.8227 92.871 24.3692 89.8167 20.348C85.8452 15.1192 80.8826 10.7238 75.2124 7.41289C69.5422 4.10194 63.2754 1.94025 56.7698 1.05124C51.7666 0.367541 46.6976 0.446843 41.7345 1.27873C39.2613 1.69328 37.813 4.19778 38.4501 6.62326C39.0873 9.04874 41.5694 10.4717 44.0505 10.1071C47.8511 9.54855 51.7191 9.52689 55.5402 10.0491C60.8642 10.7766 65.9928 12.5457 70.6331 15.2552C75.2735 17.9648 79.3347 21.5619 82.5849 25.841C84.9175 28.9121 86.7997 32.2913 88.1811 35.8758C89.083 38.2158 91.5421 39.6781 93.9676 39.0409Z",fill:"currentColor"})]}),e.text]})}function zg(){let e=!1;return function(t){(/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino|android|ipad|playbook|silk/i.test(t)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a 
wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(t.substr(0,4)))&&(e=!0)}(navigator.userAgent||navigator.vendor||("opera"in window&&typeof window.opera=="string"?window.opera:"")),e}const Ja=zg(),Rt={SAMPLING_RATE:16e3,DEFAULT_AUDIO_URL:`https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/${Ja?"jfk":"ted_60"}.wav`,DEFAULT_MODEL:"tiny",DEFAULT_SUBTASK:"transcribe",DEFAULT_LANGUAGE:"auto",DEFAULT_QUANTIZED:Ja,DEFAULT_MULTILINGUAL:!1};function Mg({text:e,percentage:t}){return t=t??0,x("div",{className:"mt-0.5 w-full relative text-sm text-white background-bg-cyan-400 bg-gray-200 border-1 border-gray-400 rounded-lg text-left overflow-hidden",children:B("div",{className:"top-0 h-full bg-blue-500 whitespace-nowrap px-2",style:{width:`${t}%`},children:[e," (",`${t.toFixed(2)}%`,")"]})})}function jg(e){return e=e.toLowerCase(),(e.match(/\w+.?/g)||[]).map(t=>t.charAt(0).toUpperCase()+t.slice(1)).join("")}const 
Za={en:"english",zh:"chinese",de:"german",es:"spanish/castilian",ru:"russian",ko:"korean",fr:"french",ja:"japanese",pt:"portuguese",tr:"turkish",pl:"polish",ca:"catalan/valencian",nl:"dutch/flemish",ar:"arabic",sv:"swedish",it:"italian",id:"indonesian",hi:"hindi",fi:"finnish",vi:"vietnamese",he:"hebrew",uk:"ukrainian",el:"greek",ms:"malay",cs:"czech",ro:"romanian/moldavian/moldovan",da:"danish",hu:"hungarian",ta:"tamil",no:"norwegian",th:"thai",ur:"urdu",hr:"croatian",bg:"bulgarian",lt:"lithuanian",la:"latin",mi:"maori",ml:"malayalam",cy:"welsh",sk:"slovak",te:"telugu",fa:"persian",lv:"latvian",bn:"bengali",sr:"serbian",az:"azerbaijani",sl:"slovenian",kn:"kannada",et:"estonian",mk:"macedonian",br:"breton",eu:"basque",is:"icelandic",hy:"armenian",ne:"nepali",mn:"mongolian",bs:"bosnian",kk:"kazakh",sq:"albanian",sw:"swahili",gl:"galician",mr:"marathi",pa:"punjabi/panjabi",si:"sinhala/sinhalese",km:"khmer",sn:"shona",yo:"yoruba",so:"somali",af:"afrikaans",oc:"occitan",ka:"georgian",be:"belarusian",tg:"tajik",sd:"sindhi",gu:"gujarati",am:"amharic",yi:"yiddish",lo:"lao",uz:"uzbek",fo:"faroese",ht:"haitian creole/haitian",ps:"pashto/pushto",tk:"turkmen",nn:"nynorsk",mt:"maltese",sa:"sanskrit",lb:"luxembourgish/letzeburgesch",my:"myanmar/burmese",bo:"tibetan",tl:"tagalog",mg:"malagasy",as:"assamese",tt:"tatar",haw:"hawaiian",ln:"lingala",ha:"hausa",ba:"bashkir",jw:"javanese",su:"sundanese"};function Ug(e){const[t,n]=w.useState(void 0),[r,l]=w.useState(void 0),[o,i]=w.useState(void 0),u=t!==void 0,s=async c=>{const d=new AudioContext({sampleRate:Rt.SAMPLING_RATE}),h=URL.createObjectURL(new Blob([c],{type:"audio/*"})),y=await d.decodeAudioData(c);l({buffer:y,url:h,source:"URL"})},a=async c=>{if(o)try{l(void 0),n(0);const{data:d}=await I0.get(o,{signal:c.signal,responseType:"arraybuffer",onDownloadProgress(h){n(h.progress||0)}});s(d)}catch(d){console.log("Request failed or aborted",d)}finally{n(void 0)}};return w.useEffect(()=>{if(o){const c=new AbortController;return a(c),()=>{c.abort()}}},[o]),B(on,{children:[B("div",{className:"flex flex-col justify-center items-center rounded-lg bg-white shadow-xl shadow-black/5 ring-1 ring-slate-700/10",children:[B("div",{className:"flex flex-row space-x-2 py-2 w-full px-2",children:[x(Qg,{icon:x(Gg,{}),text:"From URL",onUrlUpdate:c=>{e.transcriber.onInputChange(),i(c)}}),x(Hg,{}),x(Kg,{icon:x(qg,{}),text:"From file",onFileUpdate:(c,d)=>{e.transcriber.onInputChange(),l({buffer:c,url:d,source:"FILE"})}})]}),x(Vg,{progress:u?t:+!!r})]}),r&&B(on,{children:[x($g,{audioUrl:r.url}),B("div",{className:"relative w-full flex justify-center items-center",children:[x(Dg,{onClick:()=>{e.transcriber.start(r.buffer)},isModelLoading:e.transcriber.isModelLoading,isTranscribing:e.transcriber.isBusy}),x(Ig,{className:"absolute right-4",transcriber:e.transcriber,icon:x(Yg,{})})]}),e.transcriber.progressItems.length>0&&B("div",{className:"relative z-10 p-4 w-full",children:[x("label",{children:"Loading model files... 
(only run once)"}),e.transcriber.progressItems.map(c=>x("div",{children:x(Mg,{text:c.file,percentage:c.progress})},c.file))]})]})]})}function Ig(e){const[t,n]=w.useState(!1),r=()=>{n(!0)},l=()=>{n(!1)},o=i=>{l()};return B("div",{className:e.className,children:[x(Ps,{icon:e.icon,onClick:r}),x(Bg,{show:t,onSubmit:o,onClose:l,transcriber:e.transcriber})]})}function Bg(e){const t=Object.values(Za).map(jg),n={tiny:[61,231],base:[103,398],small:[290],medium:[833]};return x(Xd,{show:e.show,title:"Settings",content:B(on,{children:[x("label",{children:"Select the model to use."}),x("select",{className:"mt-1 mb-1 bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500",defaultValue:e.transcriber.model,onChange:r=>{e.transcriber.setModel(r.target.value)},children:Object.keys(n).filter(r=>e.transcriber.quantized||n[r].length==2).map(r=>x("option",{value:r,children:`whisper-${r}${e.transcriber.multilingual?"":".en"} (${n[r][e.transcriber.quantized?0:1]}MB)`},r))}),B("div",{className:"flex justify-between items-center mb-3 px-1",children:[B("div",{className:"flex",children:[x("input",{id:"multilingual",type:"checkbox",checked:e.transcriber.multilingual,onChange:r=>{e.transcriber.setMultilingual(r.target.checked)}}),x("label",{htmlFor:"multilingual",className:"ms-1",children:"Multilingual"})]}),B("div",{className:"flex",children:[x("input",{id:"quantize",type:"checkbox",checked:e.transcriber.quantized,onChange:r=>{e.transcriber.setQuantized(r.target.checked)}}),x("label",{htmlFor:"quantize",className:"ms-1",children:"Quantized"})]})]}),e.transcriber.multilingual&&B(on,{children:[x("label",{children:"Select the source language."}),B("select",{className:"mt-1 mb-3 bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500",defaultValue:e.transcriber.language,onChange:r=>{e.transcriber.setLanguage(r.target.value)},children:[x("option",{value:"auto",children:"Auto-detect"},-1),Object.keys(Za).map((r,l)=>x("option",{value:r,children:t[l]},r))]}),x("label",{children:"Select the task to perform."}),B("select",{className:"mt-1 mb-3 bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500",defaultValue:e.transcriber.subtask,onChange:r=>{e.transcriber.setSubtask(r.target.value)},children:[x("option",{value:"transcribe",children:"Transcribe"}),x("option",{value:"translate",children:"Translate (to English)"})]})]})]}),onClose:e.onClose,onSubmit:()=>{}})}function Hg(){return x("div",{className:"w-[1px] bg-slate-200"})}function Vg(e){return x(Wg,{progress:`${Math.round(e.progress*100)}%`})}function Wg(e){return x("div",{className:"w-full bg-gray-200 rounded-full h-1 dark:bg-gray-700",children:x("div",{className:"bg-blue-600 h-1 rounded-full transition-all duration-100",style:{width:e.progress}})})}function Qg(e){const[t,n]=w.useState(!1),r=()=>{n(!0)},l=()=>{n(!1)},o=i=>{e.onUrlUpdate(i),l()};return B(on,{children:[x(Ps,{icon:e.icon,text:e.text,onClick:r}),x(bg,{show:t,onSubmit:o,onClose:l})]})}function 
bg(e){const[t,n]=w.useState(Rt.DEFAULT_AUDIO_URL),r=o=>{n(o.target.value)},l=()=>{e.onSubmit(t)};return x(Xd,{show:e.show,title:"From URL",content:B(on,{children:["Enter the URL of the audio file you want to load.",x(Ag,{onChange:r,value:t})]}),onClose:e.onClose,submitText:"Load",onSubmit:l})}function Kg(e){let t=document.createElement("input");return t.type="file",t.oninput=n=>{let r=n.target.files;if(!r)return;const l=URL.createObjectURL(r[0]),o=new FileReader;o.addEventListener("load",async i=>{var c;const u=(c=i.target)==null?void 0:c.result;if(!u)return;const a=await new AudioContext({sampleRate:Rt.SAMPLING_RATE}).decodeAudioData(u);e.onFileUpdate(a,l)}),o.readAsArrayBuffer(r[0]),t.value=""},x(on,{children:x(Ps,{icon:e.icon,text:e.text,onClick:()=>t.click()})})}function Ps(e){return B("button",{onClick:e.onClick,className:"flex items-center justify-center rounded-lg p-2 bg-blue text-slate-500 hover:text-indigo-600 hover:bg-indigo-50 transition-all duration-200",children:[x("div",{className:"w-7 h-7",children:e.icon}),e.text&&x("div",{className:"ml-2 break-text text-center text-md w-30",children:e.text})]})}function Gg(){return x("svg",{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",strokeWidth:"1.5",stroke:"currentColor",children:x("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M13.19 8.688a4.5 4.5 0 011.242 7.244l-4.5 4.5a4.5 4.5 0 01-6.364-6.364l1.757-1.757m13.35-.622l1.757-1.757a4.5 4.5 0 00-6.364-6.364l-4.5 4.5a4.5 4.5 0 001.242 7.244"})})}function qg(){return x("svg",{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",strokeWidth:"1.5",stroke:"currentColor",children:x("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M3.75 9.776c.112-.017.227-.026.344-.026h15.812c.117 0 .232.009.344.026m-16.5 0a2.25 2.25 0 00-1.883 2.542l.857 6a2.25 2.25 0 002.227 1.932H19.05a2.25 2.25 0 002.227-1.932l.857-6a2.25 2.25 0 00-1.883-2.542m-16.5 0V6A2.25 2.25 0 016 3.75h3.879a1.5 1.5 0 011.06.44l2.122 2.12a1.5 1.5 0 001.06.44H18A2.25 2.25 0 0120.25 9v.776"})})}function Yg(){return B("svg",{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",strokeWidth:"1.25",stroke:"currentColor",children:[x("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M9.594 3.94c.09-.542.56-.94 1.11-.94h2.593c.55 0 1.02.398 1.11.94l.213 1.281c.063.374.313.686.645.87.074.04.147.083.22.127.324.196.72.257 1.075.124l1.217-.456a1.125 1.125 0 011.37.49l1.296 2.247a1.125 1.125 0 01-.26 1.431l-1.003.827c-.293.24-.438.613-.431.992a6.759 6.759 0 010 .255c-.007.378.138.75.43.99l1.005.828c.424.35.534.954.26 1.43l-1.298 2.247a1.125 1.125 0 01-1.369.491l-1.217-.456c-.355-.133-.75-.072-1.076.124a6.57 6.57 0 01-.22.128c-.331.183-.581.495-.644.869l-.213 1.28c-.09.543-.56.941-1.11.941h-2.594c-.55 0-1.02-.398-1.11-.94l-.213-1.281c-.062-.374-.312-.686-.644-.87a6.52 6.52 0 01-.22-.127c-.325-.196-.72-.257-1.076-.124l-1.217.456a1.125 1.125 0 01-1.369-.49l-1.297-2.247a1.125 1.125 0 01.26-1.431l1.004-.827c.292-.24.437-.613.43-.992a6.932 6.932 0 010-.255c.007-.378-.138-.75-.43-.99l-1.004-.828a1.125 1.125 0 01-.26-1.43l1.297-2.247a1.125 1.125 0 011.37-.491l1.216.456c.356.133.751.072 1.076-.124.072-.044.146-.087.22-.128.332-.183.582-.495.644-.869l.214-1.281z"}),x("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M15 12a3 3 0 11-6 0 3 3 0 016 0z"})]})}function gi(e){return String(e).padStart(2,"0")}function Xg(e){const t=e/3600|0;e-=t*(60*60);const n=e/60|0;e-=n*60;const r=e|0;return`${t?gi(t)+":":""}${gi(n)}:${gi(r)}`}function Jg({transcribedData:e}){const 
t=w.useRef(null),n=()=>{let r=JSON.stringify((e==null?void 0:e.chunks)??{},null,2);const l=/( "timestamp": )\[\s+(\S+)\s+(\S+)\s+\]/gm;r=r.replace(l,"$1[$2 $3]");const o=new Blob([r],{type:"application/json"}),i=URL.createObjectURL(o),u=document.createElement("a");u.href=i,u.download="transcript.json",u.click(),URL.revokeObjectURL(i)};return w.useEffect(()=>{t.current&&Math.abs(t.current.offsetHeight+t.current.scrollTop-t.current.scrollHeight)<=64&&(t.current.scrollTop=t.current.scrollHeight)}),B("div",{ref:t,className:"w-full flex flex-col my-2 p-4 max-h-[20rem] overflow-y-auto",children:[e&&e.chunks.map((r,l)=>B("div",{className:"w-full flex flex-row mb-2 bg-white rounded-lg p-4 shadow-xl shadow-black/5 ring-1 ring-slate-700/10",children:[x("div",{className:"mr-5",children:Xg(r.timestamp[0])}),r.text]},`${l}-${r.text}`)),e&&!e.isBusy&&x("div",{className:"w-full text-right",children:x("button",{onClick:n,className:"text-white bg-green-500 hover:bg-green-600 focus:ring-4 focus:ring-green-300 font-medium rounded-lg text-sm px-4 py-2 text-center mr-2 dark:bg-green-600 dark:hover:bg-green-700 dark:focus:ring-green-800 inline-flex items-center",children:"Export JSON"})})]})}function Zg(e){const[t]=w.useState(()=>ev(e));return t}function ev(e){const t=new Worker(new URL("/assets/worker-c82d7cb9.js",self.location),{type:"module"});return t.addEventListener("message",e),t}function tv(){const[e,t]=w.useState(void 0),[n,r]=w.useState(!1),[l,o]=w.useState(!1),[i,u]=w.useState([]),s=Zg(L=>{const T=L.data;switch(T.status){case"progress":u(O=>O.map(V=>V.file===T.file?{...V,progress:T.progress}:V));break;case"update":const _=T;t({isBusy:!0,text:_.data[0],chunks:_.data[1].chunks});break;case"complete":const U=T;t({isBusy:!1,text:U.data.text,chunks:U.data.chunks}),r(!1);break;case"initiate":o(!0),u(O=>[...O,T]);break;case"ready":o(!1);break;case"error":r(!1),alert(`${T.data.message} This is most likely because you are using Safari on an M1/M2 Mac. Please try again from Chrome, Firefox, or Edge. 
- -If this is not the case, please file a bug report.`);break;case"done":u(O=>O.filter(V=>V.file!==T.file));break}}),[a,c]=w.useState(Rt.DEFAULT_MODEL),[d,h]=w.useState(Rt.DEFAULT_SUBTASK),[y,m]=w.useState(Rt.DEFAULT_QUANTIZED),[v,P]=w.useState(Rt.DEFAULT_MULTILINGUAL),[p,f]=w.useState(Rt.DEFAULT_LANGUAGE),g=w.useCallback(()=>{t(void 0)},[]),E=w.useCallback(async L=>{L&&(t(void 0),r(!0),s.postMessage({audio:L.getChannelData(0),model:a,multilingual:v,quantized:y,subtask:v?d:null,language:v&&p!=="auto"?p:null}))},[s,a,v,y,d,p]);return w.useMemo(()=>({onInputChange:g,isBusy:n,isModelLoading:l,progressItems:i,start:E,output:e,model:a,setModel:c,multilingual:v,setMultilingual:P,quantized:y,setQuantized:m,subtask:d,setSubtask:h,language:p,setLanguage:f}),[n,l,i,E,e,a,v,y,d,p])}function nv(){const e=tv();return x("div",{className:"flex justify-center items-center min-h-screen",children:B("div",{className:"container flex flex-col justify-center items-center",children:[x("h1",{className:"text-5xl font-extrabold tracking-tight text-slate-900 sm:text-7xl text-center",children:"Whisper Web"}),x("h2",{className:"mt-3 mb-5 px-4 text-center text-1xl font-semibold tracking-tight text-slate-900 sm:text-2xl",children:"ML-powered speech recognition directly in your browser"}),x(Ug,{transcriber:e}),x(Jg,{transcribedData:e.output})]})})}yi.createRoot(document.getElementById("root")).render(x(D.StrictMode,{children:x(nv,{})})); diff --git a/spaces/XzJosh/Echo-Bert-VITS2/text/japanese.py b/spaces/XzJosh/Echo-Bert-VITS2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Echo-Bert-VITS2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) 
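As a quick illustration of the split being set up here, a minimal standalone sketch of how _japanese_marks partitions a sentence into Japanese runs and punctuation (same regexes as defined in this file; pyopenjtalk itself is not needed for the demo and the sample sentence is arbitrary):

import re

# Same character classes as in japanese.py above: the first matches Japanese text
# (plus Latin letters/digits), the second matches everything else (punctuation marks).
_japanese_characters = re.compile(
    r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
_japanese_marks = re.compile(
    r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')

text = "こんにちは、世界!"
sentences = re.split(_japanese_marks, text)   # ['こんにちは', '世界', '']
marks = re.findall(_japanese_marks, text)     # ['、', '!']
# Each non-empty sentence chunk is what preprocess_jap feeds to pyopenjtalk.g2p;
# the kept marks are re-inserted between the resulting phoneme groups.
print(sentences, marks)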
- marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/XzJosh/Nana7mi-Bert-VITS2/monotonic_align/core.py b/spaces/XzJosh/Nana7mi-Bert-VITS2/monotonic_align/core.py deleted file mode 100644 index dddc688d76172b880054e544b7a217acd013f14f..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Nana7mi-Bert-VITS2/monotonic_align/core.py +++ /dev/null @@ -1,35 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val=-1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y-1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y-1, x-1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - index = index - 1 diff --git a/spaces/XzJosh/Taffy-Bert-VITS2/text/chinese_bert.py b/spaces/XzJosh/Taffy-Bert-VITS2/text/chinese_bert.py deleted file mode 100644 index cb84ce0b426cd0a1c7954ddcdf41322c10ed14fa..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Taffy-Bert-VITS2/text/chinese_bert.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -tokenizer = AutoTokenizer.from_pretrained("./bert/chinese-roberta-wwm-ext-large") -model = AutoModelForMaskedLM.from_pretrained("./bert/chinese-roberta-wwm-ext-large").to(device) - -def get_bert_feature(text, word2ph): - with torch.no_grad(): - inputs = tokenizer(text, return_tensors='pt') - for i in inputs: - inputs[i] = inputs[i].to(device) - res = model(**inputs, output_hidden_states=True) - res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu() - - assert len(word2ph) == len(text)+2 - word2phone = word2ph - phone_level_feature = [] - for i in range(len(word2phone)): - repeat_feature = res[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - - return phone_level_feature.T - -if __name__ == '__main__': - # feature = get_bert_feature('你好,我是说的道理。') - import torch - - word_level_feature = torch.rand(38, 1024) # 12个词,每个词1024维特征 - word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1] - - # 计算总帧数 - total_frames = 
sum(word2phone) - print(word_level_feature.shape) - print(word2phone) - phone_level_feature = [] - for i in range(len(word2phone)): - print(word_level_feature[i].shape) - - # 对每个词重复word2phone[i]次 - repeat_feature = word_level_feature[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - print(phone_level_feature.shape) # torch.Size([36, 1024]) - diff --git a/spaces/YUANAI/DiffspeechResearch/modules/commons/nar_tts_modules.py b/spaces/YUANAI/DiffspeechResearch/modules/commons/nar_tts_modules.py deleted file mode 100644 index fe9a3325554ccf6059316d0848be1281c14ecffe..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/modules/commons/nar_tts_modules.py +++ /dev/null @@ -1,104 +0,0 @@ -import torch -from torch import nn - -from modules.commons.layers import LayerNorm -import torch.nn.functional as F - - -class DurationPredictor(torch.nn.Module): - def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0): - super(DurationPredictor, self).__init__() - self.offset = offset - self.conv = torch.nn.ModuleList() - self.kernel_size = kernel_size - for idx in range(n_layers): - in_chans = idim if idx == 0 else n_chans - self.conv += [torch.nn.Sequential( - torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=kernel_size // 2), - torch.nn.ReLU(), - LayerNorm(n_chans, dim=1), - torch.nn.Dropout(dropout_rate) - )] - self.linear = nn.Sequential(torch.nn.Linear(n_chans, 1), nn.Softplus()) - - def forward(self, x, x_padding=None): - x = x.transpose(1, -1) # (B, idim, Tmax) - for f in self.conv: - x = f(x) # (B, C, Tmax) - if x_padding is not None: - x = x * (1 - x_padding.float())[:, None, :] - - x = self.linear(x.transpose(1, -1)) # [B, T, C] - x = x * (1 - x_padding.float())[:, :, None] # (B, T, C) - x = x[..., 0] # (B, Tmax) - return x - - -class LengthRegulator(torch.nn.Module): - def __init__(self, pad_value=0.0): - super(LengthRegulator, self).__init__() - self.pad_value = pad_value - - def forward(self, dur, dur_padding=None, alpha=1.0): - """ - Example (no batch dim version): - 1. dur = [2,2,3] - 2. token_idx = [[1],[2],[3]], dur_cumsum = [2,4,7], dur_cumsum_prev = [0,2,4] - 3. token_mask = [[1,1,0,0,0,0,0], - [0,0,1,1,0,0,0], - [0,0,0,0,1,1,1]] - 4. token_idx * token_mask = [[1,1,0,0,0,0,0], - [0,0,2,2,0,0,0], - [0,0,0,0,3,3,3]] - 5. 
(token_idx * token_mask).sum(0) = [1,1,2,2,3,3,3] - - :param dur: Batch of durations of each frame (B, T_txt) - :param dur_padding: Batch of padding of each frame (B, T_txt) - :param alpha: duration rescale coefficient - :return: - mel2ph (B, T_speech) - assert alpha > 0 - """ - dur = torch.round(dur.float() * alpha).long() - if dur_padding is not None: - dur = dur * (1 - dur_padding.long()) - token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None].to(dur.device) - dur_cumsum = torch.cumsum(dur, 1) - dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0) - - pos_idx = torch.arange(dur.sum(-1).max())[None, None].to(dur.device) - token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None]) - mel2token = (token_idx * token_mask.long()).sum(1) - return mel2token - - -class PitchPredictor(torch.nn.Module): - def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5, dropout_rate=0.1): - super(PitchPredictor, self).__init__() - self.conv = torch.nn.ModuleList() - self.kernel_size = kernel_size - for idx in range(n_layers): - in_chans = idim if idx == 0 else n_chans - self.conv += [torch.nn.Sequential( - torch.nn.Conv1d(in_chans, n_chans, kernel_size, padding=kernel_size // 2), - torch.nn.ReLU(), - LayerNorm(n_chans, dim=1), - torch.nn.Dropout(dropout_rate) - )] - self.linear = torch.nn.Linear(n_chans, odim) - - def forward(self, x): - """ - - :param x: [B, T, H] - :return: [B, T, H] - """ - x = x.transpose(1, -1) # (B, idim, Tmax) - for f in self.conv: - x = f(x) # (B, C, Tmax) - x = self.linear(x.transpose(1, -1)) # (B, Tmax, H) - return x - - -class EnergyPredictor(PitchPredictor): - pass diff --git a/spaces/Yan233th/so-vits-svc-models/utils.py b/spaces/Yan233th/so-vits-svc-models/utils.py deleted file mode 100644 index 229ac28ca48940370f63f2a7691ee6561910e2a6..0000000000000000000000000000000000000000 --- a/spaces/Yan233th/so-vits-svc-models/utils.py +++ /dev/null @@ -1,502 +0,0 @@ -import os -import glob -import re -import sys -import argparse -import logging -import json -import subprocess -import random - -import librosa -import numpy as np -from scipy.io.wavfile import read -import torch -from torch.nn import functional as F -from modules.commons import sequence_mask -from hubert import hubert_model -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - -f0_bin = 256 -f0_max = 1100.0 -f0_min = 50.0 -f0_mel_min = 1127 * np.log(1 + f0_min / 700) -f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - -# def normalize_f0(f0, random_scale=True): -# f0_norm = f0.clone() # create a copy of the input Tensor -# batch_size, _, frame_length = f0_norm.shape -# for i in range(batch_size): -# means = torch.mean(f0_norm[i, 0, :]) -# if random_scale: -# factor = random.uniform(0.8, 1.2) -# else: -# factor = 1 -# f0_norm[i, 0, :] = (f0_norm[i, 0, :] - means) * factor -# return f0_norm -# def normalize_f0(f0, random_scale=True): -# means = torch.mean(f0[:, 0, :], dim=1, keepdim=True) -# if random_scale: -# factor = torch.Tensor(f0.shape[0],1).uniform_(0.8, 1.2).to(f0.device) -# else: -# factor = torch.ones(f0.shape[0], 1, 1).to(f0.device) -# f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1) -# return f0_norm -def normalize_f0(f0, x_mask, uv, random_scale=True): - # calculate means based on x_mask - uv_sum = torch.sum(uv, dim=1, keepdim=True) - uv_sum[uv_sum == 0] = 9999 - means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / uv_sum - - if random_scale: - factor = 
torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device) - else: - factor = torch.ones(f0.shape[0], 1).to(f0.device) - # normalize f0 based on means and factor - f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1) - if torch.isnan(f0_norm).any(): - exit(0) - return f0_norm * x_mask - - -def plot_data_to_numpy(x, y): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - plt.plot(x) - plt.plot(y) - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - - -def interpolate_f0(f0): - ''' - 对F0进行插值处理 - ''' - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] - last_value = data[i] - - return ip_data[:,0], vuv_vector[:,0] - - -def compute_f0_parselmouth(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512): - import parselmouth - x = wav_numpy - if p_len is None: - p_len = x.shape[0]//hop_length - else: - assert abs(p_len-x.shape[0]//hop_length) < 4, "pad length error" - time_step = hop_length / sampling_rate * 1000 - f0_min = 50 - f0_max = 1100 - f0 = parselmouth.Sound(x, sampling_rate).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - - pad_size=(p_len - len(f0) + 1) // 2 - if(pad_size>0 or p_len - len(f0) - pad_size>0): - f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant') - return f0 - -def resize_f0(x, target_len): - source = np.array(x) - source[source<0.001] = np.nan - target = np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source) - res = np.nan_to_num(target) - return res - -def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512): - import pyworld - if p_len is None: - p_len = wav_numpy.shape[0]//hop_length - f0, t = pyworld.dio( - wav_numpy.astype(np.double), - fs=sampling_rate, - f0_ceil=800, - frame_period=1000 * hop_length / sampling_rate, - ) - f0 = pyworld.stonemask(wav_numpy.astype(np.double), f0, t, sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return resize_f0(f0, p_len) - -def f0_to_coarse(f0): - is_torch = isinstance(f0, torch.Tensor) - f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1 - - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1 - f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int) - assert f0_coarse.max() <= 255 and 
f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min()) - return f0_coarse - - -def get_hubert_model(): - vec_path = "hubert/checkpoint_best_legacy_500.pt" - print("load model(s) from {}".format(vec_path)) - from fairseq import checkpoint_utils - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [vec_path], - suffix="", - ) - model = models[0] - model.eval() - return model - -def get_hubert_content(hmodel, wav_16k_tensor): - feats = wav_16k_tensor - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - inputs = { - "source": feats.to(wav_16k_tensor.device), - "padding_mask": padding_mask.to(wav_16k_tensor.device), - "output_layer": 9, # layer 9 - } - with torch.no_grad(): - logits = hmodel.extract_features(**inputs) - feats = hmodel.final_proj(logits[0]) - return feats.transpose(1, 2) - - -def get_content(cmodel, y): - with torch.no_grad(): - c = cmodel.extract_features(y.squeeze(1))[0] - c = c.transpose(1, 2) - return c - - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - # assert "dec" in k or "disc" in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: 
sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], key=sort_key) - to_del = [os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - 
f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -def repeat_expand_2d(content, target_len): - # content : [h, t] - - src_len = content.shape[-1] - target = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(content.device) - temp = torch.arange(src_len+1) * target_len / src_len - current_pos = 0 - for i in range(target_len): - if i < temp[current_pos+1]: - target[:, i] = content[:, current_pos] - else: - current_pos += 1 - target[:, i] = content[:, current_pos] - - return target - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() - diff --git a/spaces/YazawaSunrise/so-vits-svc-LoveLive/hubert/hubert_model.py b/spaces/YazawaSunrise/so-vits-svc-LoveLive/hubert/hubert_model.py deleted file mode 100644 index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000 --- a/spaces/YazawaSunrise/so-vits-svc-LoveLive/hubert/hubert_model.py +++ /dev/null @@ -1,222 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = 
FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - x, mask = self.encode(x) - x = self.proj(x) - logits = self.logits(x) - return logits, mask - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - @torch.inference_mode() - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - 
self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. - Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/YotamNitzan/domain-expansion/style_mixing.py b/spaces/YotamNitzan/domain-expansion/style_mixing.py deleted file mode 100644 index c47bebbc44c0126b6fd00a55b8b487dc7b159653..0000000000000000000000000000000000000000 --- a/spaces/YotamNitzan/domain-expansion/style_mixing.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -"""Generate style mixing image matrix using pretrained network pickle.""" - -import os -import re -from typing import List - -import click -import dnnlib -import numpy as np -import PIL.Image -import torch - -import legacy - -#---------------------------------------------------------------------------- - -def num_range(s: str) -> List[int]: - '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.''' - - range_re = re.compile(r'^(\d+)-(\d+)$') - m = range_re.match(s) - if m: - return list(range(int(m.group(1)), int(m.group(2))+1)) - vals = s.split(',') - return [int(x) for x in vals] - -#---------------------------------------------------------------------------- - -@click.command() -@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) -@click.option('--rows', 'row_seeds', type=num_range, help='Random seeds to use for image rows', required=True) -@click.option('--cols', 'col_seeds', type=num_range, help='Random seeds to use for image columns', required=True) -@click.option('--styles', 'col_styles', type=num_range, help='Style layer range', default='0-6', show_default=True) -@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) -@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True) -@click.option('--outdir', type=str, required=True) -def generate_style_mix( - network_pkl: str, - row_seeds: List[int], - col_seeds: List[int], - col_styles: List[int], - truncation_psi: float, - noise_mode: str, - outdir: str -): - """Generate images using pretrained network pickle. - - Examples: - - \b - python style_mixing.py --outdir=out --rows=85,100,75,458,1500 --cols=55,821,1789,293 \\ - --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl - """ - print('Loading networks from "%s"...' 
% network_pkl) - device = torch.device('cuda') - with dnnlib.util.open_url(network_pkl) as f: - G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore - - os.makedirs(outdir, exist_ok=True) - - print('Generating W vectors...') - all_seeds = list(set(row_seeds + col_seeds)) - all_z = np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds]) - all_w = G.mapping(torch.from_numpy(all_z).to(device), None) - w_avg = G.mapping.w_avg - all_w = w_avg + (all_w - w_avg) * truncation_psi - w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))} - - print('Generating images...') - all_images = G.synthesis(all_w, noise_mode=noise_mode) - all_images = (all_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy() - image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))} - - print('Generating style-mixed images...') - for row_seed in row_seeds: - for col_seed in col_seeds: - w = w_dict[row_seed].clone() - w[col_styles] = w_dict[col_seed][col_styles] - image = G.synthesis(w[np.newaxis], noise_mode=noise_mode) - image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) - image_dict[(row_seed, col_seed)] = image[0].cpu().numpy() - - print('Saving images...') - os.makedirs(outdir, exist_ok=True) - for (row_seed, col_seed), image in image_dict.items(): - PIL.Image.fromarray(image, 'RGB').save(f'{outdir}/{row_seed}-{col_seed}.png') - - print('Saving image grid...') - W = G.img_resolution - H = G.img_resolution - canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black') - for row_idx, row_seed in enumerate([0] + row_seeds): - for col_idx, col_seed in enumerate([0] + col_seeds): - if row_idx == 0 and col_idx == 0: - continue - key = (row_seed, col_seed) - if row_idx == 0: - key = (col_seed, col_seed) - if col_idx == 0: - key = (row_seed, row_seed) - canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx)) - canvas.save(f'{outdir}/grid.png') - - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - generate_style_mix() # pylint: disable=no-value-for-parameter - -#---------------------------------------------------------------------------- diff --git a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py b/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py deleted file mode 100644 index 2753b3ddee43c7a9fe28d1824db5d786e7e1ad59..0000000000000000000000000000000000000000 --- a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py +++ /dev/null @@ -1,297 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import DropPath - - -class FeatureResizer(nn.Module): - """ - This class takes as input a set of embeddings of dimension C1 and outputs a set of - embedding of dimension C2, after a linear transformation, dropout and normalization (LN). 
- """ - - def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): - super().__init__() - self.do_ln = do_ln - # Object feature encoding - self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) - self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) - self.dropout = nn.Dropout(dropout) - - def forward(self, encoder_features): - x = self.fc(encoder_features) - if self.do_ln: - x = self.layer_norm(x) - output = self.dropout(x) - return output - - -def l1norm(X, dim, eps=1e-8): - """L1-normalize columns of X""" - norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps - X = torch.div(X, norm) - return X - - -def l2norm(X, dim, eps=1e-8): - """L2-normalize columns of X""" - norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps - X = torch.div(X, norm) - return X - - -def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8): - """ - query: (n_context, queryL, d) - context: (n_context, sourceL, d) - """ - batch_size_q, queryL = query.size(0), query.size(1) - batch_size, sourceL = context.size(0), context.size(1) - - # Get attention - # --> (batch, d, queryL) - queryT = torch.transpose(query, 1, 2) - - # (batch, sourceL, d)(batch, d, queryL) - # --> (batch, sourceL, queryL) - attn = torch.bmm(context, queryT) - if raw_feature_norm == "softmax": - # --> (batch*sourceL, queryL) - attn = attn.view(batch_size * sourceL, queryL) - attn = nn.Softmax()(attn) - # --> (batch, sourceL, queryL) - attn = attn.view(batch_size, sourceL, queryL) - elif raw_feature_norm == "l2norm": - attn = l2norm(attn, 2) - elif raw_feature_norm == "clipped_l2norm": - attn = nn.LeakyReLU(0.1)(attn) - attn = l2norm(attn, 2) - else: - raise ValueError("unknown first norm type:", raw_feature_norm) - # --> (batch, queryL, sourceL) - attn = torch.transpose(attn, 1, 2).contiguous() - # --> (batch*queryL, sourceL) - attn = attn.view(batch_size * queryL, sourceL) - attn = nn.Softmax()(attn * smooth) - # --> (batch, queryL, sourceL) - attn = attn.view(batch_size, queryL, sourceL) - # --> (batch, sourceL, queryL) - attnT = torch.transpose(attn, 1, 2).contiguous() - - # --> (batch, d, sourceL) - contextT = torch.transpose(context, 1, 2) - # (batch x d x sourceL)(batch x sourceL x queryL) - # --> (batch, d, queryL) - weightedContext = torch.bmm(contextT, attnT) - # --> (batch, queryL, d) - weightedContext = torch.transpose(weightedContext, 1, 2) - - return weightedContext, attnT - - -class BiMultiHeadAttention(nn.Module): - def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None): - super(BiMultiHeadAttention, self).__init__() - - self.embed_dim = embed_dim - self.num_heads = num_heads - self.head_dim = embed_dim // num_heads - self.v_dim = v_dim - self.l_dim = l_dim - - assert ( - self.head_dim * self.num_heads == self.embed_dim - ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." 
- self.scale = self.head_dim ** (-0.5) - self.dropout = dropout - - self.v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.l_proj = nn.Linear(self.l_dim, self.embed_dim) - self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim) - - self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim) - self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim) - - self.stable_softmax_2d = True - self.clamp_min_for_underflow = True - self.clamp_max_for_overflow = True - - self._reset_parameters() - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def _reset_parameters(self): - nn.init.xavier_uniform_(self.v_proj.weight) - self.v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.l_proj.weight) - self.l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_v_proj.weight) - self.values_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_l_proj.weight) - self.values_l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_v_proj.weight) - self.out_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_l_proj.weight) - self.out_l_proj.bias.data.fill_(0) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - """_summary_ - - Args: - v (_type_): bs, n_img, dim - l (_type_): bs, n_text, dim - attention_mask_v (_type_, optional): _description_. bs, n_img - attention_mask_l (_type_, optional): _description_. bs, n_text - - Returns: - _type_: _description_ - """ - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - bsz, tgt_len, _ = v.size() - - query_states = self.v_proj(v) * self.scale - key_states = self._shape(self.l_proj(l), -1, bsz) - value_v_states = self._shape(self.values_v_proj(v), -1, bsz) - value_l_states = self._shape(self.values_l_proj(l), -1, bsz) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_v_states = value_v_states.view(*proj_shape) - value_l_states = value_l_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" - ) - - if self.stable_softmax_2d: - attn_weights = attn_weights - attn_weights.max() - - if self.clamp_min_for_underflow: - attn_weights = torch.clamp( - attn_weights, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights = torch.clamp( - attn_weights, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - attn_weights_T = attn_weights.transpose(1, 2) - attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0] - if self.clamp_min_for_underflow: - attn_weights_l = torch.clamp( - attn_weights_l, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights_l = torch.clamp( - attn_weights_l, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - # mask vison for language - if attention_mask_v is not None: - attention_mask_v = ( - attention_mask_v[:, None, None, 
:].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights_l.masked_fill_(attention_mask_v, float("-inf")) - - attn_weights_l = attn_weights_l.softmax(dim=-1) - - # mask language for vision - if attention_mask_l is not None: - attention_mask_l = ( - attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights.masked_fill_(attention_mask_l, float("-inf")) - attn_weights_v = attn_weights.softmax(dim=-1) - - attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training) - attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training) - - attn_output_v = torch.bmm(attn_probs_v, value_l_states) - attn_output_l = torch.bmm(attn_probs_l, value_v_states) - - if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}" - ) - - if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim): - raise ValueError( - f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}" - ) - - attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output_v = attn_output_v.transpose(1, 2) - attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim) - - attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim) - attn_output_l = attn_output_l.transpose(1, 2) - attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim) - - attn_output_v = self.out_v_proj(attn_output_v) - attn_output_l = self.out_l_proj(attn_output_l) - - return attn_output_v, attn_output_l - - -# Bi-Direction MHA (text->image, image->text) -class BiAttentionBlock(nn.Module): - def __init__( - self, - v_dim, - l_dim, - embed_dim, - num_heads, - dropout=0.1, - drop_path=0.0, - init_values=1e-4, - cfg=None, - ): - """ - Inputs: - embed_dim - Dimensionality of input and attention feature vectors - hidden_dim - Dimensionality of hidden layer in feed-forward network - (usually 2-4x larger than embed_dim) - num_heads - Number of heads to use in the Multi-Head Attention block - dropout - Amount of dropout to apply in the feed-forward network - """ - super(BiAttentionBlock, self).__init__() - - # pre layer norm - self.layer_norm_v = nn.LayerNorm(v_dim) - self.layer_norm_l = nn.LayerNorm(l_dim) - self.attn = BiMultiHeadAttention( - v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout - ) - - # add layer scale for training stability - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True) - self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - v = self.layer_norm_v(v) - l = self.layer_norm_l(l) - delta_v, delta_l = self.attn( - v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l - ) - # v, l = v + delta_v, l + delta_l - v = v + self.drop_path(self.gamma_v * delta_v) - l = l + self.drop_path(self.gamma_l * delta_l) - return v, l - - # def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None) diff --git a/spaces/ZJunTvT/ZJunChat/run_Windows.bat b/spaces/ZJunTvT/ZJunChat/run_Windows.bat deleted file mode 100644 index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000 --- 
a/spaces/ZJunTvT/ZJunChat/run_Windows.bat +++ /dev/null @@ -1,5 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" diff --git a/spaces/a-v-bely/spanish-task-generator/utilities_language_general/esp_constants.py b/spaces/a-v-bely/spanish-task-generator/utilities_language_general/esp_constants.py deleted file mode 100644 index 034492b0f865f5755f3c35b4afa40ae9f6057447..0000000000000000000000000000000000000000 --- a/spaces/a-v-bely/spanish-task-generator/utilities_language_general/esp_constants.py +++ /dev/null @@ -1,63 +0,0 @@ -import json -import spacy -import gensim -import streamlit as st -from transformers import pipeline - - -@st.cache_resource -def load_w2v(model_path): - with st.spinner('Загружаю языковую модель'): - _w2v_model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=True) - return _w2v_model - - -@st.cache_resource -def load_spacy(): - with st.spinner('Загружаю морфо-синтаксический парсер'): - _nlp = spacy.load('es_core_news_lg') - return _nlp - - -@st.cache_resource -def load_bert(): - with st.spinner('Загружаю языковую модель'): - _pipeline = pipeline(task="fill-mask", model="a-v-white/bert-base-spanish-wwm-cased-finetuned-literature-pro") - return _pipeline - - -nlp = load_spacy() -w2v_model_1_path = r'model1.gz' -w2v_model_2_path = r'model2.gz' - -# Upload minimums -a1_path, a1_target_set = r'lexical_minimums/A1_MINIMUM.txt', set() -a2_path, a2_target_set = r'lexical_minimums/A2_MINIMUM.txt', set() -b1_path, b1_target_set = r'lexical_minimums/B1_MINIMUM.txt', set() -b2_path, b2_target_set = r'lexical_minimums/B2_MINIMUM.txt', set() -c1_path, c1_target_set = r'lexical_minimums/C1_MINIMUM.txt', set() -c2_path, c2_target_set = r'lexical_minimums/C2_MINIMUM.txt', set() -minimums_paths = (a1_path, a2_path, b1_path, b2_path) -minimums_sets = (a1_target_set, a2_target_set, b1_target_set, b2_target_set, c1_target_set, c2_target_set) -for i in range(len(minimums_paths)): - with open(minimums_paths[i], 'r', encoding='utf-8') as read_file: - for line in read_file: - minimums_sets[i].add(line.strip()) - -a1_distractor_set = a1_target_set -a2_distractor_set = a2_target_set.union(a1_target_set) -b1_distractor_set = b1_target_set.union(a2_target_set) -b2_distractor_set = b2_target_set.union(b1_target_set) -c1_distractor_set = c1_target_set.union(b2_target_set) -c2_distractor_set = c2_target_set.union(c1_target_set) - -with open('language_data/phrases.json', 'r', encoding='utf-8') as f: - PHRASES = set(json.load(f)['PHRASES']) - -with open('language_data/fix_irregular_lemma.json', 'r', encoding='utf-8') as f: - FIX_LEMMA = json.load(f) - -SIMILARITY_VALUES = {'A1': 1.0, 'A2': 1.0, 'B1': 1.0, 'B2': 1.0, 'C1': 1.0, 'C2': 1.0, 'Без уровня': 1.0} -SIMILARITY_VALUES_bert = {'A1': 1.0, 'A2': 1.0, 'B1': 1.0, 'B2': 1.0, 'C1': 1.0, 'C2': 1.0, 'Без уровня': 1.0} - -BAD_USER_TARGET_WORDS = [] diff --git a/spaces/aadnk/whisper-webui/src/prompts/prependPromptStrategy.py b/spaces/aadnk/whisper-webui/src/prompts/prependPromptStrategy.py deleted file mode 100644 index 6f8b6eba5b98310f57a656db73b5e415de3af958..0000000000000000000000000000000000000000 --- a/spaces/aadnk/whisper-webui/src/prompts/prependPromptStrategy.py +++ /dev/null @@ -1,31 +0,0 @@ -from src.config import VadInitialPromptMode -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy - -class PrependPromptStrategy(AbstractPromptStrategy): - """ - A simple prompt strategy that prepends a single prompt to all 
segments of audio, or prepends the prompt to the first segment of audio. - """ - def __init__(self, initial_prompt: str, initial_prompt_mode: VadInitialPromptMode): - """ - Parameters - ---------- - initial_prompt: str - The initial prompt to use for the transcription. - initial_prompt_mode: VadInitialPromptMode - The mode to use for the initial prompt. If set to PREPEND_FIRST_SEGMENT, the initial prompt will be prepended to the first segment of audio. - If set to PREPEND_ALL_SEGMENTS, the initial prompt will be prepended to all segments of audio. - """ - self.initial_prompt = initial_prompt - self.initial_prompt_mode = initial_prompt_mode - - # This is a simple prompt strategy, so we only support these two modes - if initial_prompt_mode not in [VadInitialPromptMode.PREPEND_ALL_SEGMENTS, VadInitialPromptMode.PREPREND_FIRST_SEGMENT]: - raise ValueError(f"Unsupported initial prompt mode {initial_prompt_mode}") - - def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str: - if (self.initial_prompt_mode == VadInitialPromptMode.PREPEND_ALL_SEGMENTS): - return self._concat_prompt(self.initial_prompt, whisper_prompt) - elif (self.initial_prompt_mode == VadInitialPromptMode.PREPREND_FIRST_SEGMENT): - return self._concat_prompt(self.initial_prompt, whisper_prompt) if segment_index == 0 else whisper_prompt - else: - raise ValueError(f"Unknown initial prompt mode {self.initial_prompt_mode}") \ No newline at end of file diff --git a/spaces/abdvl/datahub_qa_bot/docs/advanced/mcp-mcl.md b/spaces/abdvl/datahub_qa_bot/docs/advanced/mcp-mcl.md deleted file mode 100644 index 5a9052c19155b6e3d4f9531364d367579d21a833..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/advanced/mcp-mcl.md +++ /dev/null @@ -1,159 +0,0 @@ -# MetadataChangeProposal & MetadataChangeLog Events - -## Overview & Vision - -As of release v0.8.7, two new important event streams have been introduced: MetadataChangeProposal & MetadataChangeLog. These topics serve as a more generic (and more appropriately named) versions of the classic MetadataChangeEvent and MetadataAuditEvent events, used for a) proposing and b) logging changes to the DataHub Metadata Graph. - -With these events, we move towards a more generic world, in which Metadata models are not strongly-typed parts of the event schemas themselves. This provides flexibility, allowing for the core models comprising the Metadata Graph to be added and changed dynamically, without requiring structural updates to Kafka or REST API schemas used for ingesting and serving Metadata. - -Moreover, we've focused in on the "aspect" as the atomic unit of write in DataHub. MetadataChangeProposal & MetadataChangeLog with carry only a single aspect in their payload, as opposed to the list of aspects carried by today's MCE & MAE. This more accurately reflects the atomicity contract of the metadata model, hopefully lessening confusion about transactional guarantees for multi-aspect writes in addition to making it simpler to tune into the metadata changes a consumer cares about. - -Making these events more generic does not come for free; we give up some in the form of Restli and Kafka-native schema validation and defer this responsibility to DataHub itself, who is the sole enforcer of the graph model contracts. Additionally, we add an extra step to unbundling the actual metadata by requiring a double-deserialization: that of the event / response body itself and another of the nested Metadata aspect. 
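As a concrete illustration of that double-deserialization step, the sketch below shows how a consumer might unbundle a JSON-serialized aspect from an event body. The field names mirror the MetadataChangeProposal and GenericAspect records defined in the next section; the sample payload, the URN, and the helper function itself are illustrative assumptions, not part of any shipped client library.

```python
import json

# Hypothetical consumer-side sketch of the "double deserialization" described above:
# first decode the event envelope, then decode the nested GenericAspect payload.
# raw_event stands in for a message body consumed from Kafka or a REST response.

def unbundle_aspect(raw_event: bytes) -> dict:
    # First deserialization: the event / response body itself.
    event = json.loads(raw_event)

    aspect = event.get("aspect")
    if aspect is None:
        raise ValueError("event carries no aspect payload")

    # Only "application/json" is supported as a contentType at the moment.
    if aspect["contentType"] != "application/json":
        raise ValueError(f"unsupported contentType: {aspect['contentType']}")

    # Second deserialization: the nested, generically-typed aspect value.
    return json.loads(aspect["value"])


# Illustrative event body (field values are made up for the example).
raw_event = b'''{
  "entityType": "dataset",
  "entityUrn": "urn:li:dataset:(urn:li:dataPlatform:hive,SampleTable,PROD)",
  "changeType": "UPSERT",
  "aspectName": "datasetProfile",
  "aspect": {
    "contentType": "application/json",
    "value": "{\\"rowCount\\": 4500, \\"columnCount\\": 12}"
  }
}'''

print(unbundle_aspect(raw_event))  # -> {'rowCount': 4500, 'columnCount': 12}
```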
- -To mitigate these downsides, we are committed to providing cross-language client libraries capable of doing the hard work for you. We intend to publish these as strongly-typed artifacts generated from the "default" model set DataHub ships with. This stands in addition to an initiative to introduce an OpenAPI layer in DataHub's backend (gms) which would provide a strongly typed model. - -Ultimately, we intend to realize a state in which the Entities and Aspect schemas can be altered without requiring generated code and without maintaining a single mega-model schema (looking at you, Snapshot.pdl). The intention is that changes to the metadata model become even easier than they are today. - -## Modeling - -A Metadata Change Proposal is defined (in PDL) as follows - -```protobuf -record MetadataChangeProposal { - - /** - * Kafka audit header. See go/kafkaauditheader for more info. - */ - auditHeader: optional KafkaAuditHeader - - /** - * Type of the entity being written to - */ - entityType: string - - /** - * Urn of the entity being written - **/ - entityUrn: optional Urn, - - /** - * Key aspect of the entity being written - */ - entityKeyAspect: optional GenericAspect - - /** - * Type of change being proposed - */ - changeType: ChangeType - - /** - * Aspect of the entity being written to - * Not filling this out implies that the writer wants to affect the entire entity - * Note: This is only valid for CREATE and DELETE operations. - **/ - aspectName: optional string - - aspect: optional GenericAspect - - /** - * A string->string map of custom properties that one might want to attach to an event - **/ - systemMetadata: optional SystemMetadata - -} -``` - -Each proposal comprises of the following: - -1. entityType - - Refers to the type of the entity e.g. dataset, chart - -2. entityUrn - - Urn of the entity being updated. Note, **exactly one** of entityUrn or entityKeyAspect must be filled out to correctly identify an entity. - -3. entityKeyAspect - - Key aspect of the entity. Instead of having a string URN, we will support identifying entities by their key aspect structs. Note, this is not supported as of now. - -4. changeType - - Type of change you are proposing: one of - - - UPSERT: Insert if not exists, update otherwise - - CREATE: Insert if not exists, fail otherwise - - UPDATE: Update if exists, fail otherwise - - DELETE: Delete - - PATCH: Patch the aspect instead of doing a full replace - - Only UPSERT is supported as of now. - -5. aspectName - - Name of the aspect. Must match the name in the "@Aspect" annotation. - -6. aspect - - To support strongly typed aspects, without having to keep track of a union of all existing aspects, we introduced a new object called GenericAspect. - - ```xml - record GenericAspect { - value: bytes - contentType: string - } - ``` - - It contains the type of serialization and the serialized value. Note, currently we only support "application/json" as contentType but will be adding more forms of serialization in the future. Validation of the serialized object happens in GMS against the schema matching the aspectName. - -7. systemMetadata - - Extra metadata about the proposal like run_id or updated timestamp. - -GMS processes the proposal and produces the Metadata Change Log, which looks like this. 
- -```protobuf -record MetadataChangeLog includes MetadataChangeProposal { - - previousAspectValue: optional GenericAspect - - previousSystemMetadata: optional SystemMetadata - -} -``` - -It includes all fields in the proposal, but also has the previous version of the aspect value and system metadata. This allows the MCL processor to know the previous value before deciding to update all indices. - -## Topics - -Following the change in our event models, we introduced 4 new topics. The old topics will get deprecated as we fully migrate to this model. - -1. **MetadataChangeProposal_v1, FailedMetadataChangeProposal_v1** - - Analogous to the MCE topic, proposals that get produced into the MetadataChangeProposal_v1 topic, will get ingested to GMS asynchronously, and any failed ingestion will produce a failed MCP in the FailedMetadataChangeProposal_v1 topic. - - -2. **MetadataChangeLog_Versioned_v1** - - Analogous to the MAE topic, MCLs for versioned aspects will get produced into this topic. Since versioned aspects have a source of truth that can be separately backed up, the retention of this topic is short (by default 7 days). Note both this and the next topic are consumed by the same MCL processor. - - -3. **MetadataChangeLog_Timeseries_v1** - - Analogous to the MAE topics, MCLs for timeseries aspects will get produced into this topic. Since timeseries aspects do not have a source of truth, but rather gets ingested straight to elasticsearch, we set the retention of this topic to be longer (90 days). You can backup timeseries aspect by replaying this topic. - -## Configuration - -With MetadataChangeProposal and MetadataChangeLog, we will introduce a new mechanism for configuring the association between Metadata Entities & Aspects. Specifically, the Snapshot.pdl model will no longer encode this information by way of [Rest.li](http://rest.li) union. Instead, a more explicit yaml file will provide these links. This file will be leveraged at runtime to construct the in-memory Entity Registry which contains the global Metadata schema along with some additional metadata. - -An example of the configuration file that will be used for MCP & MCL, which defines a "dataset" entity that is associated with to two aspects: "datasetKey" and "datasetProfile". 
- -``` -# entity-registry.yml - -entities: - - name: dataset - keyAspect: datasetKey - aspects: - - datasetProfile -``` \ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py deleted file mode 100644 index 330b778ebad8d48d55d09ddd42baa70ec10ae463..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py +++ /dev/null @@ -1,43 +0,0 @@ -from mmcv.cnn import ConvModule - -from mmdet.models.builder import HEADS -from .fcn_mask_head import FCNMaskHead - - -@HEADS.register_module() -class HTCMaskHead(FCNMaskHead): - - def __init__(self, with_conv_res=True, *args, **kwargs): - super(HTCMaskHead, self).__init__(*args, **kwargs) - self.with_conv_res = with_conv_res - if self.with_conv_res: - self.conv_res = ConvModule( - self.conv_out_channels, - self.conv_out_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - - def init_weights(self): - super(HTCMaskHead, self).init_weights() - if self.with_conv_res: - self.conv_res.init_weights() - - def forward(self, x, res_feat=None, return_logits=True, return_feat=True): - if res_feat is not None: - assert self.with_conv_res - res_feat = self.conv_res(res_feat) - x = x + res_feat - for conv in self.convs: - x = conv(x) - res_feat = x - outs = [] - if return_logits: - x = self.upsample(x) - if self.upsample_method == 'deconv': - x = self.relu(x) - mask_pred = self.conv_logits(x) - outs.append(mask_pred) - if return_feat: - outs.append(res_feat) - return outs if len(outs) > 1 else outs[0] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/datasets/stare.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/datasets/stare.py deleted file mode 100644 index 15fe527680755815b0f06dfed32f35ee5af02e63..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/datasets/stare.py +++ /dev/null @@ -1,39 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class STAREDataset(CustomDataset): - """STARE dataset. - - In segmentation map annotation for STARE, 0 stands for background, which is - included in 2 categories. ``reduce_zero_label`` is fixed to False. The - ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to - '.ah.png'. 
- """ - - CLASSES = ('background', 'vessel') - - PALETTE = [[120, 120, 120], [6, 230, 230]] - - def __init__(self, **kwargs): - super(STAREDataset, self).__init__( - img_suffix='.png', - seg_map_suffix='.ah.png', - reduce_zero_label=False, - **kwargs) - assert osp.exists(self.img_dir) diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/renderer.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/renderer.py deleted file mode 100644 index 5ae14c5cdb1785226a52ae6b71b08f01de069962..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/renderer.py +++ /dev/null @@ -1,1339 +0,0 @@ -"""PBR renderer for Python. - -Author: Matthew Matl -""" -import sys - -import numpy as np -import PIL - -from .constants import (RenderFlags, TextAlign, GLTF, BufFlags, TexFlags, - ProgramFlags, DEFAULT_Z_FAR, DEFAULT_Z_NEAR, - SHADOW_TEX_SZ, MAX_N_LIGHTS) -from .shader_program import ShaderProgramCache -from .material import MetallicRoughnessMaterial, SpecularGlossinessMaterial -from .light import PointLight, SpotLight, DirectionalLight -from .font import FontCache -from .utils import format_color_vector - -from OpenGL.GL import * - - -class Renderer(object): - """Class for handling all rendering operations on a scene. - - Note - ---- - This renderer relies on the existence of an OpenGL context and - does not create one on its own. - - Parameters - ---------- - viewport_width : int - Width of the viewport in pixels. - viewport_height : int - Width of the viewport height in pixels. - point_size : float, optional - Size of points in pixels. Defaults to 1.0. - """ - - def __init__(self, viewport_width, viewport_height, point_size=1.0): - self.dpscale = 1 - # Scaling needed on retina displays - if sys.platform == 'darwin': - self.dpscale = 2 - - self.viewport_width = viewport_width - self.viewport_height = viewport_height - self.point_size = point_size - - # Optional framebuffer for offscreen renders - self._main_fb = None - self._main_cb = None - self._main_db = None - self._main_fb_ms = None - self._main_cb_ms = None - self._main_db_ms = None - self._main_fb_dims = (None, None) - self._shadow_fb = None - self._latest_znear = DEFAULT_Z_NEAR - self._latest_zfar = DEFAULT_Z_FAR - - # Shader Program Cache - self._program_cache = ShaderProgramCache() - self._font_cache = FontCache() - self._meshes = set() - self._mesh_textures = set() - self._shadow_textures = set() - self._texture_alloc_idx = 0 - - @property - def viewport_width(self): - """int : The width of the main viewport, in pixels. - """ - return self._viewport_width - - @viewport_width.setter - def viewport_width(self, value): - self._viewport_width = self.dpscale * value - - @property - def viewport_height(self): - """int : The height of the main viewport, in pixels. - """ - return self._viewport_height - - @viewport_height.setter - def viewport_height(self, value): - self._viewport_height = self.dpscale * value - - @property - def point_size(self): - """float : The size of screen-space points, in pixels. - """ - return self._point_size - - @point_size.setter - def point_size(self, value): - self._point_size = float(value) - - def render(self, scene, flags, seg_node_map=None): - """Render a scene with the given set of flags. - - Parameters - ---------- - scene : :class:`Scene` - A scene to render. - flags : int - A specification from :class:`.RenderFlags`. - seg_node_map : dict - A map from :class:`.Node` objects to (3,) colors for each. 
- If specified along with flags set to :attr:`.RenderFlags.SEG`, - the color image will be a segmentation image. - - Returns - ------- - color_im : (h, w, 3) uint8 or (h, w, 4) uint8 - If :attr:`RenderFlags.OFFSCREEN` is set, the color buffer. This is - normally an RGB buffer, but if :attr:`.RenderFlags.RGBA` is set, - the buffer will be a full RGBA buffer. - depth_im : (h, w) float32 - If :attr:`RenderFlags.OFFSCREEN` is set, the depth buffer - in linear units. - """ - # Update context with meshes and textures - self._update_context(scene, flags) - - # Render necessary shadow maps - if not bool(flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG): - for ln in scene.light_nodes: - take_pass = False - if (isinstance(ln.light, DirectionalLight) and - bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)): - take_pass = True - elif (isinstance(ln.light, SpotLight) and - bool(flags & RenderFlags.SHADOWS_SPOT)): - take_pass = True - elif (isinstance(ln.light, PointLight) and - bool(flags & RenderFlags.SHADOWS_POINT)): - take_pass = True - if take_pass: - self._shadow_mapping_pass(scene, ln, flags) - - # Make forward pass - retval = self._forward_pass(scene, flags, seg_node_map=seg_node_map) - - # If necessary, make normals pass - if flags & (RenderFlags.VERTEX_NORMALS | RenderFlags.FACE_NORMALS): - self._normals_pass(scene, flags) - - # Update camera settings for retrieving depth buffers - self._latest_znear = scene.main_camera_node.camera.znear - self._latest_zfar = scene.main_camera_node.camera.zfar - - return retval - - def render_text(self, text, x, y, font_name='OpenSans-Regular', - font_pt=40, color=None, scale=1.0, - align=TextAlign.BOTTOM_LEFT): - """Render text into the current viewport. - - Note - ---- - This cannot be done into an offscreen buffer. - - Parameters - ---------- - text : str - The text to render. - x : int - Horizontal pixel location of text. - y : int - Vertical pixel location of text. - font_name : str - Name of font, from the ``pyrender/fonts`` folder, or - a path to a ``.ttf`` file. - font_pt : int - Height of the text, in font points. - color : (4,) float - The color of the text. Default is black. - scale : int - Scaling factor for text. - align : int - One of the :class:`TextAlign` options which specifies where the - ``x`` and ``y`` parameters lie on the text. For example, - :attr:`TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate - the position of the bottom-left corner of the textbox. - """ - x *= self.dpscale - y *= self.dpscale - font_pt *= self.dpscale - - if color is None: - color = np.array([0.0, 0.0, 0.0, 1.0]) - else: - color = format_color_vector(color, 4) - - # Set up viewport for render - self._configure_forward_pass_viewport(0) - - # Load font - font = self._font_cache.get_font(font_name, font_pt) - if not font._in_context(): - font._add_to_context() - - # Load program - program = self._get_text_program() - program._bind() - - # Set uniforms - p = np.eye(4) - p[0,0] = 2.0 / self.viewport_width - p[0,3] = -1.0 - p[1,1] = 2.0 / self.viewport_height - p[1,3] = -1.0 - program.set_uniform('projection', p) - program.set_uniform('text_color', color) - - # Draw text - font.render_string(text, x, y, scale, align) - - def read_color_buf(self): - """Read and return the current viewport's color buffer. - - Alpha cannot be computed for an on-screen buffer. - - Returns - ------- - color_im : (h, w, 3) uint8 - The color buffer in RGB byte format. 
- """ - # Extract color image from frame buffer - width, height = self.viewport_width, self.viewport_height - glBindFramebuffer(GL_READ_FRAMEBUFFER, 0) - glReadBuffer(GL_FRONT) - color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE) - - # Re-format them into numpy arrays - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 3)) - color_im = np.flip(color_im, axis=0) - - # Resize for macos if needed - if sys.platform == 'darwin': - color_im = self._resize_image(color_im, True) - - return color_im - - def read_depth_buf(self): - """Read and return the current viewport's color buffer. - - Returns - ------- - depth_im : (h, w) float32 - The depth buffer in linear units. - """ - width, height = self.viewport_width, self.viewport_height - glBindFramebuffer(GL_READ_FRAMEBUFFER, 0) - glReadBuffer(GL_FRONT) - depth_buf = glReadPixels( - 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT - ) - - depth_im = np.frombuffer(depth_buf, dtype=np.float32) - depth_im = depth_im.reshape((height, width)) - depth_im = np.flip(depth_im, axis=0) - - inf_inds = (depth_im == 1.0) - depth_im = 2.0 * depth_im - 1.0 - z_near, z_far = self._latest_znear, self._latest_zfar - noninf = np.logical_not(inf_inds) - if z_far is None: - depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf]) - else: - depth_im[noninf] = ((2.0 * z_near * z_far) / - (z_far + z_near - depth_im[noninf] * - (z_far - z_near))) - depth_im[inf_inds] = 0.0 - - # Resize for macos if needed - if sys.platform == 'darwin': - depth_im = self._resize_image(depth_im) - - return depth_im - - def delete(self): - """Free all allocated OpenGL resources. - """ - # Free shaders - self._program_cache.clear() - - # Free fonts - self._font_cache.clear() - - # Free meshes - for mesh in self._meshes: - for p in mesh.primitives: - p.delete() - - # Free textures - for mesh_texture in self._mesh_textures: - mesh_texture.delete() - - for shadow_texture in self._shadow_textures: - shadow_texture.delete() - - self._meshes = set() - self._mesh_textures = set() - self._shadow_textures = set() - self._texture_alloc_idx = 0 - - self._delete_main_framebuffer() - self._delete_shadow_framebuffer() - - def __del__(self): - try: - self.delete() - except Exception: - pass - - ########################################################################### - # Rendering passes - ########################################################################### - - def _forward_pass(self, scene, flags, seg_node_map=None): - # Set up viewport for render - self._configure_forward_pass_viewport(flags) - - # Clear it - if bool(flags & RenderFlags.SEG): - glClearColor(0.0, 0.0, 0.0, 1.0) - if seg_node_map is None: - seg_node_map = {} - else: - glClearColor(*scene.bg_color) - - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - - if not bool(flags & RenderFlags.SEG): - glEnable(GL_MULTISAMPLE) - else: - glDisable(GL_MULTISAMPLE) - - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - program = None - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - # If SEG, set color - if bool(flags & RenderFlags.SEG): - if node not in seg_node_map: - continue - color = seg_node_map[node] - if not isinstance(color, (list, tuple, np.ndarray)): - color = np.repeat(color, 3) - else: - color = np.asanyarray(color) - color = color / 255.0 - - for primitive in mesh.primitives: - - # First, get and bind the 
appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.USE_MATERIAL - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - if bool(flags & RenderFlags.SEG): - program.set_uniform('color', color) - - # Next, bind the lighting - if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.FLAT or - flags & RenderFlags.SEG): - self._bind_lighting(scene, program, node, flags) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=flags - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - # If doing offscreen render, copy result from framebuffer and return - if flags & RenderFlags.OFFSCREEN: - return self._read_main_framebuffer(scene, flags) - else: - return - - def _shadow_mapping_pass(self, scene, light_node, flags): - light = light_node.light - - # Set up viewport for render - self._configure_shadow_mapping_viewport(light, flags) - - # Set up camera matrices - V, P = self._get_light_cam_matrices(scene, light_node, flags) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.NONE - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=RenderFlags.DEPTH_ONLY - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - def _normals_pass(self, scene, flags): - # Set up viewport for render - self._configure_forward_pass_viewport(flags) - program = None - - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # Skip objects that don't have normals - if not primitive.buf_flags & BufFlags.NORMAL: - continue - - # First, get and bind the appropriate program - pf = ProgramFlags.NONE - if flags & RenderFlags.VERTEX_NORMALS: - pf = pf | ProgramFlags.VERTEX_NORMALS - if flags & RenderFlags.FACE_NORMALS: - pf = pf | ProgramFlags.FACE_NORMALS - program = self._get_primitive_program(primitive, flags, pf) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform('normal_magnitude', 0.05 * primitive.scale) - program.set_uniform( - 'normal_color', np.array([0.1, 0.1, 1.0, 1.0]) - ) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=RenderFlags.DEPTH_ONLY - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - 
program._unbind() - glFlush() - - ########################################################################### - # Handlers for binding uniforms and drawing primitives - ########################################################################### - - def _bind_and_draw_primitive(self, primitive, pose, program, flags): - # Set model pose matrix - program.set_uniform('M', pose) - - # Bind mesh buffers - primitive._bind() - - # Bind mesh material - if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG): - material = primitive.material - - # Bind textures - tf = material.tex_flags - if tf & TexFlags.NORMAL: - self._bind_texture(material.normalTexture, - 'material.normal_texture', program) - if tf & TexFlags.OCCLUSION: - self._bind_texture(material.occlusionTexture, - 'material.occlusion_texture', program) - if tf & TexFlags.EMISSIVE: - self._bind_texture(material.emissiveTexture, - 'material.emissive_texture', program) - if tf & TexFlags.BASE_COLOR: - self._bind_texture(material.baseColorTexture, - 'material.base_color_texture', program) - if tf & TexFlags.METALLIC_ROUGHNESS: - self._bind_texture(material.metallicRoughnessTexture, - 'material.metallic_roughness_texture', - program) - if tf & TexFlags.DIFFUSE: - self._bind_texture(material.diffuseTexture, - 'material.diffuse_texture', program) - if tf & TexFlags.SPECULAR_GLOSSINESS: - self._bind_texture(material.specularGlossinessTexture, - 'material.specular_glossiness_texture', - program) - - # Bind other uniforms - b = 'material.{}' - program.set_uniform(b.format('emissive_factor'), - material.emissiveFactor) - if isinstance(material, MetallicRoughnessMaterial): - program.set_uniform(b.format('base_color_factor'), - material.baseColorFactor) - program.set_uniform(b.format('metallic_factor'), - material.metallicFactor) - program.set_uniform(b.format('roughness_factor'), - material.roughnessFactor) - elif isinstance(material, SpecularGlossinessMaterial): - program.set_uniform(b.format('diffuse_factor'), - material.diffuseFactor) - program.set_uniform(b.format('specular_factor'), - material.specularFactor) - program.set_uniform(b.format('glossiness_factor'), - material.glossinessFactor) - - # Set blending options - if material.alphaMode == 'BLEND': - glEnable(GL_BLEND) - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) - else: - glEnable(GL_BLEND) - glBlendFunc(GL_ONE, GL_ZERO) - - # Set wireframe mode - wf = material.wireframe - if flags & RenderFlags.FLIP_WIREFRAME: - wf = not wf - if (flags & RenderFlags.ALL_WIREFRAME) or wf: - glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) - else: - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - - # Set culling mode - if material.doubleSided or flags & RenderFlags.SKIP_CULL_FACES: - glDisable(GL_CULL_FACE) - else: - glEnable(GL_CULL_FACE) - glCullFace(GL_BACK) - else: - glEnable(GL_CULL_FACE) - glEnable(GL_BLEND) - glCullFace(GL_BACK) - glBlendFunc(GL_ONE, GL_ZERO) - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - - # Set point size if needed - glDisable(GL_PROGRAM_POINT_SIZE) - if primitive.mode == GLTF.POINTS: - glEnable(GL_PROGRAM_POINT_SIZE) - glPointSize(self.point_size) - - # Render mesh - n_instances = 1 - if primitive.poses is not None: - n_instances = len(primitive.poses) - - if primitive.indices is not None: - glDrawElementsInstanced( - primitive.mode, primitive.indices.size, GL_UNSIGNED_INT, - ctypes.c_void_p(0), n_instances - ) - else: - glDrawArraysInstanced( - primitive.mode, 0, len(primitive.positions), n_instances - ) - - # Unbind mesh buffers - primitive._unbind() - - def 
_bind_lighting(self, scene, program, node, flags): - """Bind all lighting uniform values for a scene. - """ - max_n_lights = self._compute_max_n_lights(flags) - - n_d = min(len(scene.directional_light_nodes), max_n_lights[0]) - n_s = min(len(scene.spot_light_nodes), max_n_lights[1]) - n_p = min(len(scene.point_light_nodes), max_n_lights[2]) - program.set_uniform('ambient_light', scene.ambient_light) - program.set_uniform('n_directional_lights', n_d) - program.set_uniform('n_spot_lights', n_s) - program.set_uniform('n_point_lights', n_p) - plc = 0 - slc = 0 - dlc = 0 - - light_nodes = scene.light_nodes - if (len(scene.directional_light_nodes) > max_n_lights[0] or - len(scene.spot_light_nodes) > max_n_lights[1] or - len(scene.point_light_nodes) > max_n_lights[2]): - light_nodes = self._sorted_nodes_by_distance( - scene, scene.light_nodes, node - ) - - for n in light_nodes: - light = n.light - pose = scene.get_pose(n) - position = pose[:3,3] - direction = -pose[:3,2] - - if isinstance(light, PointLight): - if plc == max_n_lights[2]: - continue - b = 'point_lights[{}].'.format(plc) - plc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_POINT) - program.set_uniform(b + 'position', position) - elif isinstance(light, SpotLight): - if slc == max_n_lights[1]: - continue - b = 'spot_lights[{}].'.format(slc) - slc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_SPOT) - las = 1.0 / max(0.001, np.cos(light.innerConeAngle) - - np.cos(light.outerConeAngle)) - lao = -np.cos(light.outerConeAngle) * las - program.set_uniform(b + 'direction', direction) - program.set_uniform(b + 'position', position) - program.set_uniform(b + 'light_angle_scale', las) - program.set_uniform(b + 'light_angle_offset', lao) - else: - if dlc == max_n_lights[0]: - continue - b = 'directional_lights[{}].'.format(dlc) - dlc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_DIRECTIONAL) - program.set_uniform(b + 'direction', direction) - - program.set_uniform(b + 'color', light.color) - program.set_uniform(b + 'intensity', light.intensity) - # if light.range is not None: - # program.set_uniform(b + 'range', light.range) - # else: - # program.set_uniform(b + 'range', 0) - - if shadow: - self._bind_texture(light.shadow_texture, - b + 'shadow_map', program) - if not isinstance(light, PointLight): - V, P = self._get_light_cam_matrices(scene, n, flags) - program.set_uniform(b + 'light_matrix', P.dot(V)) - else: - raise NotImplementedError( - 'Point light shadows not implemented' - ) - - def _sorted_mesh_nodes(self, scene): - cam_loc = scene.get_pose(scene.main_camera_node)[:3,3] - solid_nodes = [] - trans_nodes = [] - for node in scene.mesh_nodes: - mesh = node.mesh - if mesh.is_transparent: - trans_nodes.append(node) - else: - solid_nodes.append(node) - - # TODO BETTER SORTING METHOD - trans_nodes.sort( - key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc) - ) - solid_nodes.sort( - key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc) - ) - - return solid_nodes + trans_nodes - - def _sorted_nodes_by_distance(self, scene, nodes, compare_node): - nodes = list(nodes) - compare_posn = scene.get_pose(compare_node)[:3,3] - nodes.sort(key=lambda n: np.linalg.norm( - scene.get_pose(n)[:3,3] - compare_posn) - ) - return nodes - - ########################################################################### - # Context Management - ########################################################################### - - def _update_context(self, scene, flags): - - # Update meshes - scene_meshes = scene.meshes - - # Add new meshes to 
context - for mesh in scene_meshes - self._meshes: - for p in mesh.primitives: - p._add_to_context() - - # Remove old meshes from context - for mesh in self._meshes - scene_meshes: - for p in mesh.primitives: - p.delete() - - self._meshes = scene_meshes.copy() - - # Update mesh textures - mesh_textures = set() - for m in scene_meshes: - for p in m.primitives: - mesh_textures |= p.material.textures - - # Add new textures to context - for texture in mesh_textures - self._mesh_textures: - texture._add_to_context() - - # Remove old textures from context - for texture in self._mesh_textures - mesh_textures: - texture.delete() - - self._mesh_textures = mesh_textures.copy() - - shadow_textures = set() - for l in scene.lights: - # Create if needed - active = False - if (isinstance(l, DirectionalLight) and - flags & RenderFlags.SHADOWS_DIRECTIONAL): - active = True - elif (isinstance(l, PointLight) and - flags & RenderFlags.SHADOWS_POINT): - active = True - elif isinstance(l, SpotLight) and flags & RenderFlags.SHADOWS_SPOT: - active = True - - if active and l.shadow_texture is None: - l._generate_shadow_texture() - if l.shadow_texture is not None: - shadow_textures.add(l.shadow_texture) - - # Add new textures to context - for texture in shadow_textures - self._shadow_textures: - texture._add_to_context() - - # Remove old textures from context - for texture in self._shadow_textures - shadow_textures: - texture.delete() - - self._shadow_textures = shadow_textures.copy() - - ########################################################################### - # Texture Management - ########################################################################### - - def _bind_texture(self, texture, uniform_name, program): - """Bind a texture to an active texture unit and return - the texture unit index that was used. 
- """ - tex_id = self._get_next_active_texture() - glActiveTexture(GL_TEXTURE0 + tex_id) - texture._bind() - program.set_uniform(uniform_name, tex_id) - - def _get_next_active_texture(self): - val = self._texture_alloc_idx - self._texture_alloc_idx += 1 - return val - - def _reset_active_textures(self): - self._texture_alloc_idx = 0 - - ########################################################################### - # Camera Matrix Management - ########################################################################### - - def _get_camera_matrices(self, scene): - main_camera_node = scene.main_camera_node - if main_camera_node is None: - raise ValueError('Cannot render scene without a camera') - P = main_camera_node.camera.get_projection_matrix( - width=self.viewport_width, height=self.viewport_height - ) - pose = scene.get_pose(main_camera_node) - V = np.linalg.inv(pose) # V maps from world to camera - return V, P - - def _get_light_cam_matrices(self, scene, light_node, flags): - light = light_node.light - pose = scene.get_pose(light_node).copy() - s = scene.scale - camera = light._get_shadow_camera(s) - P = camera.get_projection_matrix() - if isinstance(light, DirectionalLight): - direction = -pose[:3,2] - c = scene.centroid - loc = c - direction * s - pose[:3,3] = loc - V = np.linalg.inv(pose) # V maps from world to camera - return V, P - - ########################################################################### - # Shader Program Management - ########################################################################### - - def _get_text_program(self): - program = self._program_cache.get_program( - vertex_shader='text.vert', - fragment_shader='text.frag' - ) - - if not program._in_context(): - program._add_to_context() - - return program - - def _compute_max_n_lights(self, flags): - max_n_lights = [MAX_N_LIGHTS, MAX_N_LIGHTS, MAX_N_LIGHTS] - n_tex_units = glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS) - - # Reserved texture units: 6 - # Normal Map - # Occlusion Map - # Emissive Map - # Base Color or Diffuse Map - # MR or SG Map - # Environment cubemap - - n_reserved_textures = 6 - n_available_textures = n_tex_units - n_reserved_textures - - # Distribute textures evenly among lights with shadows, with - # a preference for directional lights - n_shadow_types = 0 - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - n_shadow_types += 1 - if flags & RenderFlags.SHADOWS_SPOT: - n_shadow_types += 1 - if flags & RenderFlags.SHADOWS_POINT: - n_shadow_types += 1 - - if n_shadow_types > 0: - tex_per_light = n_available_textures // n_shadow_types - - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - max_n_lights[0] = ( - tex_per_light + - (n_available_textures - tex_per_light * n_shadow_types) - ) - if flags & RenderFlags.SHADOWS_SPOT: - max_n_lights[1] = tex_per_light - if flags & RenderFlags.SHADOWS_POINT: - max_n_lights[2] = tex_per_light - - return max_n_lights - - def _get_primitive_program(self, primitive, flags, program_flags): - vertex_shader = None - fragment_shader = None - geometry_shader = None - defines = {} - - if (bool(program_flags & ProgramFlags.USE_MATERIAL) and - not flags & RenderFlags.DEPTH_ONLY and - not flags & RenderFlags.FLAT and - not flags & RenderFlags.SEG): - vertex_shader = 'mesh.vert' - fragment_shader = 'mesh.frag' - elif bool(program_flags & (ProgramFlags.VERTEX_NORMALS | - ProgramFlags.FACE_NORMALS)): - vertex_shader = 'vertex_normals.vert' - if primitive.mode == GLTF.POINTS: - geometry_shader = 'vertex_normals_pc.geom' - else: - geometry_shader = 'vertex_normals.geom' - 
fragment_shader = 'vertex_normals.frag' - elif flags & RenderFlags.FLAT: - vertex_shader = 'flat.vert' - fragment_shader = 'flat.frag' - elif flags & RenderFlags.SEG: - vertex_shader = 'segmentation.vert' - fragment_shader = 'segmentation.frag' - else: - vertex_shader = 'mesh_depth.vert' - fragment_shader = 'mesh_depth.frag' - - # Set up vertex buffer DEFINES - bf = primitive.buf_flags - buf_idx = 1 - if bf & BufFlags.NORMAL: - defines['NORMAL_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TANGENT: - defines['TANGENT_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TEXCOORD_0: - defines['TEXCOORD_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TEXCOORD_1: - defines['TEXCOORD_1_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.COLOR_0: - defines['COLOR_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.JOINTS_0: - defines['JOINTS_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.WEIGHTS_0: - defines['WEIGHTS_0_LOC'] = buf_idx - buf_idx += 1 - defines['INST_M_LOC'] = buf_idx - - # Set up shadow mapping defines - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - defines['DIRECTIONAL_LIGHT_SHADOWS'] = 1 - if flags & RenderFlags.SHADOWS_SPOT: - defines['SPOT_LIGHT_SHADOWS'] = 1 - if flags & RenderFlags.SHADOWS_POINT: - defines['POINT_LIGHT_SHADOWS'] = 1 - max_n_lights = self._compute_max_n_lights(flags) - defines['MAX_DIRECTIONAL_LIGHTS'] = max_n_lights[0] - defines['MAX_SPOT_LIGHTS'] = max_n_lights[1] - defines['MAX_POINT_LIGHTS'] = max_n_lights[2] - - # Set up vertex normal defines - if program_flags & ProgramFlags.VERTEX_NORMALS: - defines['VERTEX_NORMALS'] = 1 - if program_flags & ProgramFlags.FACE_NORMALS: - defines['FACE_NORMALS'] = 1 - - # Set up material texture defines - if bool(program_flags & ProgramFlags.USE_MATERIAL): - tf = primitive.material.tex_flags - if tf & TexFlags.NORMAL: - defines['HAS_NORMAL_TEX'] = 1 - if tf & TexFlags.OCCLUSION: - defines['HAS_OCCLUSION_TEX'] = 1 - if tf & TexFlags.EMISSIVE: - defines['HAS_EMISSIVE_TEX'] = 1 - if tf & TexFlags.BASE_COLOR: - defines['HAS_BASE_COLOR_TEX'] = 1 - if tf & TexFlags.METALLIC_ROUGHNESS: - defines['HAS_METALLIC_ROUGHNESS_TEX'] = 1 - if tf & TexFlags.DIFFUSE: - defines['HAS_DIFFUSE_TEX'] = 1 - if tf & TexFlags.SPECULAR_GLOSSINESS: - defines['HAS_SPECULAR_GLOSSINESS_TEX'] = 1 - if isinstance(primitive.material, MetallicRoughnessMaterial): - defines['USE_METALLIC_MATERIAL'] = 1 - elif isinstance(primitive.material, SpecularGlossinessMaterial): - defines['USE_GLOSSY_MATERIAL'] = 1 - - program = self._program_cache.get_program( - vertex_shader=vertex_shader, - fragment_shader=fragment_shader, - geometry_shader=geometry_shader, - defines=defines - ) - - if not program._in_context(): - program._add_to_context() - - return program - - ########################################################################### - # Viewport Management - ########################################################################### - - def _configure_forward_pass_viewport(self, flags): - - # If using offscreen render, bind main framebuffer - if flags & RenderFlags.OFFSCREEN: - self._configure_main_framebuffer() - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms) - else: - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) - - glViewport(0, 0, self.viewport_width, self.viewport_height) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - - def _configure_shadow_mapping_viewport(self, light, flags): - self._configure_shadow_framebuffer() - glBindFramebuffer(GL_FRAMEBUFFER, self._shadow_fb) - 
light.shadow_texture._bind() - light.shadow_texture._bind_as_depth_attachment() - glActiveTexture(GL_TEXTURE0) - light.shadow_texture._bind() - glDrawBuffer(GL_NONE) - glReadBuffer(GL_NONE) - - glClear(GL_DEPTH_BUFFER_BIT) - glViewport(0, 0, SHADOW_TEX_SZ, SHADOW_TEX_SZ) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - glDisable(GL_CULL_FACE) - glDisable(GL_BLEND) - - ########################################################################### - # Framebuffer Management - ########################################################################### - - def _configure_shadow_framebuffer(self): - if self._shadow_fb is None: - self._shadow_fb = glGenFramebuffers(1) - - def _delete_shadow_framebuffer(self): - if self._shadow_fb is not None: - glDeleteFramebuffers(1, [self._shadow_fb]) - - def _configure_main_framebuffer(self): - # If mismatch with prior framebuffer, delete it - if (self._main_fb is not None and - self.viewport_width != self._main_fb_dims[0] or - self.viewport_height != self._main_fb_dims[1]): - self._delete_main_framebuffer() - - # If framebuffer doesn't exist, create it - if self._main_fb is None: - # Generate standard buffer - self._main_cb, self._main_db = glGenRenderbuffers(2) - - glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb) - glRenderbufferStorage( - GL_RENDERBUFFER, GL_RGBA, - self.viewport_width, self.viewport_height - ) - - glBindRenderbuffer(GL_RENDERBUFFER, self._main_db) - glRenderbufferStorage( - GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, - self.viewport_width, self.viewport_height - ) - - self._main_fb = glGenFramebuffers(1) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_RENDERBUFFER, self._main_cb - ) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, - GL_RENDERBUFFER, self._main_db - ) - - # Generate multisample buffer - self._main_cb_ms, self._main_db_ms = glGenRenderbuffers(2) - glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb_ms) - # glRenderbufferStorageMultisample( - # GL_RENDERBUFFER, 4, GL_RGBA, - # self.viewport_width, self.viewport_height - # ) - # glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms) - # glRenderbufferStorageMultisample( - # GL_RENDERBUFFER, 4, GL_DEPTH_COMPONENT24, - # self.viewport_width, self.viewport_height - # ) - # Add this line - num_samples = min(glGetIntegerv(GL_MAX_SAMPLES), 4) # No more than GL_MAX_SAMPLES - - # Same call as above, with 4 replaced by num_samples; everything else unchanged - glRenderbufferStorageMultisample(GL_RENDERBUFFER, num_samples, GL_RGBA, self.viewport_width, self.viewport_height) - - glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms) # This line is unchanged - - # This line also replaces 4 with num_samples - glRenderbufferStorageMultisample(GL_RENDERBUFFER, num_samples, GL_DEPTH_COMPONENT24, self.viewport_width, self.viewport_height) - - self._main_fb_ms = glGenFramebuffers(1) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_RENDERBUFFER, self._main_cb_ms - ) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, - GL_RENDERBUFFER, self._main_db_ms - ) - - self._main_fb_dims = (self.viewport_width, self.viewport_height) - - def _delete_main_framebuffer(self): - if self._main_fb is not None: - glDeleteFramebuffers(2, [self._main_fb, self._main_fb_ms]) - if self._main_cb is not None: - glDeleteRenderbuffers(2, [self._main_cb, self._main_cb_ms]) - if self._main_db is not None: - glDeleteRenderbuffers(2, [self._main_db,
self._main_db_ms]) - - self._main_fb = None - self._main_cb = None - self._main_db = None - self._main_fb_ms = None - self._main_cb_ms = None - self._main_db_ms = None - self._main_fb_dims = (None, None) - - def _read_main_framebuffer(self, scene, flags): - width, height = self._main_fb_dims[0], self._main_fb_dims[1] - - # Bind framebuffer and blit buffers - glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb_ms) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb) - glBlitFramebuffer( - 0, 0, width, height, 0, 0, width, height, - GL_COLOR_BUFFER_BIT, GL_LINEAR - ) - glBlitFramebuffer( - 0, 0, width, height, 0, 0, width, height, - GL_DEPTH_BUFFER_BIT, GL_NEAREST - ) - glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb) - - # Read depth - depth_buf = glReadPixels( - 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT - ) - depth_im = np.frombuffer(depth_buf, dtype=np.float32) - depth_im = depth_im.reshape((height, width)) - depth_im = np.flip(depth_im, axis=0) - inf_inds = (depth_im == 1.0) - depth_im = 2.0 * depth_im - 1.0 - z_near = scene.main_camera_node.camera.znear - z_far = scene.main_camera_node.camera.zfar - noninf = np.logical_not(inf_inds) - if z_far is None: - depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf]) - else: - depth_im[noninf] = ((2.0 * z_near * z_far) / - (z_far + z_near - depth_im[noninf] * - (z_far - z_near))) - depth_im[inf_inds] = 0.0 - - # Resize for macos if needed - if sys.platform == 'darwin': - depth_im = self._resize_image(depth_im) - - if flags & RenderFlags.DEPTH_ONLY: - return depth_im - - # Read color - if flags & RenderFlags.RGBA: - color_buf = glReadPixels( - 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE - ) - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 4)) - else: - color_buf = glReadPixels( - 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE - ) - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 3)) - color_im = np.flip(color_im, axis=0) - - # Resize for macos if needed - if sys.platform == 'darwin': - color_im = self._resize_image(color_im, True) - - return color_im, depth_im - - def _resize_image(self, value, antialias=False): - """If needed, rescale the render for MacOS.""" - img = PIL.Image.fromarray(value) - resample = PIL.Image.NEAREST - if antialias: - resample = PIL.Image.BILINEAR - size = (self.viewport_width // self.dpscale, - self.viewport_height // self.dpscale) - img = img.resize(size, resample=resample) - return np.array(img) - - ########################################################################### - # Shadowmap Debugging - ########################################################################### - - def _forward_pass_no_reset(self, scene, flags): - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.USE_MATERIAL - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - - # Next, bind the lighting - if not flags & RenderFlags.DEPTH_ONLY and not flags & RenderFlags.FLAT: - self._bind_lighting(scene, program, node, flags) - - # 
Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=flags - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - def _render_light_shadowmaps(self, scene, light_nodes, flags, tile=False): - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) - glClearColor(*scene.bg_color) - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - - w = self.viewport_width - h = self.viewport_height - - num_nodes = len(light_nodes) - viewport_dims = { - (0, 2): [0, h // 2, w // 2, h], - (1, 2): [w // 2, h // 2, w, h], - (0, 3): [0, h // 2, w // 2, h], - (1, 3): [w // 2, h // 2, w, h], - (2, 3): [0, 0, w // 2, h // 2], - (0, 4): [0, h // 2, w // 2, h], - (1, 4): [w // 2, h // 2, w, h], - (2, 4): [0, 0, w // 2, h // 2], - (3, 4): [w // 2, 0, w, h // 2] - } - - if tile: - for i, ln in enumerate(light_nodes): - light = ln.light - - if light.shadow_texture is None: - raise ValueError('Light does not have a shadow texture') - - glViewport(*viewport_dims[(i, num_nodes + 1)]) - - program = self._get_debug_quad_program() - program._bind() - self._bind_texture(light.shadow_texture, 'depthMap', program) - self._render_debug_quad() - self._reset_active_textures() - glFlush() - i += 1 - glViewport(*viewport_dims[(i, num_nodes + 1)]) - self._forward_pass_no_reset(scene, flags) - else: - for i, ln in enumerate(light_nodes): - light = ln.light - - if light.shadow_texture is None: - raise ValueError('Light does not have a shadow texture') - - glViewport(0, 0, self.viewport_width, self.viewport_height) - - program = self._get_debug_quad_program() - program._bind() - self._bind_texture(light.shadow_texture, 'depthMap', program) - self._render_debug_quad() - self._reset_active_textures() - glFlush() - return - - def _get_debug_quad_program(self): - program = self._program_cache.get_program( - vertex_shader='debug_quad.vert', - fragment_shader='debug_quad.frag' - ) - if not program._in_context(): - program._add_to_context() - return program - - def _render_debug_quad(self): - x = glGenVertexArrays(1) - glBindVertexArray(x) - glDrawArrays(GL_TRIANGLES, 0, 6) - glBindVertexArray(0) - glDeleteVertexArrays(1, [x]) diff --git a/spaces/afiz/sepia-image/README.md b/spaces/afiz/sepia-image/README.md deleted file mode 100644 index 5ef9605ee28baa91a5897b950cfe9ff4e806a57b..0000000000000000000000000000000000000000 --- a/spaces/afiz/sepia-image/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sepia Image -emoji: 📚 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.1.5 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/base_callbacks.py b/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/base_callbacks.py deleted file mode 100644 index ef62dd591f1516aa41e2ba347cc3aaa558854f8d..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/base_callbacks.py +++ /dev/null @@ -1,44 +0,0 @@ -import logging -import os -from typing import NoReturn - -import pytorch_lightning as pl -import torch -import torch.nn as nn -from pytorch_lightning.utilities import rank_zero_only - - -class SaveCheckpointsCallback(pl.Callback): - def __init__( - self, - 
model: nn.Module, - checkpoints_dir: str, - save_step_frequency: int, - ): - r"""Callback to save checkpoints every #save_step_frequency steps. - - Args: - model: nn.Module - checkpoints_dir: str, directory to save checkpoints - save_step_frequency: int - """ - self.model = model - self.checkpoints_dir = checkpoints_dir - self.save_step_frequency = save_step_frequency - os.makedirs(self.checkpoints_dir, exist_ok=True) - - @rank_zero_only - def on_batch_end(self, trainer: pl.Trainer, _) -> NoReturn: - r"""Save checkpoint.""" - global_step = trainer.global_step - - if global_step % self.save_step_frequency == 0: - - checkpoint_path = os.path.join( - self.checkpoints_dir, "step={}.pth".format(global_step) - ) - - checkpoint = {'step': global_step, 'model': self.model.state_dict()} - - torch.save(checkpoint, checkpoint_path) - logging.info("Save checkpoint to {}".format(checkpoint_path)) diff --git a/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py b/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py deleted file mode 100644 index 8bd45a930d3dc84912e58659ee575be08e9038f0..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# File : test_numeric_batchnorm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. - -import unittest - -import torch -import torch.nn as nn -from torch.autograd import Variable - -from sync_batchnorm.unittest import TorchTestCase - - -def handy_var(a, unbias=True): - n = a.size(0) - asum = a.sum(dim=0) - as_sum = (a ** 2).sum(dim=0) # a square sum - sumvar = as_sum - asum * asum / n - if unbias: - return sumvar / (n - 1) - else: - return sumvar / n - - -class NumericTestCase(TorchTestCase): - def testNumericBatchNorm(self): - a = torch.rand(16, 10) - bn = nn.BatchNorm2d(10, momentum=1, eps=1e-5, affine=False) - bn.train() - - a_var1 = Variable(a, requires_grad=True) - b_var1 = bn(a_var1) - loss1 = b_var1.sum() - loss1.backward() - - a_var2 = Variable(a, requires_grad=True) - a_mean2 = a_var2.mean(dim=0, keepdim=True) - a_std2 = torch.sqrt(handy_var(a_var2, unbias=False).clamp(min=1e-5)) - # a_std2 = torch.sqrt(a_var2.var(dim=0, keepdim=True, unbiased=False) + 1e-5) - b_var2 = (a_var2 - a_mean2) / a_std2 - loss2 = b_var2.sum() - loss2.backward() - - self.assertTensorClose(bn.running_mean, a.mean(dim=0)) - self.assertTensorClose(bn.running_var, handy_var(a)) - self.assertTensorClose(a_var1.data, a_var2.data) - self.assertTensorClose(b_var1.data, b_var2.data) - self.assertTensorClose(a_var1.grad, a_var2.grad) - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/akhaliq/stylegan3_clip/torch_utils/training_stats.py b/spaces/akhaliq/stylegan3_clip/torch_utils/training_stats.py deleted file mode 100644 index 64e7835210d51d923e6a45240d27020a20e219de..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/torch_utils/training_stats.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. 
Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Facilities for reporting and collecting training statistics across -multiple processes and devices. The interface is designed to minimize -synchronization overhead as well as the amount of boilerplate in user -code.""" - -import re -import numpy as np -import torch -import dnnlib - -from . import misc - -#---------------------------------------------------------------------------- - -_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares] -_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction. -_counter_dtype = torch.float64 # Data type to use for the internal counters. -_rank = 0 # Rank of the current process. -_sync_device = None # Device to use for multiprocess communication. None = single-process. -_sync_called = False # Has _sync() been called yet? -_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor -_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor - -#---------------------------------------------------------------------------- - -def init_multiprocessing(rank, sync_device): - r"""Initializes `torch_utils.training_stats` for collecting statistics - across multiple processes. - - This function must be called after - `torch.distributed.init_process_group()` and before `Collector.update()`. - The call is not necessary if multi-process collection is not needed. - - Args: - rank: Rank of the current process. - sync_device: PyTorch device to use for inter-process - communication, or None to disable multi-process - collection. Typically `torch.device('cuda', rank)`. - """ - global _rank, _sync_device - assert not _sync_called - _rank = rank - _sync_device = sync_device - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def report(name, value): - r"""Broadcasts the given set of scalars to all interested instances of - `Collector`, across device and process boundaries. - - This function is expected to be extremely cheap and can be safely - called from anywhere in the training loop, loss function, or inside a - `torch.nn.Module`. - - Warning: The current implementation expects the set of unique names to - be consistent across processes. Please make sure that `report()` is - called at least once for each unique name by each process, and in the - same order. If a given process has no scalars to broadcast, it can do - `report(name, [])` (empty list). - - Args: - name: Arbitrary string specifying the name of the statistic. - Averages are accumulated separately for each unique name. - value: Arbitrary set of scalars. Can be a list, tuple, - NumPy array, PyTorch tensor, or Python scalar. - - Returns: - The same `value` that was passed in. 
- """ - if name not in _counters: - _counters[name] = dict() - - elems = torch.as_tensor(value) - if elems.numel() == 0: - return value - - elems = elems.detach().flatten().to(_reduce_dtype) - moments = torch.stack([ - torch.ones_like(elems).sum(), - elems.sum(), - elems.square().sum(), - ]) - assert moments.ndim == 1 and moments.shape[0] == _num_moments - moments = moments.to(_counter_dtype) - - device = moments.device - if device not in _counters[name]: - _counters[name][device] = torch.zeros_like(moments) - _counters[name][device].add_(moments) - return value - -#---------------------------------------------------------------------------- - -def report0(name, value): - r"""Broadcasts the given set of scalars by the first process (`rank = 0`), - but ignores any scalars provided by the other processes. - See `report()` for further details. - """ - report(name, value if _rank == 0 else []) - return value - -#---------------------------------------------------------------------------- - -class Collector: - r"""Collects the scalars broadcasted by `report()` and `report0()` and - computes their long-term averages (mean and standard deviation) over - user-defined periods of time. - - The averages are first collected into internal counters that are not - directly visible to the user. They are then copied to the user-visible - state as a result of calling `update()` and can then be queried using - `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the - internal counters for the next round, so that the user-visible state - effectively reflects averages collected between the last two calls to - `update()`. - - Args: - regex: Regular expression defining which statistics to - collect. The default is to collect everything. - keep_previous: Whether to retain the previous averages if no - scalars were collected on a given round - (default: True). - """ - def __init__(self, regex='.*', keep_previous=True): - self._regex = re.compile(regex) - self._keep_previous = keep_previous - self._cumulative = dict() - self._moments = dict() - self.update() - self._moments.clear() - - def names(self): - r"""Returns the names of all statistics broadcasted so far that - match the regular expression specified at construction time. - """ - return [name for name in _counters if self._regex.fullmatch(name)] - - def update(self): - r"""Copies current values of the internal counters to the - user-visible state and resets them for the next round. - - If `keep_previous=True` was specified at construction time, the - operation is skipped for statistics that have received no scalars - since the last update, retaining their previous averages. - - This method performs a number of GPU-to-CPU transfers and one - `torch.distributed.all_reduce()`. It is intended to be called - periodically in the main training loop, typically once every - N training steps. - """ - if not self._keep_previous: - self._moments.clear() - for name, cumulative in _sync(self.names()): - if name not in self._cumulative: - self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) - delta = cumulative - self._cumulative[name] - self._cumulative[name].copy_(cumulative) - if float(delta[0]) != 0: - self._moments[name] = delta - - def _get_delta(self, name): - r"""Returns the raw moments that were accumulated for the given - statistic between the last two calls to `update()`, or zero if - no scalars were collected. 
- """ - assert self._regex.fullmatch(name) - if name not in self._moments: - self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype) - return self._moments[name] - - def num(self, name): - r"""Returns the number of scalars that were accumulated for the given - statistic between the last two calls to `update()`, or zero if - no scalars were collected. - """ - delta = self._get_delta(name) - return int(delta[0]) - - def mean(self, name): - r"""Returns the mean of the scalars that were accumulated for the - given statistic between the last two calls to `update()`, or NaN if - no scalars were collected. - """ - delta = self._get_delta(name) - if int(delta[0]) == 0: - return float('nan') - return float(delta[1] / delta[0]) - - def std(self, name): - r"""Returns the standard deviation of the scalars that were - accumulated for the given statistic between the last two calls to - `update()`, or NaN if no scalars were collected. - """ - delta = self._get_delta(name) - if int(delta[0]) == 0 or not np.isfinite(float(delta[1])): - return float('nan') - if int(delta[0]) == 1: - return float(0) - mean = float(delta[1] / delta[0]) - raw_var = float(delta[2] / delta[0]) - return np.sqrt(max(raw_var - np.square(mean), 0)) - - def as_dict(self): - r"""Returns the averages accumulated between the last two calls to - `update()` as an `dnnlib.EasyDict`. The contents are as follows: - - dnnlib.EasyDict( - NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT), - ... - ) - """ - stats = dnnlib.EasyDict() - for name in self.names(): - stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name)) - return stats - - def __getitem__(self, name): - r"""Convenience getter. - `collector[name]` is a synonym for `collector.mean(name)`. - """ - return self.mean(name) - -#---------------------------------------------------------------------------- - -def _sync(names): - r"""Synchronize the global cumulative counters across devices and - processes. Called internally by `Collector.update()`. - """ - if len(names) == 0: - return [] - global _sync_called - _sync_called = True - - # Collect deltas within current rank. - deltas = [] - device = _sync_device if _sync_device is not None else torch.device('cpu') - for name in names: - delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device) - for counter in _counters[name].values(): - delta.add_(counter.to(device)) - counter.copy_(torch.zeros_like(counter)) - deltas.append(delta) - deltas = torch.stack(deltas) - - # Sum deltas across ranks. - if _sync_device is not None: - torch.distributed.all_reduce(deltas) - - # Update cumulative values. - deltas = deltas.cpu() - for idx, name in enumerate(names): - if name not in _cumulative: - _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) - _cumulative[name].add_(deltas[idx]) - - # Return name-value pairs. 
- return [(name, _cumulative[name]) for name in names] - -#---------------------------------------------------------------------------- diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py deleted file mode 100644 index 22c50b356adf906bd6a579749d0c120f9cac8381..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -""" - Pygments - ~~~~~~~~ - - Pygments is a syntax highlighting package written in Python. - - It is a generic syntax highlighter for general use in all kinds of software - such as forum systems, wikis or other applications that need to prettify - source code. Highlights are: - - * a wide range of common languages and markup formats is supported - * special attention is paid to details, increasing quality by a fair amount - * support for new languages and formats are added easily - * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image - formats that PIL supports, and ANSI sequences - * it is usable as a command-line tool and as a library - * ... and it highlights even Brainfuck! - - The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``. - - .. _Pygments master branch: - https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev - - :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" -from io import StringIO, BytesIO - -__version__ = '2.11.2' -__docformat__ = 'restructuredtext' - -__all__ = ['lex', 'format', 'highlight'] - - -def lex(code, lexer): - """ - Lex ``code`` with ``lexer`` and return an iterable of tokens. - """ - try: - return lexer.get_tokens(code) - except TypeError as err: - if (isinstance(err.args[0], str) and - ('unbound method get_tokens' in err.args[0] or - 'missing 1 required positional argument' in err.args[0])): - raise TypeError('lex() argument must be a lexer instance, ' - 'not a class') - raise - - -def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin - """ - Format a tokenlist ``tokens`` with the formatter ``formatter``. - - If ``outfile`` is given and a valid file object (an object - with a ``write`` method), the result will be written to it, otherwise - it is returned as a string. - """ - try: - if not outfile: - realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO() - formatter.format(tokens, realoutfile) - return realoutfile.getvalue() - else: - formatter.format(tokens, outfile) - except TypeError as err: - if (isinstance(err.args[0], str) and - ('unbound method format' in err.args[0] or - 'missing 1 required positional argument' in err.args[0])): - raise TypeError('format() argument must be a formatter instance, ' - 'not a class') - raise - - -def highlight(code, lexer, formatter, outfile=None): - """ - Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``. - - If ``outfile`` is given and a valid file object (an object - with a ``write`` method), the result will be written to it, otherwise - it is returned as a string. 
- """ - return format(lex(code, lexer), formatter, outfile) - diff --git a/spaces/algomuffin/jojo_fork/op/conv2d_gradfix.py b/spaces/algomuffin/jojo_fork/op/conv2d_gradfix.py deleted file mode 100644 index bb2f94bbcb8132299fd4d538972d32bd7ff6e7d6..0000000000000000000000000000000000000000 --- a/spaces/algomuffin/jojo_fork/op/conv2d_gradfix.py +++ /dev/null @@ -1,227 +0,0 @@ -import contextlib -import warnings - -import torch -from torch import autograd -from torch.nn import functional as F - -enabled = True -weight_gradients_disabled = False - - -@contextlib.contextmanager -def no_weight_gradients(): - global weight_gradients_disabled - - old = weight_gradients_disabled - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if could_use_op(input): - return conv2d_gradfix( - transpose=False, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=0, - dilation=dilation, - groups=groups, - ).apply(input, weight, bias) - - return F.conv2d( - input=input, - weight=weight, - bias=bias, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - ) - - -def conv_transpose2d( - input, - weight, - bias=None, - stride=1, - padding=0, - output_padding=0, - groups=1, - dilation=1, -): - if could_use_op(input): - return conv2d_gradfix( - transpose=True, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=output_padding, - groups=groups, - dilation=dilation, - ).apply(input, weight, bias) - - return F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - stride=stride, - padding=padding, - output_padding=output_padding, - dilation=dilation, - groups=groups, - ) - - -def could_use_op(input): - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - - if input.device.type != "cuda": - return False - - if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]): - return True - - warnings.warn( - f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()." 
- ) - - return False - - -def ensure_tuple(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - - return xs - - -conv2d_gradfix_cache = dict() - - -def conv2d_gradfix( - transpose, weight_shape, stride, padding, output_padding, dilation, groups -): - ndim = 2 - weight_shape = tuple(weight_shape) - stride = ensure_tuple(stride, ndim) - padding = ensure_tuple(padding, ndim) - output_padding = ensure_tuple(output_padding, ndim) - dilation = ensure_tuple(dilation, ndim) - - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in conv2d_gradfix_cache: - return conv2d_gradfix_cache[key] - - common_kwargs = dict( - stride=stride, padding=padding, dilation=dilation, groups=groups - ) - - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - class Conv2d(autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - if not transpose: - out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - - else: - out = F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - output_padding=output_padding, - **common_kwargs, - ) - - ctx.save_for_backward(input, weight) - - return out - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - grad_input, grad_weight, grad_bias = None, None, None - - if ctx.needs_input_grad[0]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, weight, None) - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum((0, 2, 3)) - - return grad_input, grad_weight, grad_bias - - class Conv2dGradWeight(autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - op = torch._C._jit_get_operation( - "aten::cudnn_convolution_backward_weight" - if not transpose - else "aten::cudnn_convolution_transpose_backward_weight" - ) - flags = [ - torch.backends.cudnn.benchmark, - torch.backends.cudnn.deterministic, - torch.backends.cudnn.allow_tf32, - ] - grad_weight = op( - weight_shape, - grad_output, - input, - padding, - stride, - dilation, - groups, - *flags, - ) - ctx.save_for_backward(grad_output, input) - - return grad_weight - - @staticmethod - def backward(ctx, grad_grad_weight): - grad_output, input = ctx.saved_tensors - grad_grad_output, grad_grad_input = None, None - - if ctx.needs_input_grad[0]: - grad_grad_output = Conv2d.apply(input, grad_grad_weight, None) - - if ctx.needs_input_grad[1]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, grad_grad_weight, None) - - return grad_grad_output, grad_grad_input - - conv2d_gradfix_cache[key] = Conv2d - - return Conv2d diff --git a/spaces/aliabd/SummerTime/dataset/non_huggingface_datasets_builders/scisummnet.py b/spaces/aliabd/SummerTime/dataset/non_huggingface_datasets_builders/scisummnet.py deleted file mode 100644 index 
0b6bcfb5bfc02e09be903d988ec45d0a0a06606e..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/dataset/non_huggingface_datasets_builders/scisummnet.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -import datasets - - -"""Scisummnet dataset.""" - - -_CITATION = """ -@InProceedings{yasunaga&al.19.scisumm, - title = {{ScisummNet}: A Large Annotated Corpus and Content-Impact Models for Scientific Paper Summarization with Citation Networks}, - author = {Michihiro Yasunaga and Jungo Kasai and Rui Zhang and Alexander Fabbri and Irene Li and Dan Friedman and Dragomir Radev}, - booktitle = {Proceedings of AAAI 2019}, - year = {2019} -} -@InProceedings{yasunaga&al.17, - title = {Graph-based Neural Multi-Document Summarization}, - author = {Yasunaga, Michihiro and Zhang, Rui and Meelu, Kshitijh and Pareek, Ayush and Srinivasan, Krishnan and Radev, Dragomir R.}, - booktitle = {Proceedings of CoNLL 2017}, - year = {2017} -} -""" - -_DESCRIPTION = """ -A summary of scientific papers should ideally incorporate the impact of the papers on the research community -reflected by citations. To facilitate research in citation-aware scientific paper summarization (Scisumm), -the CL-Scisumm shared task has been organized since 2014 for papers in the computational linguistics and NLP domain. -""" - -_HOMEPAGE = "https://cs.stanford.edu/~myasu/projects/scisumm_net/" - -_LICENSE = "CC BY-SA 4.0" - -_URLs = "https://cs.stanford.edu/~myasu/projects/scisumm_net/scisummnet_release1.1__20190413.zip" - - -class SummertimeScisummnet(datasets.GeneratorBasedBuilder): - """Scisummnet dataset.""" - - VERSION = datasets.Version("1.1.0") - - BUILDER_CONFIGS = [ - datasets.BuilderConfig(), - ] - - def _info(self): - features = datasets.Features( - { - "entry_number": datasets.Value("string"), - "document_xml": datasets.Value("string"), - "citing_sentences_annotated.json": datasets.Value("string"), - "summary": datasets.Value("string"), - } - ) - return datasets.DatasetInfo( - description=_DESCRIPTION, - features=features, - supervised_keys=None, - homepage=_HOMEPAGE, - license=_LICENSE, - citation=_CITATION, - ) - - def _split_generators(self, dl_manager): - """Returns SplitGenerators.""" - my_urls = _URLs - path = dl_manager.download_and_extract(my_urls) - trainpath = os.path.join( - path, "scisummnet_release1.1__20190413", "top1000_complete" - ) - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - # These kwargs will be passed to _generate_examples - gen_kwargs={"extraction_path": trainpath, "split": "train"}, - ) - ] - - def _generate_examples(self, extraction_path, split): - """Yields examples.""" - - for folder in os.listdir(extraction_path): - - entry = {} - - entry["entry_number"] = folder - - doc_xml_path = os.path.join( - extraction_path, folder, "Documents_xml", folder + ".xml" - ) - with open(doc_xml_path, "r", encoding="utf-8") as f: - entry["document_xml"] = f.read() - - cite_annot_path = os.path.join( - extraction_path, folder, "citing_sentences_annotated.json" - ) - with open(cite_annot_path, "r", encoding="utf-8") as f: - entry["citing_sentences_annotated.json"] = f.read() - - summary_path = os.path.join( - extraction_path, folder, "summary", folder + ".gold.txt" - ) - with open(summary_path, "r", encoding="utf-8") as f: - entry["summary"] = f.read() - - yield entry["entry_number"], entry diff --git a/spaces/almakedon/faster-whisper-webui/docs/options.md b/spaces/almakedon/faster-whisper-webui/docs/options.md deleted file mode 100644 index 
6979fca4d9d4c98a626a2953c2573ff23898a37e..0000000000000000000000000000000000000000
--- a/spaces/almakedon/faster-whisper-webui/docs/options.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# Standard Options
-To transcribe or translate an audio file, you can either copy a URL from a website (all [websites](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)
-supported by YT-DLP will work, including YouTube), upload an audio file (choose "All Files (*.*)"
-in the file selector to select any file type, including video files), or use the microphone.
-
-For longer audio files (>10 minutes), it is recommended that you select Silero VAD (Voice Activity Detector) in the VAD option, especially if you are using the `large-v1` model. Note that `large-v2` is a lot more forgiving, but you may still want to use a VAD with a slightly higher "VAD - Max Merge Size (s)" (60 seconds or more).
-
-## Model
-Select the model that Whisper will use to transcribe the audio:
-
-| Size      | Parameters | English-only model | Multilingual model | Required VRAM | Relative speed |
-|-----------|------------|--------------------|--------------------|---------------|----------------|
-| tiny      | 39 M       | tiny.en            | tiny               | ~1 GB         | ~32x           |
-| base      | 74 M       | base.en            | base               | ~1 GB         | ~16x           |
-| small     | 244 M      | small.en           | small              | ~2 GB         | ~6x            |
-| medium    | 769 M      | medium.en          | medium             | ~5 GB         | ~2x            |
-| large     | 1550 M     | N/A                | large              | ~10 GB        | 1x             |
-| large-v2  | 1550 M     | N/A                | large              | ~10 GB        | 1x             |
-
-## Language
-
-Select the language, or leave it empty for Whisper to automatically detect it.
-
-Note that if the selected language and the language in the audio differ, Whisper may start to translate the audio to the selected
-language. For instance, if the audio is in English but you select Japanese, the model may translate the audio to Japanese.
-
-## Inputs
-The options "URL (YouTube, etc.)", "Upload Files" and "Microphone Input" allow you to send an audio input to the model.
-
-### Multiple Files
-Note that the UI will only process either the given URL or the uploaded files (including microphone) - not both.
-
-But you can upload multiple files either through the "Upload files" option, or as a playlist on YouTube. Each audio file will then be processed in turn, and the resulting SRT/VTT/Transcript will be made available in the "Download" section. When more than one file is processed, the UI will also generate an "All_Output" zip file containing all the text output files.
-
-## Task
-Select the task - either "transcribe" to transcribe the audio to text, or "translate" to translate it to English.
-
-## VAD
-Using a VAD will improve the timing accuracy of each transcribed line, as well as prevent Whisper from getting into an infinite
-loop detecting the same sentence over and over again. The downside is that this may come at a cost to text accuracy, especially
-with regard to unique words or names that appear in the audio. You can compensate for this by increasing the prompt window.
-
-Note that English is very well handled by Whisper, and it's less susceptible to issues surrounding bad timings and infinite loops.
-So you may only need to use a VAD for other languages, such as Japanese, or when the audio is very long.
-
-* none
-  * Run Whisper on the entire audio input
-* silero-vad
-  * Use Silero VAD to detect sections that contain speech, and run Whisper independently on each section.
-    Whisper is also run on the gaps between each speech section, by either expanding the section up to the max merge size, or running Whisper
-    independently on the non-speech section.
-* silero-vad-expand-into-gaps
-  * Use Silero VAD to detect sections that contain speech, and run Whisper independently on each section. Each speech section will be expanded
-    such that it covers any adjacent non-speech sections. For instance, if an audio file of one minute contains the speech sections
-    00:00 - 00:10 (A) and 00:30 - 00:40 (B), the first section (A) will be expanded to 00:00 - 00:30, and (B) will be expanded to 00:30 - 01:00.
-* silero-vad-skip-gaps
-  * As above, but sections that don't contain speech according to Silero will be skipped. This will be slightly faster, but
-    may cause dialogue to be skipped.
-* periodic-vad
-  * Create sections of speech every 'VAD - Max Merge Size' seconds. This is very fast and simple, but will potentially break
-    a sentence or word in two.
-
-## VAD - Merge Window
-If set, any adjacent speech sections that are at most this number of seconds apart will be automatically merged.
-
-## VAD - Max Merge Size (s)
-Disables merging of adjacent speech sections if they are this number of seconds long.
-
-## VAD - Padding (s)
-The number of seconds (floating point) to add to the beginning and end of each speech section. Setting this to a number
-larger than zero ensures that Whisper is more likely to correctly transcribe a sentence in the beginning of
-a speech section. However, this also increases the probability of Whisper assigning the wrong timestamp
-to each transcribed line. The default value is 1 second.
-
-## VAD - Prompt Window (s)
-The text of a detected line will be included as a prompt to the next speech section, if the speech section starts at most this
-number of seconds after the line has finished. For instance, if a line ends at 10:00, and the next speech section starts at
-10:04, the line's text will be included if the prompt window is 4 seconds or more (10:04 - 10:00 = 4 seconds). A short sketch
-of this inclusion rule is given below.
-
-Note that detected lines in gaps between speech sections will not be included in the prompt
-(if silero-vad or silero-vad-expand-into-gaps is used).
-
-# Command Line Options
-
-Both `app.py` and `cli.py` also accept command line options, such as the ability to enable parallel execution on multiple
-CPU/GPU cores, the default model name/VAD and so on. Consult the README in the root folder for more information.
-
-# Additional Options
-
-In addition to the above, there's also a "Full" options interface that allows you to set all the options available in the Whisper
-model. The options are as follows:
-
-## Initial Prompt
-Optional text to provide as a prompt for the first 30-second window. Whisper will attempt to use this as a starting point for the transcription, but you can
-also get creative and specify a style or format for the output of the transcription.
-
-For instance, if you use the prompt "hello how is it going always use lowercase no punctuation goodbye one two three start stop i you me they", Whisper will
-be biased to output lowercase letters and no punctuation, and may also be biased to output the words in the prompt more often.
-
-## Temperature
-The temperature to use when sampling. Default is 0 (zero). A higher temperature will result in more random output, while a lower temperature will be more deterministic.
-
-## Best Of - Non-zero temperature
-The number of candidates to sample from when sampling with non-zero temperature. Default is 5.
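The "VAD - Prompt Window (s)" rule described above reduces to a simple time comparison. The sketch below only illustrates that rule as stated in this document; the function name is hypothetical and this is not the webui's actual implementation.

```python
# Hypothetical helper illustrating the "VAD - Prompt Window (s)" rule:
# a transcribed line is carried over as a prompt for the next speech
# section only if that section starts within `prompt_window_s` seconds
# of the line's end time.
def include_in_prompt(line_end_s: float, next_section_start_s: float, prompt_window_s: float) -> bool:
    return (next_section_start_s - line_end_s) <= prompt_window_s


# The example from the text: a line ends at 10:00 (600 s) and the next
# speech section starts at 10:04 (604 s); the line is included when the
# prompt window is 4 seconds or more.
assert include_in_prompt(600.0, 604.0, prompt_window_s=4.0)
assert not include_in_prompt(600.0, 604.0, prompt_window_s=3.0)
```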
-
-## Beam Size - Zero temperature
-The number of beams to use in beam search when sampling with zero temperature. Default is 5.
-
-## Patience - Zero temperature
-The patience value to use in beam search when sampling with zero temperature. As in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search.
-
-## Length Penalty - Any temperature
-The token length penalty coefficient (alpha) to use when sampling with any temperature. As in https://arxiv.org/abs/1609.08144, uses simple length normalization by default.
-
-## Suppress Tokens - Comma-separated list of token IDs
-A comma-separated list of token IDs to suppress during sampling. The default value of "-1" will suppress most special characters except common punctuation.
-
-## Condition on previous text
-If True, provide the previous output of the model as a prompt for the next window. Disabling this may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop.
-
-## FP16
-Whether to perform inference in fp16. True by default.
-
-## Temperature increment on fallback
-The amount by which to increase the temperature when falling back, i.e. when decoding fails to meet either of the thresholds below. Default is 0.2.
-
-## Compression ratio threshold
-If the gzip compression ratio is higher than this value, treat the decoding as failed. Default is 2.4.
-
-## Logprob threshold
-If the average log probability is lower than this value, treat the decoding as failed. Default is -1.0.
-
-## No speech threshold
-If the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence. Default is 0.6. A short sketch of how these fallback thresholds interact is given below.
diff --git a/spaces/amankishore/sjc/my/README.md b/spaces/amankishore/sjc/my/README.md
deleted file mode 100644
index 5daa1c788deef956d5cb6399ecba2c96d947d827..0000000000000000000000000000000000000000
--- a/spaces/amankishore/sjc/my/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-a personal toolkit for experiment management;
-some of the design patterns are inspired by detectron2
diff --git a/spaces/andreped/AeroPath/.github/ISSUE_TEMPLATE/bug_report.md b/spaces/andreped/AeroPath/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 066324e5c29270804250e512b0dfbac2289eeabd..0000000000000000000000000000000000000000
--- a/spaces/andreped/AeroPath/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Desktop (please complete the following information):**
- - OS: [e.g. Windows]
 - - Version: [e.g. 10]
 - - Python: [e.g. 3.8.10]
-
-**Additional context**
-Add any other context about the problem here.
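The last four options of `docs/options.md` above (temperature increment on fallback, compression ratio threshold, logprob threshold and no speech threshold) work together as a retry loop. The sketch below is a rough illustration of that interaction, assuming it mirrors the upstream Whisper decoder's fallback behaviour; the names and the `decode_segment` callable are hypothetical and not the webui's code.

```python
# Rough sketch (hypothetical names) of the temperature fallback loop
# controlled by the options described in docs/options.md above.
def decode_with_fallback(decode_segment,
                         temperature_increment=0.2,
                         compression_ratio_threshold=2.4,
                         logprob_threshold=-1.0,
                         no_speech_threshold=0.6):
    temperature, result = 0.0, None
    while temperature <= 1.0:
        # decode_segment is assumed to return a dict with the usual
        # per-segment statistics (compression_ratio, avg_logprob, ...).
        result = decode_segment(temperature=temperature)
        failed = (result["compression_ratio"] > compression_ratio_threshold
                  or result["avg_logprob"] < logprob_threshold)
        if not failed:
            break
        temperature += temperature_increment  # retry with a higher temperature
    # A segment that still fails the logprob check and has a high
    # no-speech probability is treated as silence.
    if (result["no_speech_prob"] > no_speech_threshold
            and result["avg_logprob"] < logprob_threshold):
        result["text"] = ""
    return result
```

This keeps only the relationships the four options describe; the real decoder has additional details (for example, the silence check can also short-circuit the fallback).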
diff --git a/spaces/anonymousauthorsanonymous/spurious/app.py b/spaces/anonymousauthorsanonymous/spurious/app.py deleted file mode 100644 index 84167686d6c6ffb76ec3635255516622ce12fbf3..0000000000000000000000000000000000000000 --- a/spaces/anonymousauthorsanonymous/spurious/app.py +++ /dev/null @@ -1,472 +0,0 @@ -# %% -import gradio as gr -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import random -from matplotlib.ticker import MaxNLocator -from transformers import pipeline - -MODEL_NAMES = ["bert-base-uncased", "roberta-base", "bert-large-uncased", "roberta-large"] -OWN_MODEL_NAME = 'add-a-model' - -DECIMAL_PLACES = 1 -EPS = 1e-5 # to avoid /0 errors - -# Example date conts -DATE_SPLIT_KEY = "DATE" -START_YEAR = 1801 -STOP_YEAR = 1999 -NUM_PTS = 20 -DATES = np.linspace(START_YEAR, STOP_YEAR, NUM_PTS).astype(int).tolist() -DATES = [f'{d}' for d in DATES] - -# Example place conts -# https://www3.weforum.org/docs/WEF_GGGR_2021.pdf -# Bottom 10 and top 10 Global Gender Gap ranked countries. -PLACE_SPLIT_KEY = "PLACE" -PLACES = [ - "Afghanistan", - "Yemen", - "Iraq", - "Pakistan", - "Syria", - "Democratic Republic of Congo", - "Iran", - "Mali", - "Chad", - "Saudi Arabia", - "Switzerland", - "Ireland", - "Lithuania", - "Rwanda", - "Namibia", - "Sweden", - "New Zealand", - "Norway", - "Finland", - "Iceland"] - - -# Example Reddit interest consts -# in order of increasing self-identified female participation. -# See http://bburky.com/subredditgenderratios/ , Minimum subreddit size: 400000 -SUBREDDITS = [ - "GlobalOffensive", - "pcmasterrace", - "nfl", - "sports", - "The_Donald", - "leagueoflegends", - "Overwatch", - "gonewild", - "Futurology", - "space", - "technology", - "gaming", - "Jokes", - "dataisbeautiful", - "woahdude", - "askscience", - "wow", - "anime", - "BlackPeopleTwitter", - "politics", - "pokemon", - "worldnews", - "reddit.com", - "interestingasfuck", - "videos", - "nottheonion", - "television", - "science", - "atheism", - "movies", - "gifs", - "Music", - "trees", - "EarthPorn", - "GetMotivated", - "pokemongo", - "news", - # removing below subreddit as most of the tokens are taken up by it: - # ['ff', '##ff', '##ff', '##fu', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', ...] 
- # "fffffffuuuuuuuuuuuu", - "Fitness", - "Showerthoughts", - "OldSchoolCool", - "explainlikeimfive", - "todayilearned", - "gameofthrones", - "AdviceAnimals", - "DIY", - "WTF", - "IAmA", - "cringepics", - "tifu", - "mildlyinteresting", - "funny", - "pics", - "LifeProTips", - "creepy", - "personalfinance", - "food", - "AskReddit", - "books", - "aww", - "sex", - "relationships", -] - -GENDERED_LIST = [ - ['he', 'she'], - ['him', 'her'], - ['his', 'hers'], - ["himself", "herself"], - ['male', 'female'], - ['man', 'woman'], - ['men', 'women'], - ["husband", "wife"], - ['father', 'mother'], - ['boyfriend', 'girlfriend'], - ['brother', 'sister'], - ["actor", "actress"], -] - -# %% -# Fire up the models -models = dict() - -for bert_like in MODEL_NAMES: - models[bert_like] = pipeline("fill-mask", model=bert_like) - -# %% - - -def get_gendered_token_ids(): - male_gendered_tokens = [list[0] for list in GENDERED_LIST] - female_gendered_tokens = [list[1] for list in GENDERED_LIST] - - return male_gendered_tokens, female_gendered_tokens - - -def prepare_text_for_masking(input_text, mask_token, gendered_tokens, split_key): - text_w_masks_list = [ - mask_token if word.lower() in gendered_tokens else word for word in input_text.split()] - num_masks = len([m for m in text_w_masks_list if m == mask_token]) - - text_portions = ' '.join(text_w_masks_list).split(split_key) - return text_portions, num_masks - - -def get_avg_prob_from_pipeline_outputs(mask_filled_text, gendered_token, num_preds): - pronoun_preds = [sum([ - pronoun["score"] if pronoun["token_str"].strip().lower() in gendered_token else 0.0 - for pronoun in top_preds]) - for top_preds in mask_filled_text - ] - return round(sum(pronoun_preds) / (EPS + num_preds) * 100, DECIMAL_PLACES) - -# %% - - -def get_figure(df, gender, n_fit=1): - df = df.set_index('x-axis') - cols = df.columns - xs = list(range(len(df))) - ys = df[cols[0]] - fig, ax = plt.subplots() - # Trying small fig due to rendering issues on HF, not on VS Code - fig.set_figheight(3) - fig.set_figwidth(9) - - # find stackoverflow reference - p, C_p = np.polyfit(xs, ys, n_fit, cov=1) - t = np.linspace(min(xs)-1, max(xs)+1, 10*len(xs)) - TT = np.vstack([t**(n_fit-i) for i in range(n_fit+1)]).T - - # matrix multiplication calculates the polynomial values - yi = np.dot(TT, p) - C_yi = np.dot(TT, np.dot(C_p, TT.T)) # C_y = TT*C_z*TT.T - sig_yi = np.sqrt(np.diag(C_yi)) # Standard deviations are sqrt of diagonal - - ax.fill_between(t, yi+sig_yi, yi-sig_yi, alpha=.25) - ax.plot(t, yi, '-') - ax.plot(df, 'ro') - ax.legend(list(df.columns)) - - ax.axis('tight') - ax.set_xlabel("Value injected into input text") - ax.set_title( - f"Probability of predicting {gender} pronouns.") - ax.set_ylabel(f"Softmax prob for pronouns") - ax.xaxis.set_major_locator(MaxNLocator(6)) - ax.tick_params(axis='x', labelrotation=5) - return fig - - -# %% -def predict_gender_pronouns( - model_name, - own_model_name, - indie_vars, - split_key, - normalizing, - n_fit, - input_text, -): - """Run inference on input_text for each model type, returning df and plots of percentage - of gender pronouns predicted as female and male in each target text. 
- """ - if model_name not in MODEL_NAMES: - model = pipeline("fill-mask", model=own_model_name) - else: - model = models[model_name] - - mask_token = model.tokenizer.mask_token - - indie_vars_list = indie_vars.split(',') - - male_gendered_tokens, female_gendered_tokens = get_gendered_token_ids() - - text_segments, num_preds = prepare_text_for_masking( - input_text, mask_token, male_gendered_tokens + female_gendered_tokens, split_key) - - male_pronoun_preds = [] - female_pronoun_preds = [] - for indie_var in indie_vars_list: - - target_text = f"{indie_var}".join(text_segments) - mask_filled_text = model(target_text) - # Quick hack as realized return type based on how many MASKs in text. - if type(mask_filled_text[0]) is not list: - mask_filled_text = [mask_filled_text] - - female_pronoun_preds.append(get_avg_prob_from_pipeline_outputs( - mask_filled_text, - female_gendered_tokens, - num_preds - )) - male_pronoun_preds.append(get_avg_prob_from_pipeline_outputs( - mask_filled_text, - male_gendered_tokens, - num_preds - )) - - if normalizing: - total_gendered_probs = np.add( - female_pronoun_preds, male_pronoun_preds) - female_pronoun_preds = np.around( - np.divide(female_pronoun_preds, total_gendered_probs+EPS)*100, - decimals=DECIMAL_PLACES - ) - male_pronoun_preds = np.around( - np.divide(male_pronoun_preds, total_gendered_probs+EPS)*100, - decimals=DECIMAL_PLACES - ) - - results_df = pd.DataFrame({'x-axis': indie_vars_list}) - results_df['female_pronouns'] = female_pronoun_preds - results_df['male_pronouns'] = male_pronoun_preds - female_fig = get_figure(results_df.drop( - 'male_pronouns', axis=1), 'female', n_fit,) - male_fig = get_figure(results_df.drop( - 'female_pronouns', axis=1), 'male', n_fit,) - display_text = f"{random.choice(indie_vars_list)}".join(text_segments) - - return ( - display_text, - female_fig, - male_fig, - results_df, - ) - - -# %% -title = "Causing Gender Pronouns" -description = """ -## Intro -""" - - -date_example = [ - MODEL_NAMES[1], - '', - ', '.join(DATES), - 'DATE', - "False", - 1, - 'She was a teenager in DATE.' -] - - -place_example = [ - MODEL_NAMES[0], - '', - ', '.join(PLACES), - 'PLACE', - "False", - 1, - 'She became an adult in PLACE.' -] - - -subreddit_example = [ - MODEL_NAMES[3], - '', - ', '.join(SUBREDDITS), - 'SUBREDDIT', - "False", - 1, - 'She was a kid. SUBREDDIT.' -] - -own_model_example = [ - OWN_MODEL_NAME, - 'emilyalsentzer/Bio_ClinicalBERT', - ', '.join(DATES), - 'DATE', - "False", - 1, - 'She was exposed to the virus in DATE.' -] - - -def date_fn(): - return date_example - - -def place_fn(): - return place_example - - -def reddit_fn(): - return subreddit_example - - -def your_fn(): - return own_model_example - - -# %% -demo = gr.Blocks() -with demo: - gr.Markdown("# Spurious Correlation Evaluation for Pre-trained LLMs") - gr.Markdown("Find spurious correlations between seemingly independent variables (for example between `gender` and `time`) in almost any BERT-like LLM on Hugging Face, below.") - - # gr.Markdown("Note: If there is an issue with the rendering of the results taking longer than expected (more than 10s of seconds), there may be an unexpected issue effecting the hosting. 
If so, please see this [backup colab notebook](https://colab.research.google.com/drive/1A3a9cy9fERaxkuoX8YNTFhLlhRt_cxMm?usp=sharing).") - - - gr.Markdown("## Instructions for this Demo") - gr.Markdown("1) Click on one of the examples below (where we sweep through a spectrum of `places`, `dates` and `subreddits`) to pre-populate the input fields.") - gr.Markdown("2) Check out the pre-populated fields as you scroll down to the ['Hit Submit...'] button!") - gr.Markdown("3) Repeat steps (1) and (2) with more pre-populated inputs or with your own values in the input fields!") - - gr.Markdown("## Example inputs") - gr.Markdown("Click a button below to pre-populate input fields with example values. Then scroll down to Hit Submit to generate predictions.") - with gr.Row(): - date_gen = gr.Button('Click for date example inputs') - gr.Markdown("<-- x-axis sorted by older to more recent dates:") - - place_gen = gr.Button('Click for country example inputs') - gr.Markdown( - "<-- x-axis sorted by bottom 10 and top 10 [Global Gender Gap](https://www3.weforum.org/docs/WEF_GGGR_2021.pdf) ranked countries:") - - subreddit_gen = gr.Button('Click for Subreddit example inputs') - gr.Markdown( - "<-- x-axis sorted in order of increasing self-identified female participation (see [bburky](http://bburky.com/subredditgenderratios/)): ") - - your_gen = gr.Button('Add-a-model example inputs') - gr.Markdown("<-- x-axis dates, with your own model loaded! (If first time, try another example, it can take a while to load new model.)") - - gr.Markdown("## Input fields") - gr.Markdown( - f"A) Pick a spectrum of comma separated values for text injection and x-axis.") - - with gr.Row(): - x_axis = gr.Textbox( - lines=3, - label="A) Comma separated values for text injection and x-axis", - ) - - - gr.Markdown("B) Pick a pre-loaded BERT-family model of interest on the right.") - gr.Markdown(f"Or C) select `{OWN_MODEL_NAME}`, then add the mame of any other Hugging Face model that supports the [fill-mask](https://huggingface.co/models?pipeline_tag=fill-mask) task on the right (note: this may take some time to load).") - - with gr.Row(): - model_name = gr.Radio( - MODEL_NAMES + [OWN_MODEL_NAME], - type="value", - label="B) BERT-like model.", - ) - own_model_name = gr.Textbox( - label="C) If you selected an 'add-a-model' model, put any Hugging Face pipeline model name (that supports the fill-mask task) here.", - ) - - gr.Markdown("D) Pick if you want to the predictions normalied to these gendered terms only.") - gr.Markdown("E) Also tell the demo what special token you will use in your input text, that you would like replaced with the spectrum of values you listed above.") - gr.Markdown("And F) the degree of polynomial fit used for high-lighting potential spurious association.") - - - with gr.Row(): - to_normalize = gr.Dropdown( - ["False", "True"], - label="D) Normalize model's predictions to only the gendered ones?", - type="index", - ) - place_holder = gr.Textbox( - label="E) Special token place-holder", - ) - n_fit = gr.Dropdown( - list(range(1, 5)), - label="F) Degree of polynomial fit", - type="value", - ) - - gr.Markdown( - "G) Finally, add input text that includes at least one gendered pronouns and one place-holder token specified above.") - - with gr.Row(): - input_text = gr.Textbox( - lines=2, - label="G) Input text with pronouns and place-holder token", - ) - - gr.Markdown("## Outputs!") - #gr.Markdown("Scroll down and 'Hit Submit'!") - with gr.Row(): - btn = gr.Button("Hit submit to generate predictions!") - - with 
gr.Row(): - sample_text = gr.Textbox( - type="auto", label="Output text: Sample of text fed to model") - with gr.Row(): - female_fig = gr.Plot(type="auto") - male_fig = gr.Plot(type="auto") - with gr.Row(): - df = gr.Dataframe( - show_label=True, - overflow_row_behaviour="show_ends", - label="Table of softmax probability for pronouns predictions", - ) - - with gr.Row(): - - date_gen.click(date_fn, inputs=[], outputs=[model_name, own_model_name, - x_axis, place_holder, to_normalize, n_fit, input_text]) - place_gen.click(place_fn, inputs=[], outputs=[ - model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text]) - subreddit_gen.click(reddit_fn, inputs=[], outputs=[ - model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text]) - your_gen.click(your_fn, inputs=[], outputs=[ - model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text]) - - btn.click( - predict_gender_pronouns, - inputs=[model_name, own_model_name, x_axis, place_holder, - to_normalize, n_fit, input_text], - outputs=[sample_text, female_fig, male_fig, df]) - - -demo.launch(debug=True) - diff --git a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/chat.py b/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/chat.py deleted file mode 100644 index 98e171b0f35041ec12d01657297a1fc8b9fa91dd..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/chat.py +++ /dev/null @@ -1,562 +0,0 @@ -import ast -import base64 -import copy -import io -import json -import logging -import re -from datetime import datetime -from pathlib import Path - -import yaml -from PIL import Image - -import modules.shared as shared -from modules.extensions import apply_extensions -from modules.html_generator import chat_html_wrapper, make_thumbnail -from modules.text_generation import (encode, generate_reply, - get_max_prompt_length) - - -# Replace multiple string pairs in a string -def replace_all(text, dic): - for i, j in dic.items(): - text = text.replace(i, j) - - return text - - -def generate_chat_prompt(user_input, state, **kwargs): - impersonate = kwargs['impersonate'] if 'impersonate' in kwargs else False - _continue = kwargs['_continue'] if '_continue' in kwargs else False - also_return_rows = kwargs['also_return_rows'] if 'also_return_rows' in kwargs else False - is_instruct = state['mode'] == 'instruct' - rows = [state['context'] if is_instruct else f"{state['context'].strip()}\n"] - min_rows = 3 - - # Finding the maximum prompt size - chat_prompt_size = state['chat_prompt_size'] - if shared.soft_prompt: - chat_prompt_size -= shared.soft_prompt_tensor.shape[1] - - max_length = min(get_max_prompt_length(state), chat_prompt_size) - - # Building the turn templates - if 'turn_template' not in state or state['turn_template'] == '': - if is_instruct: - template = '<|user|>\n<|user-message|>\n<|bot|>\n<|bot-message|>\n' - else: - template = '<|user|>: <|user-message|>\n<|bot|>: <|bot-message|>\n' - else: - template = state['turn_template'].replace(r'\n', '\n') - - replacements = { - '<|user|>': state['name1'].strip(), - '<|bot|>': state['name2'].strip(), - } - - user_turn = replace_all(template.split('<|bot|>')[0], replacements) - bot_turn = replace_all('<|bot|>' + template.split('<|bot|>')[1], replacements) - user_turn_stripped = replace_all(user_turn.split('<|user-message|>')[0], replacements) - bot_turn_stripped = 
replace_all(bot_turn.split('<|bot-message|>')[0], replacements) - - # Building the prompt - i = len(shared.history['internal']) - 1 - while i >= 0 and len(encode(''.join(rows))[0]) < max_length: - if _continue and i == len(shared.history['internal']) - 1: - rows.insert(1, bot_turn_stripped + shared.history['internal'][i][1].strip()) - else: - rows.insert(1, bot_turn.replace('<|bot-message|>', shared.history['internal'][i][1].strip())) - - string = shared.history['internal'][i][0] - if string not in ['', '<|BEGIN-VISIBLE-CHAT|>']: - rows.insert(1, replace_all(user_turn, {'<|user-message|>': string.strip(), '<|round|>': str(i)})) - - i -= 1 - - if impersonate: - min_rows = 2 - rows.append(user_turn_stripped.rstrip(' ')) - elif not _continue: - # Adding the user message - if len(user_input) > 0: - rows.append(replace_all(user_turn, {'<|user-message|>': user_input.strip(), '<|round|>': str(len(shared.history["internal"]))})) - - # Adding the Character prefix - rows.append(apply_extensions("bot_prefix", bot_turn_stripped.rstrip(' '))) - - while len(rows) > min_rows and len(encode(''.join(rows))[0]) >= max_length: - rows.pop(1) - - prompt = ''.join(rows) - if also_return_rows: - return prompt, rows - else: - return prompt - - -def get_stopping_strings(state): - if state['mode'] == 'instruct': - stopping_strings = [f"\n{state['name1']}", f"\n{state['name2']}"] - else: - stopping_strings = [f"\n{state['name1']}:", f"\n{state['name2']}:"] - - stopping_strings += ast.literal_eval(f"[{state['custom_stopping_strings']}]") - return stopping_strings - - -def extract_message_from_reply(reply, state): - next_character_found = False - stopping_strings = get_stopping_strings(state) - - if state['stop_at_newline']: - lines = reply.split('\n') - reply = lines[0].strip() - if len(lines) > 1: - next_character_found = True - else: - for string in stopping_strings: - idx = reply.find(string) - if idx != -1: - reply = reply[:idx] - next_character_found = True - - # If something like "\nYo" is generated just before "\nYou:" - # is completed, trim it - if not next_character_found: - for string in stopping_strings: - for j in range(len(string) - 1, 0, -1): - if reply[-j:] == string[:j]: - reply = reply[:-j] - break - else: - continue - - break - - return reply, next_character_found - - -def chatbot_wrapper(text, state, regenerate=False, _continue=False): - if shared.model_name == 'None' or shared.model is None: - logging.error("No model is loaded! 
Select one in the Model tab.") - yield shared.history['visible'] - return - - # Defining some variables - cumulative_reply = '' - just_started = True - visible_text = None - eos_token = '\n' if state['stop_at_newline'] else None - stopping_strings = get_stopping_strings(state) - - # Preparing the input - if not any((regenerate, _continue)): - text, visible_text = apply_extensions('input_hijack', text, visible_text) - if visible_text is None: - visible_text = text - - text = apply_extensions('input', text) - # *Is typing...* - yield shared.history['visible'] + [[visible_text, shared.processing_message]] - else: - text, visible_text = shared.history['internal'][-1][0], shared.history['visible'][-1][0] - if regenerate: - shared.history['visible'].pop() - shared.history['internal'].pop() - # *Is typing...* - yield shared.history['visible'] + [[visible_text, shared.processing_message]] - elif _continue: - last_reply = [shared.history['internal'][-1][1], shared.history['visible'][-1][1]] - yield shared.history['visible'][:-1] + [[visible_text, last_reply[1] + '...']] - - # Generating the prompt - kwargs = {'_continue': _continue} - prompt = apply_extensions('custom_generate_chat_prompt', text, state, **kwargs) - if prompt is None: - prompt = generate_chat_prompt(text, state, **kwargs) - - # Generate - for i in range(state['chat_generation_attempts']): - reply = None - for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings): - reply = cumulative_reply + reply - - # Extracting the reply - reply, next_character_found = extract_message_from_reply(reply, state) - visible_reply = re.sub("(||{{user}})", state['name1'], reply) - visible_reply = apply_extensions("output", visible_reply) - if _continue: - sep = ' ' if last_reply[0][-1] not in [' ', '\n'] else '' - reply = last_reply[0] + sep + reply - sep = ' ' if last_reply[1][-1] not in [' ', '\n'] else '' - visible_reply = last_reply[1] + sep + visible_reply - - # We need this global variable to handle the Stop event, - # otherwise gradio gets confused - if shared.stop_everything: - return shared.history['visible'] - - if just_started: - just_started = False - if not _continue: - shared.history['internal'].append(['', '']) - shared.history['visible'].append(['', '']) - - shared.history['internal'][-1] = [text, reply] - shared.history['visible'][-1] = [visible_text, visible_reply] - yield shared.history['visible'] - if next_character_found: - break - - if reply is not None: - cumulative_reply = reply - - yield shared.history['visible'] - - -def impersonate_wrapper(text, state): - if shared.model_name == 'None' or shared.model is None: - logging.error("No model is loaded! 
Select one in the Model tab.") - yield '' - return - - # Defining some variables - cumulative_reply = '' - eos_token = '\n' if state['stop_at_newline'] else None - prompt = generate_chat_prompt(text, state, impersonate=True) - stopping_strings = get_stopping_strings(state) - - # Yield *Is typing...* - yield shared.processing_message - for i in range(state['chat_generation_attempts']): - reply = None - for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings): - reply = cumulative_reply + reply - reply, next_character_found = extract_message_from_reply(reply, state) - yield reply - if next_character_found: - break - - if reply is not None: - cumulative_reply = reply - - yield reply - - -def cai_chatbot_wrapper(text, state): - for history in chatbot_wrapper(text, state): - yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode']) - - -def regenerate_wrapper(text, state): - if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0: - yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode']) - else: - for history in chatbot_wrapper('', state, regenerate=True): - yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode']) - - -def continue_wrapper(text, state): - if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0: - yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode']) - else: - for history in chatbot_wrapper('', state, _continue=True): - yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode']) - - -def remove_last_message(name1, name2, mode): - if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>': - last = shared.history['visible'].pop() - shared.history['internal'].pop() - else: - last = ['', ''] - - return chat_html_wrapper(shared.history['visible'], name1, name2, mode), last[0] - - -def send_last_reply_to_input(): - if len(shared.history['internal']) > 0: - return shared.history['internal'][-1][1] - else: - return '' - - -def replace_last_reply(text, name1, name2, mode): - if len(shared.history['visible']) > 0: - shared.history['visible'][-1][1] = text - shared.history['internal'][-1][1] = apply_extensions("input", text) - - return chat_html_wrapper(shared.history['visible'], name1, name2, mode) - - -def send_dummy_message(text, name1, name2, mode): - shared.history['visible'].append([text, '']) - shared.history['internal'].append([apply_extensions("input", text), '']) - return chat_html_wrapper(shared.history['visible'], name1, name2, mode) - - -def send_dummy_reply(text, name1, name2, mode): - if len(shared.history['visible']) > 0 and not shared.history['visible'][-1][1] == '': - shared.history['visible'].append(['', '']) - shared.history['internal'].append(['', '']) - - shared.history['visible'][-1][1] = text - shared.history['internal'][-1][1] = apply_extensions("input", text) - return chat_html_wrapper(shared.history['visible'], name1, name2, mode) - - -def clear_html(): - return chat_html_wrapper([], "", "") - - -def clear_chat_log(name1, name2, greeting, mode): - shared.history['visible'] = [] - shared.history['internal'] = [] - - if greeting != '': - shared.history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]] - shared.history['visible'] += 
[['', apply_extensions("output", greeting)]] - - # Save cleared logs - save_history(mode) - return chat_html_wrapper(shared.history['visible'], name1, name2, mode) - - -def redraw_html(name1, name2, mode): - return chat_html_wrapper(shared.history['visible'], name1, name2, mode) - - -def tokenize_dialogue(dialogue, name1, name2, mode): - history = [] - messages = [] - dialogue = re.sub('', '', dialogue) - dialogue = re.sub('', '', dialogue) - dialogue = re.sub('(\n|^)[Aa]non:', '\\1You:', dialogue) - dialogue = re.sub('(\n|^)\[CHARACTER\]:', f'\\g<1>{name2}:', dialogue) - idx = [m.start() for m in re.finditer(f"(^|\n)({re.escape(name1)}|{re.escape(name2)}):", dialogue)] - if len(idx) == 0: - return history - - for i in range(len(idx) - 1): - messages.append(dialogue[idx[i]:idx[i + 1]].strip()) - - messages.append(dialogue[idx[-1]:].strip()) - entry = ['', ''] - for i in messages: - if i.startswith(f'{name1}:'): - entry[0] = i[len(f'{name1}:'):].strip() - elif i.startswith(f'{name2}:'): - entry[1] = i[len(f'{name2}:'):].strip() - if not (len(entry[0]) == 0 and len(entry[1]) == 0): - history.append(entry) - - entry = ['', ''] - - print("\033[1;32;1m\nDialogue tokenized to:\033[0;37;0m\n", end='') - for row in history: - for column in row: - print("\n") - for line in column.strip().split('\n'): - print("| " + line + "\n") - - print("|\n") - print("------------------------------") - - return history - - -def save_history(mode, timestamp=False): - # Instruct mode histories should not be saved as if - # Alpaca or Vicuna were characters - if mode == 'instruct': - if not timestamp: - return - - fname = f"Instruct_{datetime.now().strftime('%Y%m%d-%H%M%S')}.json" - else: - if timestamp: - fname = f"{shared.character}_{datetime.now().strftime('%Y%m%d-%H%M%S')}.json" - else: - fname = f"{shared.character}_persistent.json" - - if not Path('logs').exists(): - Path('logs').mkdir() - - with open(Path(f'logs/{fname}'), 'w', encoding='utf-8') as f: - f.write(json.dumps({'data': shared.history['internal'], 'data_visible': shared.history['visible']}, indent=2)) - - return Path(f'logs/{fname}') - - -def load_history(file, name1, name2): - file = file.decode('utf-8') - try: - j = json.loads(file) - if 'data' in j: - shared.history['internal'] = j['data'] - if 'data_visible' in j: - shared.history['visible'] = j['data_visible'] - else: - shared.history['visible'] = copy.deepcopy(shared.history['internal']) - except: - shared.history['internal'] = tokenize_dialogue(file, name1, name2) - shared.history['visible'] = copy.deepcopy(shared.history['internal']) - - -def replace_character_names(text, name1, name2): - text = text.replace('{{user}}', name1).replace('{{char}}', name2) - return text.replace('', name1).replace('', name2) - - -def build_pygmalion_style_context(data): - context = "" - if 'char_persona' in data and data['char_persona'] != '': - context += f"{data['char_name']}'s Persona: {data['char_persona']}\n" - - if 'world_scenario' in data and data['world_scenario'] != '': - context += f"Scenario: {data['world_scenario']}\n" - - context = f"{context.strip()}\n\n" - return context - - -def generate_pfp_cache(character): - cache_folder = Path("cache") - if not cache_folder.exists(): - cache_folder.mkdir() - - for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]: - if path.exists(): - img = make_thumbnail(Image.open(path)) - img.save(Path('cache/pfp_character.png'), format='PNG') - return img - - return None - - -def load_character(character, name1, name2, mode): - 
shared.character = character - context = greeting = turn_template = "" - greeting_field = 'greeting' - picture = None - - # Deleting the profile picture cache, if any - if Path("cache/pfp_character.png").exists(): - Path("cache/pfp_character.png").unlink() - - if character != 'None': - folder = 'characters' if not mode == 'instruct' else 'characters/instruction-following' - picture = generate_pfp_cache(character) - for extension in ["yml", "yaml", "json"]: - filepath = Path(f'{folder}/{character}.{extension}') - if filepath.exists(): - break - - file_contents = open(filepath, 'r', encoding='utf-8').read() - data = json.loads(file_contents) if extension == "json" else yaml.safe_load(file_contents) - - # Finding the bot's name - for k in ['name', 'bot', '<|bot|>', 'char_name']: - if k in data and data[k] != '': - name2 = data[k] - break - - # Find the user name (if any) - for k in ['your_name', 'user', '<|user|>']: - if k in data and data[k] != '': - name1 = data[k] - break - else: - name1 = shared.settings['name1'] - - for field in ['context', 'greeting', 'example_dialogue', 'char_persona', 'char_greeting', 'world_scenario']: - if field in data: - data[field] = replace_character_names(data[field], name1, name2) - - if 'context' in data: - context = data['context'] - if mode != 'instruct': - context = context.strip() + '\n\n' - elif "char_persona" in data: - context = build_pygmalion_style_context(data) - greeting_field = 'char_greeting' - - if 'example_dialogue' in data: - context += f"{data['example_dialogue'].strip()}\n" - - if greeting_field in data: - greeting = data[greeting_field] - - if 'turn_template' in data: - turn_template = data['turn_template'] - - else: - context = shared.settings['context'] - name2 = shared.settings['name2'] - greeting = shared.settings['greeting'] - turn_template = shared.settings['turn_template'] - - if mode != 'instruct': - shared.history['internal'] = [] - shared.history['visible'] = [] - if Path(f'logs/{shared.character}_persistent.json').exists(): - load_history(open(Path(f'logs/{shared.character}_persistent.json'), 'rb').read(), name1, name2) - else: - # Insert greeting if it exists - if greeting != "": - shared.history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]] - shared.history['visible'] += [['', apply_extensions("output", greeting)]] - - # Create .json log files since they don't already exist - save_history(mode) - - return name1, name2, picture, greeting, context, repr(turn_template)[1:-1], chat_html_wrapper(shared.history['visible'], name1, name2, mode) - - -def upload_character(json_file, img, tavern=False): - json_file = json_file if type(json_file) == str else json_file.decode('utf-8') - data = json.loads(json_file) - outfile_name = data["char_name"] - i = 1 - while Path(f'characters/{outfile_name}.json').exists(): - outfile_name = f'{data["char_name"]}_{i:03d}' - i += 1 - - if tavern: - outfile_name = f'TavernAI-{outfile_name}' - - with open(Path(f'characters/{outfile_name}.json'), 'w', encoding='utf-8') as f: - f.write(json_file) - - if img is not None: - img = Image.open(io.BytesIO(img)) - img.save(Path(f'characters/{outfile_name}.png')) - - logging.info(f'New character saved to "characters/{outfile_name}.json".') - return outfile_name - - -def upload_tavern_character(img, name1, name2): - _img = Image.open(io.BytesIO(img)) - _img.getexif() - decoded_string = base64.b64decode(_img.info['chara']) - _json = json.loads(decoded_string) - _json = {"char_name": _json['name'], "char_persona": _json['description'], "char_greeting": 
_json["first_mes"], "example_dialogue": _json['mes_example'], "world_scenario": _json['scenario']} - return upload_character(json.dumps(_json), img, tavern=True) - - -def upload_your_profile_picture(img, name1, name2, mode): - cache_folder = Path("cache") - if not cache_folder.exists(): - cache_folder.mkdir() - - if img is None: - if Path("cache/pfp_me.png").exists(): - Path("cache/pfp_me.png").unlink() - else: - img = make_thumbnail(img) - img.save(Path('cache/pfp_me.png')) - logging.info('Profile picture saved to "cache/pfp_me.png"') - - return chat_html_wrapper(shared.history['visible'], name1, name2, mode, reset_cache=True) diff --git a/spaces/aphenx/bingo/src/pages/api/sydney.ts b/spaces/aphenx/bingo/src/pages/api/sydney.ts deleted file mode 100644 index 0e7bbf23d77c2e1a6635185a060eeee58b8c8e66..0000000000000000000000000000000000000000 --- a/spaces/aphenx/bingo/src/pages/api/sydney.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next' -import { WebSocket, debug } from '@/lib/isomorphic' -import { BingWebBot } from '@/lib/bots/bing' -import { websocketUtils } from '@/lib/bots/bing/utils' -import { WatchDog, createHeaders } from '@/lib/utils' - - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const conversationContext = req.body - const headers = createHeaders(req.cookies) - debug(headers) - res.setHeader('Content-Type', 'text/stream; charset=UTF-8') - - const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', { - headers: { - ...headers, - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - pragma: 'no-cache', - } - }) - - const closeDog = new WatchDog() - const timeoutDog = new WatchDog() - ws.onmessage = (event) => { - timeoutDog.watch(() => { - ws.send(websocketUtils.packMessage({ type: 6 })) - }, 1500) - closeDog.watch(() => { - ws.close() - }, 10000) - res.write(event.data) - if (/\{"type":([367])\}/.test(String(event.data))) { - const type = parseInt(RegExp.$1, 10) - debug('connection type', type) - if (type === 3) { - ws.close() - } else { - ws.send(websocketUtils.packMessage({ type })) - } - } - } - - ws.onclose = () => { - timeoutDog.reset() - closeDog.reset() - debug('connection close') - res.end() - } - - await new Promise((resolve) => ws.onopen = resolve) - ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 })) - ws.send(websocketUtils.packMessage({ type: 6 })) - ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!))) - req.socket.once('close', () => { - ws.close() - if (!res.closed) { - res.end() - } - }) -} diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/neural_hmm.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/neural_hmm.py deleted file mode 100644 index 0631ba98c00029e9871c965e4c7f465aa32bc406..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/neural_hmm.py +++ /dev/null @@ -1,553 +0,0 @@ -from typing import List - -import torch -import torch.distributions as tdist -import torch.nn.functional as F -from torch import nn -from torch.utils.checkpoint import checkpoint - -from TTS.tts.layers.overflow.common_layers import Outputnet, OverflowUtils -from TTS.tts.layers.tacotron.common_layers import Prenet -from TTS.tts.utils.helpers import sequence_mask - - -class NeuralHMM(nn.Module): - """Autoregressive left to right HMM 
model primarily used in "Neural HMMs are all you need (for high-quality attention-free TTS)" - - Paper:: - https://arxiv.org/abs/2108.13320 - - Paper abstract:: - Neural sequence-to-sequence TTS has achieved significantly better output quality than statistical speech synthesis using - HMMs. However, neural TTS is generally not probabilistic and uses non-monotonic attention. Attention failures increase - training time and can make synthesis babble incoherently. This paper describes how the old and new paradigms can be - combined to obtain the advantages of both worlds, by replacing attention in neural TTS with an autoregressive left-right - no-skip hidden Markov model defined by a neural network. Based on this proposal, we modify Tacotron 2 to obtain an - HMM-based neural TTS model with monotonic alignment, trained to maximise the full sequence likelihood without - approximation. We also describe how to combine ideas from classical and contemporary TTS for best results. The resulting - example system is smaller and simpler than Tacotron 2, and learns to speak with fewer iterations and less data, whilst - achieving comparable naturalness prior to the post-net. Our approach also allows easy control over speaking rate. - - Args: - frame_channels (int): Output dimension to generate. - ar_order (int): Autoregressive order of the model. In ablations of Neural HMM it was found that more autoregression while giving more variation hurts naturalness of the synthesised audio. - deterministic_transition (bool): deterministic duration generation based on duration quantiles as defiend in "S. Ronanki, O. Watts, S. King, and G. E. Henter, “Medianbased generation of synthetic speech durations using a nonparametric approach,” in Proc. SLT, 2016.". Defaults to True. - encoder_dim (int): Channels of encoder input and character embedding tensors. Defaults to 512. - prenet_type (str): `original` or `bn`. `original` sets the default Prenet and `bn` uses Batch Normalization version of the Prenet. - prenet_dim (int): Dimension of the Prenet. - prenet_n_layers (int): Number of layers in the Prenet. - prenet_dropout (float): Dropout probability of the Prenet. - prenet_dropout_at_inference (bool): If True, dropout is applied at inference time. - memory_rnn_dim (int): Size of the memory RNN to process output of prenet. - outputnet_size (List[int]): Size of the output network inside the neural HMM. - flat_start_params (dict): Parameters for the flat start initialization of the neural HMM. - std_floor (float): Floor value for the standard deviation of the neural HMM. Prevents model cheating by putting point mass and getting infinite likelihood at any datapoint. - use_grad_checkpointing (bool, optional): Use gradient checkpointing to save memory. Defaults to True. 
- """ - - def __init__( - self, - frame_channels: int, - ar_order: int, - deterministic_transition: bool, - encoder_dim: int, - prenet_type: str, - prenet_dim: int, - prenet_n_layers: int, - prenet_dropout: float, - prenet_dropout_at_inference: bool, - memory_rnn_dim: int, - outputnet_size: List[int], - flat_start_params: dict, - std_floor: float, - use_grad_checkpointing: bool = True, - ): - super().__init__() - - self.frame_channels = frame_channels - self.ar_order = ar_order - self.deterministic_transition = deterministic_transition - self.prenet_dim = prenet_dim - self.memory_rnn_dim = memory_rnn_dim - self.use_grad_checkpointing = use_grad_checkpointing - - self.transition_model = TransitionModel() - self.emission_model = EmissionModel() - - assert ar_order > 0, f"AR order must be greater than 0 provided {ar_order}" - - self.ar_order = ar_order - self.prenet = Prenet( - in_features=frame_channels * ar_order, - prenet_type=prenet_type, - prenet_dropout=prenet_dropout, - dropout_at_inference=prenet_dropout_at_inference, - out_features=[self.prenet_dim for _ in range(prenet_n_layers)], - bias=False, - ) - self.memory_rnn = nn.LSTMCell(input_size=prenet_dim, hidden_size=memory_rnn_dim) - self.output_net = Outputnet( - encoder_dim, memory_rnn_dim, frame_channels, outputnet_size, flat_start_params, std_floor - ) - self.register_buffer("go_tokens", torch.zeros(ar_order, 1)) - - def forward(self, inputs, inputs_len, mels, mel_lens): - r"""HMM forward algorithm for training uses logarithmic version of Rabiner (1989) forward algorithm. - - Args: - inputs (torch.FloatTensor): Encoder outputs - inputs_len (torch.LongTensor): Encoder output lengths - mels (torch.FloatTensor): Mel inputs - mel_lens (torch.LongTensor): Length of mel inputs - - Shapes: - - inputs: (B, T, D_out_enc) - - inputs_len: (B) - - mels: (B, D_mel, T_mel) - - mel_lens: (B) - - Returns: - log_prob (torch.FloatTensor): Log probability of the sequence - """ - # Get dimensions of inputs - batch_size, N, _ = inputs.shape - T_max = torch.max(mel_lens) - mels = mels.permute(0, 2, 1) - - # Intialize forward algorithm - log_state_priors = self._initialize_log_state_priors(inputs) - log_c, log_alpha_scaled, transition_matrix, means = self._initialize_forward_algorithm_variables(mels, N) - - # Initialize autoregression elements - ar_inputs = self._add_go_token(mels) - h_memory, c_memory = self._init_lstm_states(batch_size, self.memory_rnn_dim, mels) - - for t in range(T_max): - # Process Autoregression - h_memory, c_memory = self._process_ar_timestep(t, ar_inputs, h_memory, c_memory) - # Get mean, std and transition vector from decoder for this timestep - # Note: Gradient checkpointing currently doesn't works with multiple gpus inside a loop - if self.use_grad_checkpointing and self.training: - mean, std, transition_vector = checkpoint(self.output_net, h_memory, inputs) - else: - mean, std, transition_vector = self.output_net(h_memory, inputs) - - if t == 0: - log_alpha_temp = log_state_priors + self.emission_model(mels[:, 0], mean, std, inputs_len) - else: - log_alpha_temp = self.emission_model(mels[:, t], mean, std, inputs_len) + self.transition_model( - log_alpha_scaled[:, t - 1, :], transition_vector, inputs_len - ) - log_c[:, t] = torch.logsumexp(log_alpha_temp, dim=1) - log_alpha_scaled[:, t, :] = log_alpha_temp - log_c[:, t].unsqueeze(1) - transition_matrix[:, t] = transition_vector # needed for absorption state calculation - - # Save for plotting - means.append(mean.detach()) - - log_c, log_alpha_scaled = 
self._mask_lengths(mel_lens, log_c, log_alpha_scaled) - - sum_final_log_c = self.get_absorption_state_scaling_factor( - mel_lens, log_alpha_scaled, inputs_len, transition_matrix - ) - - log_probs = torch.sum(log_c, dim=1) + sum_final_log_c - - return log_probs, log_alpha_scaled, transition_matrix, means - - @staticmethod - def _mask_lengths(mel_lens, log_c, log_alpha_scaled): - """ - Mask the lengths of the forward variables so that the variable lenghts - do not contribute in the loss calculation - Args: - mel_inputs (torch.FloatTensor): (batch, T, frame_channels) - mel_inputs_lengths (torch.IntTensor): (batch) - log_c (torch.FloatTensor): (batch, T) - Returns: - log_c (torch.FloatTensor) : scaled probabilities (batch, T) - log_alpha_scaled (torch.FloatTensor): forward probabilities (batch, T, N) - """ - mask_log_c = sequence_mask(mel_lens) - log_c = log_c * mask_log_c - mask_log_alpha_scaled = mask_log_c.unsqueeze(2) - log_alpha_scaled = log_alpha_scaled * mask_log_alpha_scaled - return log_c, log_alpha_scaled - - def _process_ar_timestep( - self, - t, - ar_inputs, - h_memory, - c_memory, - ): - """ - Process autoregression in timestep - 1. At a specific t timestep - 2. Perform data dropout if applied (we did not use it) - 3. Run the autoregressive frame through the prenet (has dropout) - 4. Run the prenet output through the post prenet rnn - - Args: - t (int): mel-spec timestep - ar_inputs (torch.FloatTensor): go-token appended mel-spectrograms - - shape: (b, D_out, T_out) - h_post_prenet (torch.FloatTensor): previous timestep rnn hidden state - - shape: (b, memory_rnn_dim) - c_post_prenet (torch.FloatTensor): previous timestep rnn cell state - - shape: (b, memory_rnn_dim) - - Returns: - h_post_prenet (torch.FloatTensor): rnn hidden state of the current timestep - c_post_prenet (torch.FloatTensor): rnn cell state of the current timestep - """ - prenet_input = ar_inputs[:, t : t + self.ar_order].flatten(1) - memory_inputs = self.prenet(prenet_input) - h_memory, c_memory = self.memory_rnn(memory_inputs, (h_memory, c_memory)) - return h_memory, c_memory - - def _add_go_token(self, mel_inputs): - """Append the go token to create the autoregressive input - Args: - mel_inputs (torch.FloatTensor): (batch_size, T, n_mel_channel) - Returns: - ar_inputs (torch.FloatTensor): (batch_size, T, n_mel_channel) - """ - batch_size, T, _ = mel_inputs.shape - go_tokens = self.go_tokens.unsqueeze(0).expand(batch_size, self.ar_order, self.frame_channels) - ar_inputs = torch.cat((go_tokens, mel_inputs), dim=1)[:, :T] - return ar_inputs - - @staticmethod - def _initialize_forward_algorithm_variables(mel_inputs, N): - r"""Initialize placeholders for forward algorithm variables, to use a stable - version we will use log_alpha_scaled and the scaling constant - - Args: - mel_inputs (torch.FloatTensor): (b, T_max, frame_channels) - N (int): number of states - Returns: - log_c (torch.FloatTensor): Scaling constant (b, T_max) - """ - b, T_max, _ = mel_inputs.shape - log_alpha_scaled = mel_inputs.new_zeros((b, T_max, N)) - log_c = mel_inputs.new_zeros(b, T_max) - transition_matrix = mel_inputs.new_zeros((b, T_max, N)) - - # Saving for plotting later, will not have gradient tapes - means = [] - return log_c, log_alpha_scaled, transition_matrix, means - - @staticmethod - def _init_lstm_states(batch_size, hidden_state_dim, device_tensor): - r""" - Initialize Hidden and Cell states for LSTM Cell - - Args: - batch_size (Int): batch size - hidden_state_dim (Int): dimensions of the h and c - device_tensor (torch.FloatTensor): 
useful for the device and type - - Returns: - (torch.FloatTensor): shape (batch_size, hidden_state_dim) - can be hidden state for LSTM - (torch.FloatTensor): shape (batch_size, hidden_state_dim) - can be the cell state for LSTM - """ - return ( - device_tensor.new_zeros(batch_size, hidden_state_dim), - device_tensor.new_zeros(batch_size, hidden_state_dim), - ) - - def get_absorption_state_scaling_factor(self, mels_len, log_alpha_scaled, inputs_len, transition_vector): - """Returns the final scaling factor of the absorption state - - Args: - mels_len (torch.IntTensor): Input size of mels to - get the last timestep of log_alpha_scaled - log_alpha_scaled (torch.FloatTensor): State probabilities - inputs_len (torch.IntTensor): length of the states to - mask the values of states lengths - ( - Useful when the batch has very different lengths, - when the length of an observation is less than - the number of max states, then the log alpha after - the state value is filled with -infs. So we mask - those values so that it only considers the states - which are needed for that length - ) - transition_vector (torch.FloatTensor): transition vector for each state per timestep - - Shapes: - - mels_len: (batch_size) - - log_alpha_scaled: (batch_size, T, N) - - inputs_len: (batch_size) - - transition_vector: (batch_size, T, N) - - Returns: - sum_final_log_c (torch.FloatTensor): (batch_size) - - """ - N = torch.max(inputs_len) - max_inputs_len = log_alpha_scaled.shape[2] - state_lengths_mask = sequence_mask(inputs_len, max_len=max_inputs_len) - - last_log_alpha_scaled_index = ( - (mels_len - 1).unsqueeze(-1).expand(-1, N).unsqueeze(1) - ) # Batch X Hidden State Size - last_log_alpha_scaled = torch.gather(log_alpha_scaled, 1, last_log_alpha_scaled_index).squeeze(1) - last_log_alpha_scaled = last_log_alpha_scaled.masked_fill(~state_lengths_mask, -float("inf")) - - last_transition_vector = torch.gather(transition_vector, 1, last_log_alpha_scaled_index).squeeze(1) - last_transition_probability = torch.sigmoid(last_transition_vector) - log_probability_of_transitioning = OverflowUtils.log_clamped(last_transition_probability) - - last_transition_probability_index = self.get_mask_for_last_item(inputs_len, inputs_len.device) - log_probability_of_transitioning = log_probability_of_transitioning.masked_fill( - ~last_transition_probability_index, -float("inf") - ) - final_log_c = last_log_alpha_scaled + log_probability_of_transitioning - - # If the length of the mel is less than the number of states it will select the -inf values leading to nan gradients - # Ideally, we should clean the dataset; otherwise the clamp below is a small hack that keeps those values finite - final_log_c = final_log_c.clamp(min=torch.finfo(final_log_c.dtype).min) - - sum_final_log_c = torch.logsumexp(final_log_c, dim=1) - return sum_final_log_c - - @staticmethod - def get_mask_for_last_item(lengths, device, out_tensor=None): - """Returns n-1 mask for the last item in the sequence. - - Args: - lengths (torch.IntTensor): lengths in a batch - device (str): device on which the mask is created - out_tensor (torch.Tensor, optional): uses the memory of a specific tensor. - Defaults to None.
- - Returns: - - Shape: :math:`(b, max_len)` - """ - max_len = torch.max(lengths).item() - ids = ( - torch.arange(0, max_len, device=device) if out_tensor is None else torch.arange(0, max_len, out=out_tensor) - ) - mask = ids == lengths.unsqueeze(1) - 1 - return mask - - @torch.inference_mode() - def inference( - self, - inputs: torch.FloatTensor, - input_lens: torch.LongTensor, - sampling_temp: float, - max_sampling_time: int, - duration_threshold: float, - ): - """Inference from autoregressive neural HMM - - Args: - inputs (torch.FloatTensor): input states - - shape: :math:`(b, T, d)` - input_lens (torch.LongTensor): input state lengths - - shape: :math:`(b)` - sampling_temp (float): sampling temperature - max_sampling_time (int): max sampling time - duration_threshold (float): duration threshold to switch to next state - - Use this to change the speaking rate of the synthesised audio - """ - - b = inputs.shape[0] - outputs = { - "hmm_outputs": [], - "hmm_outputs_len": [], - "alignments": [], - "input_parameters": [], - "output_parameters": [], - } - for i in range(b): - neural_hmm_outputs, states_travelled, input_parameters, output_parameters = self.sample( - inputs[i : i + 1], input_lens[i], sampling_temp, max_sampling_time, duration_threshold - ) - - outputs["hmm_outputs"].append(neural_hmm_outputs) - outputs["hmm_outputs_len"].append(neural_hmm_outputs.shape[0]) - outputs["alignments"].append(states_travelled) - outputs["input_parameters"].append(input_parameters) - outputs["output_parameters"].append(output_parameters) - - outputs["hmm_outputs"] = nn.utils.rnn.pad_sequence(outputs["hmm_outputs"], batch_first=True) - outputs["hmm_outputs_len"] = torch.tensor( - outputs["hmm_outputs_len"], dtype=input_lens.dtype, device=input_lens.device - ) - return outputs - - @torch.inference_mode() - def sample(self, inputs, input_lens, sampling_temp, max_sampling_time, duration_threshold): - """Samples an output from the parameter models - - Args: - inputs (torch.FloatTensor): input states - - shape: :math:`(1, T, d)` - input_lens (torch.LongTensor): input state lengths - - shape: :math:`(1)` - sampling_temp (float): sampling temperature - max_sampling_time (int): max sampling time - duration_threshold (float): duration threshold to switch to next state - - Returns: - outputs (torch.FloatTensor): Output Observations - - Shape: :math:`(T, output_dim)` - states_travelled (list[int]): Hidden states travelled - - Shape: :math:`(T)` - input_parameters (list[torch.FloatTensor]): Input parameters - output_parameters (list[torch.FloatTensor]): Output parameters - """ - states_travelled, outputs, t = [], [], 0 - - # Sample initial state - current_state = 0 - states_travelled.append(current_state) - - # Prepare autoregression - prenet_input = self.go_tokens.unsqueeze(0).expand(1, self.ar_order, self.frame_channels) - h_memory, c_memory = self._init_lstm_states(1, self.memory_rnn_dim, prenet_input) - - input_parameter_values = [] - output_parameter_values = [] - quantile = 1 - while True: - memory_input = self.prenet(prenet_input.flatten(1).unsqueeze(0)) - # will be 1 while sampling - h_memory, c_memory = self.memory_rnn(memory_input.squeeze(0), (h_memory, c_memory)) - - z_t = inputs[:, current_state].unsqueeze(0) # Add fake time dimension - mean, std, transition_vector = self.output_net(h_memory, z_t) - - transition_probability = torch.sigmoid(transition_vector.flatten()) - staying_probability = torch.sigmoid(-transition_vector.flatten()) - - # Save for plotting -
input_parameter_values.append([prenet_input, current_state]) - output_parameter_values.append([mean, std, transition_probability]) - - x_t = self.emission_model.sample(mean, std, sampling_temp=sampling_temp) - - # Prepare autoregressive input for next iteration - prenet_input = torch.cat((prenet_input, x_t), dim=1)[:, 1:] - - outputs.append(x_t.flatten()) - - transition_matrix = torch.cat((staying_probability, transition_probability)) - quantile *= staying_probability - if not self.deterministic_transition: - switch = transition_matrix.multinomial(1)[0].item() - else: - switch = quantile < duration_threshold - - if switch: - current_state += 1 - quantile = 1 - - states_travelled.append(current_state) - - if (current_state == input_lens) or (max_sampling_time and t == max_sampling_time - 1): - break - - t += 1 - - return ( - torch.stack(outputs, dim=0), - F.one_hot(input_lens.new_tensor(states_travelled)), - input_parameter_values, - output_parameter_values, - ) - - @staticmethod - def _initialize_log_state_priors(text_embeddings): - """Creates the log pi for the forward algorithm. - - Args: - text_embeddings (torch.FloatTensor): used to create the log pi - on current device - - Shapes: - - text_embeddings: (B, T, D_out_enc) - """ - N = text_embeddings.shape[1] - log_state_priors = text_embeddings.new_full([N], -float("inf")) - log_state_priors[0] = 0.0 - return log_state_priors - - -class TransitionModel(nn.Module): - """Transition Model of the HMM, it represents the probability of transitioning - from the current state to all other states""" - - def forward(self, log_alpha_scaled, transition_vector, inputs_len): # pylint: disable=no-self-use - r""" - product of the past state with transitional probabilities in log space - - Args: - log_alpha_scaled (torch.Tensor): Multiply previous timestep's alphas by - transition matrix (in log domain) - - shape: (batch size, N) - transition_vector (torch.tensor): transition vector for each state - - shape: (N) - inputs_len (int tensor): Lengths of states in a batch - - shape: (batch) - - Returns: - out (torch.FloatTensor): log probability of transitioning to each state - """ - transition_p = torch.sigmoid(transition_vector) - staying_p = torch.sigmoid(-transition_vector) - - log_staying_probability = OverflowUtils.log_clamped(staying_p) - log_transition_probability = OverflowUtils.log_clamped(transition_p) - - staying = log_alpha_scaled + log_staying_probability - leaving = log_alpha_scaled + log_transition_probability - leaving = leaving.roll(1, dims=1) - leaving[:, 0] = -float("inf") - inputs_len_mask = sequence_mask(inputs_len) - out = OverflowUtils.logsumexp(torch.stack((staying, leaving), dim=2), dim=2) - out = out.masked_fill(~inputs_len_mask, -float("inf")) # There are no states to contribute to the loss - return out - - -class EmissionModel(nn.Module): - """Emission Model of the HMM, it represents the probability of - emitting an observation based on the current state""" - - def __init__(self) -> None: - super().__init__() - self.distribution_function: tdist.Distribution = tdist.normal.Normal - - def sample(self, means, stds, sampling_temp): - return self.distribution_function(means, stds * sampling_temp).sample() if sampling_temp > 0 else means - - def forward(self, x_t, means, stds, state_lengths): - r"""Calculates the log probability of the given data (x_t) - being observed from states with given means and stds - Args: - x_t (float tensor) : observation at current time step - - shape: (batch, feature_dim) - means (float tensor): means of the
distributions of hidden states - - shape: (batch, hidden_state, feature_dim) - stds (float tensor): standard deviations of the distributions of the hidden states - - shape: (batch, hidden_state, feature_dim) - state_lengths (int tensor): Lengths of states in a batch - - shape: (batch) - - Returns: - out (float tensor): observation log likelihoods, - expressing the probability of an observation - being generated from a state i - shape: (batch, hidden_state) - """ - emission_dists = self.distribution_function(means, stds) - out = emission_dists.log_prob(x_t.unsqueeze(1)) - state_lengths_mask = sequence_mask(state_lengths).unsqueeze(2) - out = torch.sum(out * state_lengths_mask, dim=2) - return out diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Signature/test_pss.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Signature/test_pss.py deleted file mode 100644 index 535474bec5f00a4b02185c3bdfe4b1e303cdb9fd..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Signature/test_pss.py +++ /dev/null @@ -1,377 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2014, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -import unittest - -from Crypto.Util.py3compat import b, bchr -from Crypto.Util.number import bytes_to_long -from Crypto.Util.strxor import strxor -from Crypto.SelfTest.st_common import list_test_cases -from Crypto.SelfTest.loader import load_test_vectors, load_test_vectors_wycheproof - -from Crypto.Hash import SHA1, SHA224, SHA256, SHA384, SHA512 -from Crypto.PublicKey import RSA -from Crypto.Signature import pss -from Crypto.Signature import PKCS1_PSS - -from Crypto.Signature.pss import MGF1 - - -def load_hash_by_name(hash_name): - return __import__("Crypto.Hash." 
+ hash_name, globals(), locals(), ["new"]) - - -class PRNG(object): - - def __init__(self, stream): - self.stream = stream - self.idx = 0 - - def __call__(self, rnd_size): - result = self.stream[self.idx:self.idx + rnd_size] - self.idx += rnd_size - return result - - -class PSS_Tests(unittest.TestCase): - - rsa_key = b'-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAsvI34FgiTK8+txBvmooNGpNwk23YTU51dwNZi5yha3W4lA/Q\nvcZrDalkmD7ekWQwnduxVKa6pRSI13KBgeUOIqJoGXSWhntEtY3FEwvWOHW5AE7Q\njUzTzCiYT6TVaCcpa/7YLai+p6ai2g5f5Zfh4jSawa9uYeuggFygQq4IVW796MgV\nyqxYMM/arEj+/sKz3Viua9Rp9fFosertCYCX4DUTgW0mX9bwEnEOgjSI3pLOPXz1\n8vx+DRZS5wMCmwCUa0sKonLn3cAUPq+sGix7+eo7T0Z12MU8ud7IYVX/75r3cXiF\nPaYE2q8Le0kgOApIXbb+x74x0rNgyIh1yGygkwIDAQABAoIBABz4t1A0pLT6qHI2\nEIOaNz3mwhK0dZEqkz0GB1Dhtoax5ATgvKCFB98J3lYB08IBURe1snOsnMpOVUtg\naBRSM+QqnCUG6bnzKjAkuFP5liDE+oNQv1YpKp9CsUovuzdmI8Au3ewihl+ZTIN2\nUVNYMEOR1b5m+z2SSwWNOYsiJwpBrT7zkpdlDyjat7FiiPhMMIMXjhQFVxURMIcB\njUBtPzGvV/PG90cVDWi1wRGeeP1dDqti/jsnvykQ15KW1MqGrpeNKRmDdTy/Ucl1\nWIoYklKw3U456lgZ/rDTDB818+Tlnk35z4yF7d5ANPM8CKfqOPcnO1BCKVFzf4eq\n54wvUtkCgYEA1Zv2lp06l7rXMsvNtyYQjbFChezRDRnPwZmN4NCdRtTgGG1G0Ryd\nYz6WWoPGqZp0b4LAaaHd3W2GTcpXF8WXMKfMX1W+tMAxMozfsXRKMcHoypwuS5wT\nfJRXJCG4pvd57AB0iVUEJW2we+uGKU5Zxcx//id2nXGCpoRyViIplQsCgYEA1nVC\neHupHChht0Fh4N09cGqZHZzuwXjOUMzR3Vsfz+4WzVS3NvIgN4g5YgmQFOeKwo5y\niRq5yvubcNdFvf85eHWClg0zPAyxJCVUWigCrrOanGEhJo6re4idJvNVzu4Ucg0v\n6B3SJ1HsCda+ZSNz24bSyqRep8A+RoAaoVSFx5kCgYEAn3RvXPs9s+obnqWYiPF3\nRe5etE6Vt2vfNKwFxx6zaR6bsmBQjuUHcABWiHb6I71S0bMPI0tbrWGG8ibrYKl1\nNTLtUvVVCOS3VP7oNTWT9RTFTAnOXU7DFSo+6o/poWn3r36ff6zhDXeWWMr2OXtt\ndEQ1/2lCGEGVv+v61eVmmQUCgYABFHITPTwqwiFL1O5zPWnzyPWgaovhOYSAb6eW\n38CXQXGn8wdBJZL39J2lWrr4//l45VK6UgIhfYbY2JynSkO10ZGow8RARygVMILu\nOUlaK9lZdDvAf/NpGdUAvzTtZ9F+iYZ2OsA2JnlzyzsGM1l//3vMPWukmJk3ral0\nqoJJ8QKBgGRG3eVHnIegBbFVuMDp2NTcfuSuDVUQ1fGAwtPiFa8u81IodJnMk2pq\niXu2+0ytNA/M+SVrAnE2AgIzcaJbtr0p2srkuVM7KMWnG1vWFNjtXN8fAhf/joOv\nD+NmPL/N4uE57e40tbiU/H7KdyZaDt+5QiTmdhuyAe6CBjKsF2jy\n-----END RSA PRIVATE KEY-----' - msg = b'AAA' - tag = b'\x00[c5\xd8\xb0\x8b!D\x81\x83\x07\xc0\xdd\xb9\xb4\xb2`\x92\xe7\x02\xf1\xe1P\xea\xc3\xf0\xe3>\xddX5\xdd\x8e\xc5\x89\xef\xf3\xc2\xdc\xfeP\x02\x7f\x12+\xc9\xaf\xbb\xec\xfe\xb0\xa5\xb9\x08\x11P\x8fL\xee5\x9b\xb0k{=_\xd2\x14\xfb\x01R\xb7\xfe\x14}b\x03\x8d5Y\x89~}\xfc\xf2l\xd01-\xbd\xeb\x11\xcdV\x11\xe9l\x19k/o5\xa2\x0f\x15\xe7Q$\t=\xec\x1dAB\x19\xa5P\x9a\xaf\xa3G\x86"\xd6~\xf0j\xfcqkbs\x13\x84b\xe4\xbdm(\xed`\xa4F\xfb\x8f.\xe1\x8c)/_\x9eS\x98\xa4v\xb8\xdc\xfe\xf7/D\x18\x19\xb3T\x97:\xe2\x96s\xe8<\xa2\xb4\xb9\xf8/' - - def test_positive_1(self): - key = RSA.import_key(self.rsa_key) - h = SHA256.new(self.msg) - verifier = pss.new(key) - verifier.verify(h, self.tag) - - def test_negative_1(self): - key = RSA.import_key(self.rsa_key) - h = SHA256.new(self.msg + b'A') - verifier = pss.new(key) - tag = bytearray(self.tag) - self.assertRaises(ValueError, verifier.verify, h, tag) - - def test_negative_2(self): - key = RSA.import_key(self.rsa_key) - h = SHA256.new(self.msg) - verifier = pss.new(key, salt_bytes=1000) - tag = bytearray(self.tag) - self.assertRaises(ValueError, verifier.verify, h, tag) - - -class FIPS_PKCS1_Verify_Tests(unittest.TestCase): - - def shortDescription(self): - return "FIPS PKCS1 Tests (Verify)" - - def verify_positive(self, hashmod, message, public_key, salt, signature): - prng = PRNG(salt) - hashed = hashmod.new(message) - verifier = pss.new(public_key, salt_bytes=len(salt), rand_func=prng) - verifier.verify(hashed, signature) - - def verify_negative(self, hashmod, 
message, public_key, salt, signature): - prng = PRNG(salt) - hashed = hashmod.new(message) - verifier = pss.new(public_key, salt_bytes=len(salt), rand_func=prng) - self.assertRaises(ValueError, verifier.verify, hashed, signature) - - def test_can_sign(self): - test_public_key = RSA.generate(1024).public_key() - verifier = pss.new(test_public_key) - self.assertEqual(verifier.can_sign(), False) - - -class FIPS_PKCS1_Verify_Tests_KAT(unittest.TestCase): - pass - - -test_vectors_verify = load_test_vectors(("Signature", "PKCS1-PSS"), - "SigVerPSS_186-3.rsp", - "Signature Verification 186-3", - {'shaalg': lambda x: x, - 'result': lambda x: x}) or [] - - -for count, tv in enumerate(test_vectors_verify): - if isinstance(tv, str): - continue - if hasattr(tv, "n"): - modulus = tv.n - continue - if hasattr(tv, "p"): - continue - - hash_module = load_hash_by_name(tv.shaalg.upper()) - hash_obj = hash_module.new(tv.msg) - public_key = RSA.construct([bytes_to_long(x) for x in (modulus, tv.e)]) # type: ignore - if tv.saltval != b("\x00"): - prng = PRNG(tv.saltval) - verifier = pss.new(public_key, salt_bytes=len(tv.saltval), rand_func=prng) - else: - verifier = pss.new(public_key, salt_bytes=0) - - def positive_test(self, hash_obj=hash_obj, verifier=verifier, signature=tv.s): - verifier.verify(hash_obj, signature) - - def negative_test(self, hash_obj=hash_obj, verifier=verifier, signature=tv.s): - self.assertRaises(ValueError, verifier.verify, hash_obj, signature) - - if tv.result == 'p': - setattr(FIPS_PKCS1_Verify_Tests_KAT, "test_positive_%d" % count, positive_test) - else: - setattr(FIPS_PKCS1_Verify_Tests_KAT, "test_negative_%d" % count, negative_test) - - -class FIPS_PKCS1_Sign_Tests(unittest.TestCase): - - def shortDescription(self): - return "FIPS PKCS1 Tests (Sign)" - - def test_can_sign(self): - test_private_key = RSA.generate(1024) - signer = pss.new(test_private_key) - self.assertEqual(signer.can_sign(), True) - - -class FIPS_PKCS1_Sign_Tests_KAT(unittest.TestCase): - pass - - -test_vectors_sign = load_test_vectors(("Signature", "PKCS1-PSS"), - "SigGenPSS_186-2.txt", - "Signature Generation 186-2", - {'shaalg': lambda x: x}) or [] - -test_vectors_sign += load_test_vectors(("Signature", "PKCS1-PSS"), - "SigGenPSS_186-3.txt", - "Signature Generation 186-3", - {'shaalg': lambda x: x}) or [] - -for count, tv in enumerate(test_vectors_sign): - if isinstance(tv, str): - continue - if hasattr(tv, "n"): - modulus = tv.n - continue - if hasattr(tv, "e"): - private_key = RSA.construct([bytes_to_long(x) for x in (modulus, tv.e, tv.d)]) # type: ignore - continue - - hash_module = load_hash_by_name(tv.shaalg.upper()) - hash_obj = hash_module.new(tv.msg) - if tv.saltval != b("\x00"): - prng = PRNG(tv.saltval) - signer = pss.new(private_key, salt_bytes=len(tv.saltval), rand_func=prng) - else: - signer = pss.new(private_key, salt_bytes=0) - - def new_test(self, hash_obj=hash_obj, signer=signer, result=tv.s): - signature = signer.sign(hash_obj) - self.assertEqual(signature, result) - - setattr(FIPS_PKCS1_Sign_Tests_KAT, "test_%d" % count, new_test) - - -class PKCS1_Legacy_Module_Tests(unittest.TestCase): - """Verify that the legacy module Crypto.Signature.PKCS1_PSS - behaves as expected. 
The only difference is that the verify() - method returns True/False and does not raise exceptions.""" - - def shortDescription(self): - return "Test legacy Crypto.Signature.PKCS1_PSS" - - def runTest(self): - key = RSA.generate(1024) - hashed = SHA1.new(b("Test")) - good_signature = PKCS1_PSS.new(key).sign(hashed) - verifier = PKCS1_PSS.new(key.public_key()) - - self.assertEqual(verifier.verify(hashed, good_signature), True) - - # Flip a few bits in the signature - bad_signature = strxor(good_signature, bchr(1) * len(good_signature)) - self.assertEqual(verifier.verify(hashed, bad_signature), False) - - -class PKCS1_All_Hashes_Tests(unittest.TestCase): - - def shortDescription(self): - return "Test PKCS#1 PSS signature in combination with all hashes" - - def runTest(self): - - key = RSA.generate(1280) - signer = pss.new(key) - hash_names = ("MD2", "MD4", "MD5", "RIPEMD160", "SHA1", - "SHA224", "SHA256", "SHA384", "SHA512", - "SHA3_224", "SHA3_256", "SHA3_384", "SHA3_512") - - for name in hash_names: - hashed = load_hash_by_name(name).new(b("Test")) - signer.sign(hashed) - - from Crypto.Hash import BLAKE2b, BLAKE2s - for hash_size in (20, 32, 48, 64): - hashed_b = BLAKE2b.new(digest_bytes=hash_size, data=b("Test")) - signer.sign(hashed_b) - for hash_size in (16, 20, 28, 32): - hashed_s = BLAKE2s.new(digest_bytes=hash_size, data=b("Test")) - signer.sign(hashed_s) - - -def get_hash_module(hash_name): - if hash_name == "SHA-512": - hash_module = SHA512 - elif hash_name == "SHA-512/224": - hash_module = SHA512.new(truncate="224") - elif hash_name == "SHA-512/256": - hash_module = SHA512.new(truncate="256") - elif hash_name == "SHA-384": - hash_module = SHA384 - elif hash_name == "SHA-256": - hash_module = SHA256 - elif hash_name == "SHA-224": - hash_module = SHA224 - elif hash_name == "SHA-1": - hash_module = SHA1 - else: - raise ValueError("Unknown hash algorithm: " + hash_name) - return hash_module - - -class TestVectorsPSSWycheproof(unittest.TestCase): - - def __init__(self, wycheproof_warnings): - unittest.TestCase.__init__(self) - self._wycheproof_warnings = wycheproof_warnings - self._id = "None" - - def add_tests(self, filename): - - def filter_rsa(group): - return RSA.import_key(group['keyPem']) - - def filter_sha(group): - return get_hash_module(group['sha']) - - def filter_type(group): - type_name = group['type'] - if type_name not in ("RsassaPssVerify", ): - raise ValueError("Unknown type name " + type_name) - - def filter_slen(group): - return group['sLen'] - - def filter_mgf(group): - mgf = group['mgf'] - if mgf not in ("MGF1", ): - raise ValueError("Unknown MGF " + mgf) - mgf1_hash = get_hash_module(group['mgfSha']) - - def mgf(x, y, mh=mgf1_hash): - return MGF1(x, y, mh) - - return mgf - - result = load_test_vectors_wycheproof(("Signature", "wycheproof"), - filename, - "Wycheproof PSS signature (%s)" % filename, - group_tag={'key': filter_rsa, - 'hash_module': filter_sha, - 'sLen': filter_slen, - 'mgf': filter_mgf, - 'type': filter_type}) - return result - - def setUp(self): - self.tv = [] - self.add_tests("rsa_pss_2048_sha1_mgf1_20_test.json") - self.add_tests("rsa_pss_2048_sha256_mgf1_0_test.json") - self.add_tests("rsa_pss_2048_sha256_mgf1_32_test.json") - self.add_tests("rsa_pss_2048_sha512_256_mgf1_28_test.json") - self.add_tests("rsa_pss_2048_sha512_256_mgf1_32_test.json") - self.add_tests("rsa_pss_3072_sha256_mgf1_32_test.json") - self.add_tests("rsa_pss_4096_sha256_mgf1_32_test.json") - self.add_tests("rsa_pss_4096_sha512_mgf1_32_test.json") - 
self.add_tests("rsa_pss_misc_test.json") - - def shortDescription(self): - return self._id - - def warn(self, tv): - if tv.warning and self._wycheproof_warnings: - import warnings - warnings.warn("Wycheproof warning: %s (%s)" % (self._id, tv.comment)) - - def test_verify(self, tv): - self._id = "Wycheproof RSA PSS Test #%d (%s)" % (tv.id, tv.comment) - - hashed_msg = tv.hash_module.new(tv.msg) - signer = pss.new(tv.key, mask_func=tv.mgf, salt_bytes=tv.sLen) - try: - signature = signer.verify(hashed_msg, tv.sig) - except ValueError as e: - if tv.warning: - return - assert not tv.valid - else: - assert tv.valid - self.warn(tv) - - def runTest(self): - for tv in self.tv: - self.test_verify(tv) - - -def get_tests(config={}): - wycheproof_warnings = config.get('wycheproof_warnings') - - tests = [] - tests += list_test_cases(PSS_Tests) - tests += list_test_cases(FIPS_PKCS1_Verify_Tests) - tests += list_test_cases(FIPS_PKCS1_Sign_Tests) - tests += list_test_cases(PKCS1_Legacy_Module_Tests) - tests += list_test_cases(PKCS1_All_Hashes_Tests) - - if config.get('slow_tests'): - tests += list_test_cases(FIPS_PKCS1_Verify_Tests_KAT) - tests += list_test_cases(FIPS_PKCS1_Sign_Tests_KAT) - - tests += [TestVectorsPSSWycheproof(wycheproof_warnings)] - - return tests - - -if __name__ == '__main__': - def suite(): - return unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/seattle_weather_interactive.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/seattle_weather_interactive.py deleted file mode 100644 index cf65fbffda1b38cb11c493bc76664f3a9931b1ce..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/seattle_weather_interactive.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Seattle Weather Interactive -=========================== -This chart provides an interactive exploration of Seattle weather over the -course of the year. It includes a one-axis brush selection to easily -see the distribution of weather types in a particular date range. 
-""" -# category: case studies -import altair as alt -from vega_datasets import data - -source = data.seattle_weather() - -scale = alt.Scale(domain=['sun', 'fog', 'drizzle', 'rain', 'snow'], - range=['#e7ba52', '#a7a7a7', '#aec7e8', '#1f77b4', '#9467bd']) -color = alt.Color('weather:N', scale=scale) - -# We create two selections: -# - a brush that is active on the top panel -# - a multi-click that is active on the bottom panel -brush = alt.selection_interval(encodings=['x']) -click = alt.selection_multi(encodings=['color']) - -# Top panel is scatter plot of temperature vs time -points = alt.Chart().mark_point().encode( - alt.X('monthdate(date):T', title='Date'), - alt.Y('temp_max:Q', - title='Maximum Daily Temperature (C)', - scale=alt.Scale(domain=[-5, 40]) - ), - color=alt.condition(brush, color, alt.value('lightgray')), - size=alt.Size('precipitation:Q', scale=alt.Scale(range=[5, 200])) -).properties( - width=550, - height=300 -).add_selection( - brush -).transform_filter( - click -) - -# Bottom panel is a bar chart of weather type -bars = alt.Chart().mark_bar().encode( - x='count()', - y='weather:N', - color=alt.condition(click, color, alt.value('lightgray')), -).transform_filter( - brush -).properties( - width=550, -).add_selection( - click -) - -alt.vconcat( - points, - bars, - data=source, - title="Seattle Weather: 2012-2015" -) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/__init__.py deleted file mode 100644 index 6e81178c81e063691c7d0f7a966afbaf9ed7934b..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/__init__.py +++ /dev/null @@ -1,167 +0,0 @@ -__all__ = ( - "maybe_async", - "maybe_async_cm", - "run", - "sleep", - "sleep_forever", - "sleep_until", - "current_time", - "get_all_backends", - "get_cancelled_exc_class", - "BrokenResourceError", - "BrokenWorkerProcess", - "BusyResourceError", - "ClosedResourceError", - "DelimiterNotFound", - "EndOfStream", - "ExceptionGroup", - "IncompleteRead", - "TypedAttributeLookupError", - "WouldBlock", - "AsyncFile", - "Path", - "open_file", - "wrap_file", - "aclose_forcefully", - "open_signal_receiver", - "connect_tcp", - "connect_unix", - "create_tcp_listener", - "create_unix_listener", - "create_udp_socket", - "create_connected_udp_socket", - "getaddrinfo", - "getnameinfo", - "wait_socket_readable", - "wait_socket_writable", - "create_memory_object_stream", - "run_process", - "open_process", - "create_lock", - "CapacityLimiter", - "CapacityLimiterStatistics", - "Condition", - "ConditionStatistics", - "Event", - "EventStatistics", - "Lock", - "LockStatistics", - "Semaphore", - "SemaphoreStatistics", - "create_condition", - "create_event", - "create_semaphore", - "create_capacity_limiter", - "open_cancel_scope", - "fail_after", - "move_on_after", - "current_effective_deadline", - "TASK_STATUS_IGNORED", - "CancelScope", - "create_task_group", - "TaskInfo", - "get_current_task", - "get_running_tasks", - "wait_all_tasks_blocked", - "run_sync_in_worker_thread", - "run_async_from_thread", - "run_sync_from_thread", - "current_default_worker_thread_limiter", - "create_blocking_portal", - "start_blocking_portal", - "typed_attribute", - "TypedAttributeSet", - "TypedAttributeProvider", -) - -from typing import Any - -from ._core._compat import maybe_async, maybe_async_cm -from ._core._eventloop import ( - current_time, - get_all_backends, - get_cancelled_exc_class, - run, - sleep, - 
sleep_forever, - sleep_until, -) -from ._core._exceptions import ( - BrokenResourceError, - BrokenWorkerProcess, - BusyResourceError, - ClosedResourceError, - DelimiterNotFound, - EndOfStream, - ExceptionGroup, - IncompleteRead, - TypedAttributeLookupError, - WouldBlock, -) -from ._core._fileio import AsyncFile, Path, open_file, wrap_file -from ._core._resources import aclose_forcefully -from ._core._signals import open_signal_receiver -from ._core._sockets import ( - connect_tcp, - connect_unix, - create_connected_udp_socket, - create_tcp_listener, - create_udp_socket, - create_unix_listener, - getaddrinfo, - getnameinfo, - wait_socket_readable, - wait_socket_writable, -) -from ._core._streams import create_memory_object_stream -from ._core._subprocesses import open_process, run_process -from ._core._synchronization import ( - CapacityLimiter, - CapacityLimiterStatistics, - Condition, - ConditionStatistics, - Event, - EventStatistics, - Lock, - LockStatistics, - Semaphore, - SemaphoreStatistics, - create_capacity_limiter, - create_condition, - create_event, - create_lock, - create_semaphore, -) -from ._core._tasks import ( - TASK_STATUS_IGNORED, - CancelScope, - create_task_group, - current_effective_deadline, - fail_after, - move_on_after, - open_cancel_scope, -) -from ._core._testing import ( - TaskInfo, - get_current_task, - get_running_tasks, - wait_all_tasks_blocked, -) -from ._core._typedattr import TypedAttributeProvider, TypedAttributeSet, typed_attribute - -# Re-exported here, for backwards compatibility -# isort: off -from .to_thread import current_default_worker_thread_limiter, run_sync_in_worker_thread -from .from_thread import ( - create_blocking_portal, - run_async_from_thread, - run_sync_from_thread, - start_blocking_portal, -) - -# Re-export imports so they look like they live directly in this package -key: str -value: Any -for key, value in list(locals().items()): - if getattr(value, "__module__", "").startswith("anyio."): - value.__module__ = __name__ diff --git a/spaces/aseduto/sp500/README.md b/spaces/aseduto/sp500/README.md deleted file mode 100644 index cd2d0d9f416f6837a8354dfb5bd43fd43bbb9c97..0000000000000000000000000000000000000000 --- a/spaces/aseduto/sp500/README.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: S&P 500 -emoji: 📉 📈 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.24.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Artificial Intelligence - S&P 500 Prediction Model using PyTorch - -This is a model using PyTorch that predicts the value of the S&P 500 index based on earnings and interest rates. - -## Introduction - -The model takes into account historical earnings per share (EPS) and treasury rates to make predictions on the S&P 500 index. It has been trained on a significant amount of data and has shown promising results during the testing phase. - -## Data and Training - -The model has been trained on a comprehensive dataset of historical EPS and treasury rates from 1997 to today. - - -## Feedback - -I believe this model can provide valuable insights into the behavior of the stock market and assist in making informed investment decisions. However, I'm still working to further improve the model and explore potential areas of enhancement. - -## How to Use the Model - -It is important to note that no model can perfectly predict the future value of the S&P 500 index. However, these models can be used to generate forecasts that can help investors make informed decisions. 
- -You can input your expected values for the treasury rate and the S&P Earnings for each future quarters and have the model forecast the index expected value. - -## Contact - -If you have any questions, suggestions, or ideas, please feel free to reach out. Your feedback is greatly appreciated! - - - diff --git a/spaces/aus10powell/TwitterAccounts/app.py b/spaces/aus10powell/TwitterAccounts/app.py deleted file mode 100644 index 454bcb949fd113be7865e74f8ac92b3f4c2d77ca..0000000000000000000000000000000000000000 --- a/spaces/aus10powell/TwitterAccounts/app.py +++ /dev/null @@ -1,328 +0,0 @@ -"""FastAPI endpoint -To run locally use 'uvicorn app:app --host localhost --port 7860' -or -`python -m uvicorn app:app --reload --host localhost --port 7860` -""" -import datetime as dt -import json -import logging -import sys -import spacy - -# sys.setrecursionlimit(20000) -import pandas as pd -import numpy as np -import os -import random -from typing import Dict, List - -import uvicorn -from fastapi import FastAPI, HTTPException, Request, Response -from fastapi.responses import HTMLResponse, JSONResponse -from fastapi.staticfiles import StaticFiles -from fastapi.templating import Jinja2Templates - -from rouge_score import rouge_scorer -# Scripts -import scripts.sentiment as sentiment -import scripts.twitter_scraper as ts -from scripts import sentiment -from scripts.summarization import bert_summarization -from scripts.twitter_scraper import get_latest_account_tweets -from scripts.sentiment import twitter_sentiment_api_score -from scripts import twitter_scraper as ts -import scripts.utils as utils -from scripts import translation -from scripts import generative -import nltk -nltk.download('punkt') -punkt_download_location = nltk.data.path[0] -logging.info(f"punkt_download_location: {punkt_download_location}") - -logging.basicConfig(level=logging.INFO) -pd.set_option('display.max_colwidth', 20) - -app = FastAPI() -templates = Jinja2Templates(directory="templates") -app.mount("/static", StaticFiles(directory="static"), name="static") - -# Construct absolute path to models folder -models_path = os.path.abspath("models") - -username_list = [ - "alikarimi_ak8", - "elonmusk", - "BarackObama", - "taylorlorenz", - "cathiedwood", - "ylecun", -] - -## Static objects/paths -start_date = dt.date(year=2023, month=2, day=1) -end_date = dt.date(year=2023, month=3, day=22) - -# Load spacy module on app start -nlp = spacy.load("en_core_web_sm") -nlp.add_pipe("sentencizer") - - -@app.get("/", response_class=HTMLResponse) -async def webpage(request: Request): - return templates.TemplateResponse("index.html", {"request": request}) - - -@app.get("/accounts") -async def get_accounts() -> List[dict]: - import pandas as pd - - logging.info(f"Pulling account information on {username_list}") - account_info_list = [ - ts.get_twitter_account_info(twitter_handle=account) for account in username_list - ] - df_account = pd.DataFrame(account_info_list) - df_account = df_account.style.bar( - subset=["follower_count", "friends_count"], color="#d65f5f" - ) - df_account = df_account.format( - {"follower_count": "{:,.0f}", "friends_count": "{:,.0f}"} - ) - html_table = df_account.to_html(classes="center", index=False) - - return HTMLResponse(content=html_table, status_code=200) - - -@app.get("/tweets/{username}") -def get_tweets_username(username: str) -> dict: - # Method 2: Use Snscrape - df_tweets = ts.get_tweets(handle=username) - - if isinstance(df_tweets, pd.DataFrame): - df_tweets = df_tweets[["handle", 
"created_at","retweet_count","view_count","like_count", "full_text"]] - df_tweets["created_at"] = df_tweets["created_at"].dt.strftime( - "%Y-%m-%d %H:%M:%S" - ) - df_tweets = df_tweets.sort_values("created_at", ascending=False) - - # Additional processing - logging.info("Running sentiment on tweets") - sentiments = twitter_sentiment_api_score( - df_tweets['full_text'].to_list(), use_api=False - ) - df_tweets["sentiment"] = [s['argmax'] for s in sentiments] - if username == "alikarimi_ak8": - p = translation.PersianTextProcessor() - df_tweets['full_text_translated'] = df_tweets["full_text"].apply(lambda c: p.translate_text(persian_text = c)) - - - df_tweets_html = df_tweets.to_html(classes="center", index=False, escape=False) - df_tweets.to_html(open("df_tweets_html.html", "w")) - df_tweets_data = df_tweets.to_dict(orient="records") - - response_data = {"html": df_tweets_html, "data": df_tweets_data} - - return JSONResponse(content=response_data, status_code=200) - else: - print("Error: Failed to retrieve tweets.") - return df_tweets - - -@app.get("/audience/{username}", response_model=dict) -async def get_audience(username: str) -> dict: - if username in username_list: - query = f"from:{username} since:{start_date} until:{end_date}" - tweets = ts.get_tweets(query=query) - - n_samples = 5 - # Random sample 3 tweets from user - tweets_sampled = random.sample(tweets, n_samples) - - # Get all replies to sampled tweets - tweet_threads = [] - for tweet in tweets_sampled: - threads = ts.get_replies( - username=tweet["username"], - conversation_id=tweet["conversation_id"], - max_tweets=100, - ) - tweet_threads += threads - - # Get usernames from sample threads tweets - usernames = [t["username"] for t in tweet_threads] - # Get user info from sample replies to sampled tweets of user - info_accounts = [ - ts.get_twitter_account_info(twitter_handle=account) for account in usernames - ] - - # "follower_count":1,"friends_count":20,"verified":false} - # Get stats for followers/audience engaging with tweets - follower_counts = [ - info_accounts[i]["follower_count"] for i in range(len(info_accounts)) - ] - friends_counts = [ - info_accounts[i]["friends_count"] for i in range(len(info_accounts)) - ] - verified_counts = [ - 1 if info_accounts[i]["verified"] == True else 0 - for i in range(len(info_accounts)) - ] - return { - "sample_size": len(info_accounts), - "mean_follower_count": round(np.mean(follower_counts), 3), - "mean_friends_count": round(np.mean(friends_counts), 3), - "mean_verified": round(np.mean(verified_counts), 3), - } - else: - response = Response(content="Account not in scope of project.", status_code=404) - return response - - -@app.get("/sentiment/{username}") -async def get_sentiment(username: str) -> Dict[str, Dict[str, float]]: - if username not in username_list: - raise HTTPException(status_code=404, detail="Account not in scope of project.") - - query = f"from:{username} since:{start_date} until:{end_date}" - tweets = ts.get_tweets(query=query) - n_samples = 5 - tweets_sampled = random.sample(tweets, n_samples) - - tweet_threads = [] - for tweet in tweets_sampled: - threads = ts.get_replies( - username=tweet["username"], - conversation_id=tweet["conversation_id"], - max_tweets=100, - ) - tweet_threads += threads - - print( - f"Total replies to {n_samples} sampled tweets from username: {username}, {len(tweet_threads)}" - ) - - ## Sentiment scoring - print(f"Running tweet sentiment scoring on username: {username} tweets") - tweets_scores = sentiment.get_tweets_sentiment(tweets=tweets) 
- mean_tweets_score = round(np.mean(tweets_scores), 2) - ci_tweets = utils.wilson_score_interval(tweets_scores) - - # Get sentiment of the threads from tweets - # Get username tweets sentiment - print(f"Running tweet thread sentiment scoring on username: {username} tweets") - threads_scores = sentiment.get_tweets_sentiment(tweets=tweet_threads) - mean_threads_score = round(np.mean(threads_scores), 2) - ci_threads = utils.wilson_score_interval(threads_scores) - - return { - "thread_level": { - "mean": mean_threads_score, - "confidence_interal": ci_threads, - }, - "audience_level": { - "mean": mean_tweets_score, - "confidence_interval": ci_tweets, - }, - } - - -## APIs: Primarily called by the index page -@app.post("/api/generate") -async def generate_text(request: Request): - """Generate text from a prompt. - - Args: - request: The HTTP request. - - Returns: - The generated text. - """ - print("*" * 50) - data = await request.json() - print("*" * 50) - logging.info("POST to api/generate received and processing") - - # Check length of input, if it is greater than 10 tokens, the text is sent off to a summarizer to generate: - try: - generated_text = generative.generate_account_text( - prompt=data["text"], model_dir=os.path.join(models_path, data["account"]) - ) - logging.info("INFO: Successfully generate text from model.") - except Exception as e: - logging.error(f"Error generating text: {e}") - return {"error": "Error generating text"} - # return one example - generated_text = generated_text[0]["generated_text"] - - ################################################### - ## Clean up generate text - # Get rid of final sentence - sentences = nltk.sent_tokenize(generated_text) - unique_sentences = set() - non_duplicate_sentences = [] - for sentence in sentences: - if sentence not in unique_sentences: - non_duplicate_sentences.append(sentence) - unique_sentences.add(sentence) - final_text = " ".join(non_duplicate_sentences[:-1]) - - return {"generated_text": final_text} - - -@app.post("/api/generate_summary") -async def generate_summary(request: Request): - """Generate summary from tweets - - Args: - request: The HTTP request. - - Returns: - The generated text. 
- """ - - print("*" * 50) - data = await request.json() - print("data", data["tweetsData"]) - # Get the list of text - tweets = [t["full_text"] for t in data["tweetsData"]] - - # Concatenate tweets into a single string - text = " .".join(tweets) - - sentences = nlp(text).sents - - sentences = list(sentences) - - # Option 2 - sampled_sentences = random.sample(sentences, int(0.1 * len(sentences))) - - sampled_sentences = [sentiment.tweet_cleaner(s.text) for s in sampled_sentences] - - # Join the strings into one text blob - tweet_blob = " ".join(sampled_sentences) - - # Generate the summary - summary = bert_summarization(tweet_blob) - print("Summary:", summary) - # Return the summary - return {"tweets_summary": summary} - - -## Historical Tweets pages -@app.get("/examples1") -async def read_examples(): - with open("templates/charts/handle_sentiment_breakdown.html") as f: - html = f.read() - return HTMLResponse(content=html) - - -@app.get("/examples2") -async def read_examples(): - with open("templates/charts/handle_sentiment_timesteps.html") as f: - html = f.read() - return HTMLResponse(content=html) - - -# uvicorn --workers=2 app:app -if __name__ == "__main__": - # uvicorn.run(app, host="0.0.0.0", port=8000) - uvicorn.run("app:app", host="127.0.0.1", port=5050, reload=True) diff --git a/spaces/awacke1/ChatGPTStreamlit9/README.md b/spaces/awacke1/ChatGPTStreamlit9/README.md deleted file mode 100644 index 322f025775f6268ebeffc28dac25e0cd0ddf8bac..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ChatGPTStreamlit9/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChatGPTStreamlit9 -emoji: 🌖 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/balgot/text-to-stylegan3/README.md b/spaces/balgot/text-to-stylegan3/README.md deleted file mode 100644 index d49bc65247dfa031716a5821ffccad811d285fac..0000000000000000000000000000000000000000 --- a/spaces/balgot/text-to-stylegan3/README.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Text To Stylegan3 -emoji: 🔥 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.28.1 -app_file: app.py -pinned: false -license: openrail ---- - -This is a demo showcasing the connection of these models: - -* [`sentence-transformers/all-MiniLM-L6-v2`](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) -* [`balgot/text-2-stylegan3`](https://huggingface.co/balgot/bert-2-stylegan3) -* [`StyleGAN3`](https://nvlabs.github.io/stylegan3/) - -> **NOTE:** It is only possible to generate human faces which StyleGAN3 can -generate (see [ffhq](https://github.com/NVlabs/ffhq-dataset)) dataset. -Specifically, only real life human faces can be generated. Furthermore, -the quality of understanding the textual description strongly depends -on the captioning procedure used, so not all face-features can be forced. 
\ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/src/extras/ShapeUtils.js b/spaces/banana-projects/web3d/node_modules/three/src/extras/ShapeUtils.js deleted file mode 100644 index ec050afb571b57ccbbefe2db216f38ff4027c81a..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/extras/ShapeUtils.js +++ /dev/null @@ -1,96 +0,0 @@ -/** - * @author zz85 / http://www.lab4games.net/zz85/blog - */ - -import { Earcut } from './Earcut.js'; - -var ShapeUtils = { - - // calculate area of the contour polygon - - area: function ( contour ) { - - var n = contour.length; - var a = 0.0; - - for ( var p = n - 1, q = 0; q < n; p = q ++ ) { - - a += contour[ p ].x * contour[ q ].y - contour[ q ].x * contour[ p ].y; - - } - - return a * 0.5; - - }, - - isClockWise: function ( pts ) { - - return ShapeUtils.area( pts ) < 0; - - }, - - triangulateShape: function ( contour, holes ) { - - var vertices = []; // flat array of vertices like [ x0,y0, x1,y1, x2,y2, ... ] - var holeIndices = []; // array of hole indices - var faces = []; // final array of vertex indices like [ [ a,b,d ], [ b,c,d ] ] - - removeDupEndPts( contour ); - addContour( vertices, contour ); - - // - - var holeIndex = contour.length; - - holes.forEach( removeDupEndPts ); - - for ( var i = 0; i < holes.length; i ++ ) { - - holeIndices.push( holeIndex ); - holeIndex += holes[ i ].length; - addContour( vertices, holes[ i ] ); - - } - - // - - var triangles = Earcut.triangulate( vertices, holeIndices ); - - // - - for ( var i = 0; i < triangles.length; i += 3 ) { - - faces.push( triangles.slice( i, i + 3 ) ); - - } - - return faces; - - } - -}; - -function removeDupEndPts( points ) { - - var l = points.length; - - if ( l > 2 && points[ l - 1 ].equals( points[ 0 ] ) ) { - - points.pop(); - - } - -} - -function addContour( vertices, contour ) { - - for ( var i = 0; i < contour.length; i ++ ) { - - vertices.push( contour[ i ].x ); - vertices.push( contour[ i ].y ); - - } - -} - -export { ShapeUtils }; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/map_particle_pars_fragment.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/map_particle_pars_fragment.glsl.js deleted file mode 100644 index 3bf8eb007280ef6c7ad0be15171cf1944467d86d..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/map_particle_pars_fragment.glsl.js +++ /dev/null @@ -1,8 +0,0 @@ -export default /* glsl */` -#ifdef USE_MAP - - uniform mat3 uvTransform; - uniform sampler2D map; - -#endif -`; diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/models/video_recurrent_gan_model.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/models/video_recurrent_gan_model.py deleted file mode 100644 index 74cf81145c50ffafb220d22b51e56746dee5ba41..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/models/video_recurrent_gan_model.py +++ /dev/null @@ -1,180 +0,0 @@ -import torch -from collections import OrderedDict - -from basicsr.archs import build_network -from basicsr.losses import build_loss -from basicsr.utils import get_root_logger -from basicsr.utils.registry import MODEL_REGISTRY -from .video_recurrent_model import VideoRecurrentModel - - -@MODEL_REGISTRY.register() -class VideoRecurrentGANModel(VideoRecurrentModel): - - def init_training_settings(self): - train_opt = self.opt['train'] - - self.ema_decay = 
train_opt.get('ema_decay', 0) - if self.ema_decay > 0: - logger = get_root_logger() - logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}') - # build network net_g with Exponential Moving Average (EMA) - # net_g_ema only used for testing on one GPU and saving. - # There is no need to wrap with DistributedDataParallel - self.net_g_ema = build_network(self.opt['network_g']).to(self.device) - # load pretrained model - load_path = self.opt['path'].get('pretrain_network_g', None) - if load_path is not None: - self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema') - else: - self.model_ema(0) # copy net_g weight - self.net_g_ema.eval() - - # define network net_d - self.net_d = build_network(self.opt['network_d']) - self.net_d = self.model_to_device(self.net_d) - self.print_network(self.net_d) - - # load pretrained models - load_path = self.opt['path'].get('pretrain_network_d', None) - if load_path is not None: - param_key = self.opt['path'].get('param_key_d', 'params') - self.load_network(self.net_d, load_path, self.opt['path'].get('strict_load_d', True), param_key) - - self.net_g.train() - self.net_d.train() - - # define losses - if train_opt.get('pixel_opt'): - self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device) - else: - self.cri_pix = None - - if train_opt.get('perceptual_opt'): - self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device) - else: - self.cri_perceptual = None - - if train_opt.get('gan_opt'): - self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device) - - self.net_d_iters = train_opt.get('net_d_iters', 1) - self.net_d_init_iters = train_opt.get('net_d_init_iters', 0) - - # set up optimizers and schedulers - self.setup_optimizers() - self.setup_schedulers() - - def setup_optimizers(self): - train_opt = self.opt['train'] - if train_opt['fix_flow']: - normal_params = [] - flow_params = [] - for name, param in self.net_g.named_parameters(): - if 'spynet' in name: # The fix_flow now only works for spynet. 
- flow_params.append(param) - else: - normal_params.append(param) - - optim_params = [ - { # add flow params first - 'params': flow_params, - 'lr': train_opt['lr_flow'] - }, - { - 'params': normal_params, - 'lr': train_opt['optim_g']['lr'] - }, - ] - else: - optim_params = self.net_g.parameters() - - # optimizer g - optim_type = train_opt['optim_g'].pop('type') - self.optimizer_g = self.get_optimizer(optim_type, optim_params, **train_opt['optim_g']) - self.optimizers.append(self.optimizer_g) - # optimizer d - optim_type = train_opt['optim_d'].pop('type') - self.optimizer_d = self.get_optimizer(optim_type, self.net_d.parameters(), **train_opt['optim_d']) - self.optimizers.append(self.optimizer_d) - - def optimize_parameters(self, current_iter): - logger = get_root_logger() - # optimize net_g - for p in self.net_d.parameters(): - p.requires_grad = False - - if self.fix_flow_iter: - if current_iter == 1: - logger.info(f'Fix flow network and feature extractor for {self.fix_flow_iter} iters.') - for name, param in self.net_g.named_parameters(): - if 'spynet' in name or 'edvr' in name: - param.requires_grad_(False) - elif current_iter == self.fix_flow_iter: - logger.warning('Train all the parameters.') - self.net_g.requires_grad_(True) - - self.optimizer_g.zero_grad() - self.output = self.net_g(self.lq) - - _, _, c, h, w = self.output.size() - - l_g_total = 0 - loss_dict = OrderedDict() - if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters): - # pixel loss - if self.cri_pix: - l_g_pix = self.cri_pix(self.output, self.gt) - l_g_total += l_g_pix - loss_dict['l_g_pix'] = l_g_pix - # perceptual loss - if self.cri_perceptual: - l_g_percep, l_g_style = self.cri_perceptual(self.output.view(-1, c, h, w), self.gt.view(-1, c, h, w)) - if l_g_percep is not None: - l_g_total += l_g_percep - loss_dict['l_g_percep'] = l_g_percep - if l_g_style is not None: - l_g_total += l_g_style - loss_dict['l_g_style'] = l_g_style - # gan loss - fake_g_pred = self.net_d(self.output.view(-1, c, h, w)) - l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False) - l_g_total += l_g_gan - loss_dict['l_g_gan'] = l_g_gan - - l_g_total.backward() - self.optimizer_g.step() - - # optimize net_d - for p in self.net_d.parameters(): - p.requires_grad = True - - self.optimizer_d.zero_grad() - # real - # reshape to (b*n, c, h, w) - real_d_pred = self.net_d(self.gt.view(-1, c, h, w)) - l_d_real = self.cri_gan(real_d_pred, True, is_disc=True) - loss_dict['l_d_real'] = l_d_real - loss_dict['out_d_real'] = torch.mean(real_d_pred.detach()) - l_d_real.backward() - # fake - # reshape to (b*n, c, h, w) - fake_d_pred = self.net_d(self.output.view(-1, c, h, w).detach()) - l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True) - loss_dict['l_d_fake'] = l_d_fake - loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach()) - l_d_fake.backward() - self.optimizer_d.step() - - self.log_dict = self.reduce_loss_dict(loss_dict) - - if self.ema_decay > 0: - self.model_ema(decay=self.ema_decay) - - def save(self, epoch, current_iter): - if self.ema_decay > 0: - self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema']) - else: - self.save_network(self.net_g, 'net_g', current_iter) - self.save_network(self.net_d, 'net_d', current_iter) - self.save_training_state(epoch, current_iter) diff --git a/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095508.py b/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095508.py deleted file mode 100644 index 
d81671aa0eda6f43052a3e8414bab78692679bb9..0000000000000000000000000000000000000000 --- a/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095508.py +++ /dev/null @@ -1,40 +0,0 @@ -#-*- coding : utf-8-*- -import base64 -from subprocess import STDOUT -import streamlit as st -import pandas as pd -import camelot as cam # extracting tables from PDFs - -st.title("PDF Table Extractor") - -input_pdf = st.file_uploader(label = "", type = 'pdf') - -page_number = st.text_input("请填写表格所在PDF页码,eg: 3", value = 1) -background = st.selectbox("表格线条是否隐藏",(False,True),) -if input_pdf is not None: - # byte object into a PDF file - with open("input.pdf", "wb") as f: - base64_pdf = base64.b64encode(input_pdf.read()).decode('utf-8') - f.write(base64.b64decode(base64_pdf)) - f.close() - - # read the pdf and parse it using stream - tables = cam.read_pdf("input.pdf", pages=page_number, process_background=background) - result = pd.ExcelWriter('result.xlsx', engine='xlsxwriter') - tables[0].to_excel(result,index=False) - # for i in range(0,len(tables)): - # table = tables[i].df - # sheetname = str(i) - # table.to_excel(result, sheetname,index=False) - - with open('result.xlsx','rb') as f: - st.download_button('提取完成,点击下载!', f,file_name='result.xlsx',mime="application/vnd.ms-excel") - - tables_all= cam.read_pdf("input.pdf", pages=all, process_background=background) - result_all = pd.ExcelWriter('result_all.xlsx', engine='xlsxwriter') - for i in range(0,len(tables_all)): - table = tables_all[i].df - sheetname = str(i) - table.to_excel(result_all, sheetname,index=False) - with open('result_all.xlsx','rb') as f: - st.download_button('一件抽取完成,点击下载!', f,file_name='result_all.xlsx',mime="application/vnd.ms-excel") \ No newline at end of file diff --git a/spaces/bigscience-data/filter_values_distributions/README.md b/spaces/bigscience-data/filter_values_distributions/README.md deleted file mode 100644 index 5c5013b7595a0f899a92f1a4a3dadb9c69b4c67a..0000000000000000000000000000000000000000 --- a/spaces/bigscience-data/filter_values_distributions/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Filter Values Distributions -emoji: 🐠 -colorFrom: yellow -colorTo: purple -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/blastd/LimoneSorrentin/README.md b/spaces/blastd/LimoneSorrentin/README.md deleted file mode 100644 index a9762e1c11a26508a71307d0aacd911d9c2bcc8b..0000000000000000000000000000000000000000 --- a/spaces/blastd/LimoneSorrentin/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: LimoneSorrentin -emoji: 📉 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.0.20 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/brainblow/AI-TV/public/index.html b/spaces/brainblow/AI-TV/public/index.html deleted file mode 100644 index 1c0597a24821e9c303ad9da37c3fdff5cfadeb6f..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AI-TV/public/index.html +++ /dev/null @@ -1,325 +0,0 @@ - - - 🤖 AI-TV - - - - - -
- Loading AI-TV...
- 🤖 AI-TV
- ▶ Current channel:
- - - - - - - - \ No newline at end of file diff --git a/spaces/brainblow/beat_remixer/beat_manipulator/osu.py b/spaces/brainblow/beat_remixer/beat_manipulator/osu.py deleted file mode 100644 index 8a6becc526702d28eb4f1b42fa91a252b6ff4e09..0000000000000000000000000000000000000000 --- a/spaces/brainblow/beat_remixer/beat_manipulator/osu.py +++ /dev/null @@ -1,244 +0,0 @@ -from . import main -import numpy as np - -# L L L L L L L L L -def generate(song, difficulties = [0.2, 0.1, 0.05, 0.025, 0.01, 0.0075, 0.005, 0.0025], lib='madmom.MultiModelSelectionProcessor', caching=True, log = True, output = '', add_peaks = True): - # for i in difficulties: - # if i<0.005: print(f'Difficulties < 0.005 may result in broken beatmaps, found difficulty = {i}') - if lib.lower == 'stunlocked': add_peaks = False - - if not isinstance(song, main.song): song = main.song(song) - if log is True: print(f'Using {lib}; ', end='') - - filename = song.path.replace('\\', '/').split('/')[-1] - if ' - ' in filename and len(filename.split(' - '))>1: - artist = filename.split(' - ')[0] - title = ' - '.join(filename.split(' - ')[1:]) - else: - artist = '' - title = filename - - if caching is True: - audio_id=hex(len(song.audio[0])) - import os - if not os.path.exists('beat_manipulator/beatmaps'): - os.mkdir('beat_manipulator/beatmaps') - cacheDir="beat_manipulator/beatmaps/" + filename + "_"+lib+"_"+audio_id+'.txt' - try: - beatmap=np.loadtxt(cacheDir) - if log is True: print('loaded cached beatmap.') - except OSError: - if log is True:print("beatmap hasn't been generated yet. Generating...") - beatmap = None - - if beatmap is None: - if 'madmom' in lib.lower(): - from collections.abc import MutableMapping, MutableSequence - import madmom - assert len(song.audio[0])>song.sr*2, f'Audio file is too short, len={len(song.audio[0])} samples, or {len(song.audio[0])/song.sr} seconds. Minimum length is 2 seconds, audio below that breaks madmom processors.' 
- if lib=='madmom.RNNBeatProcessor': - proc = madmom.features.beats.RNNBeatProcessor() - beatmap = proc(madmom.audio.signal.Signal(song.audio.T, song.sr)) - elif lib=='madmom.MultiModelSelectionProcessor': - proc = madmom.features.beats.RNNBeatProcessor(post_processor=None) - predictions = proc(madmom.audio.signal.Signal(song.audio.T, song.sr)) - mm_proc = madmom.features.beats.MultiModelSelectionProcessor(num_ref_predictions=None) - beatmap= mm_proc(predictions)*song.sr - beatmap/= np.max(beatmap) - elif lib=='stunlocked': - spikes = np.abs(np.gradient(np.clip(song.audio[0], -1, 1)))[:int(len(song.audio[0]) - (len(song.audio[0])%int(song.sr/100)))] - spikes = spikes.reshape(-1, (int(song.sr/100))) - spikes = np.asarray(list(np.max(i) for i in spikes)) - if len(beatmap) > len(spikes): beatmap = beatmap[:len(spikes)] - elif len(spikes) > len(beatmap): spikes = spikes[:len(beatmap)] - zeroing = 0 - for i in range(len(spikes)): - if zeroing > 0: - if spikes[i] <= 0.1: zeroing -=1 - spikes[i] = 0 - elif spikes[i] >= 0.1: - spikes[i] = 1 - zeroing = 7 - if spikes[i] <= 0.1: spikes[i] = 0 - beatmap = spikes - - if caching is True: np.savetxt(cacheDir, beatmap) - - if add_peaks is True: - spikes = np.abs(np.gradient(np.clip(song.audio[0], -1, 1)))[:int(len(song.audio[0]) - (len(song.audio[0])%int(song.sr/100)))] - spikes = spikes.reshape(-1, (int(song.sr/100))) - spikes = np.asarray(list(np.max(i) for i in spikes)) - if len(beatmap) > len(spikes): beatmap = beatmap[:len(spikes)] - elif len(spikes) > len(beatmap): spikes = spikes[:len(beatmap)] - zeroing = 0 - for i in range(len(spikes)): - if zeroing > 0: - if spikes[i] <= 0.1: zeroing -=1 - spikes[i] = 0 - elif spikes[i] >= 0.1: - spikes[i] = 1 - zeroing = 7 - if spikes[i] <= 0.1: spikes[i] = 0 - else: spikes = None - - def _process(song: main.song, beatmap, spikes, threshold): - '''ඞ''' - if add_peaks is True: beatmap += spikes - hitmap=[] - actual_samplerate=int(song.sr/100) - beat_middle=int(actual_samplerate/2) - for i in range(len(beatmap)): - if beatmap[i]>threshold: hitmap.append(i*actual_samplerate + beat_middle) - hitmap=np.asarray(hitmap) - clump=[] - for i in range(len(hitmap)-1): - #print(i, abs(song.beatmap[i]-song.beatmap[i+1]), clump) - if abs(hitmap[i] - hitmap[i+1]) < song.sr/16 and i != len(hitmap)-2: clump.append(i) - elif clump!=[]: - clump.append(i) - actual_time=hitmap[clump[0]] - hitmap[np.array(clump)]=0 - #print(song.beatmap) - hitmap[clump[0]]=actual_time - clump=[] - - hitmap=hitmap[hitmap!=0] - return hitmap - - osufile=lambda title,artist,version: ("osu file format v14\n" - "\n" - "[General]\n" - f"AudioFilename: {song.path.split('/')[-1]}\n" - "AudioLeadIn: 0\n" - "PreviewTime: -1\n" - "Countdown: 0\n" - "SampleSet: Normal\n" - "StackLeniency: 0.5\n" - "Mode: 0\n" - "LetterboxInBreaks: 0\n" - "WidescreenStoryboard: 0\n" - "\n" - "[Editor]\n" - "DistanceSpacing: 1.1\n" - "BeatDivisor: 4\n" - "GridSize: 8\n" - "TimelineZoom: 1.6\n" - "\n" - "[Metadata]\n" - f"Title:{title}\n" - f"TitleUnicode:{title}\n" - f"Artist:{artist}\n" - f"ArtistUnicode:{artist}\n" - f'Creator:{lib} + BeatManipulator\n' - f'Version:{version} {lib}\n' - 'Source:\n' - 'Tags:BeatManipulator\n' - 'BeatmapID:0\n' - 'BeatmapSetID:-1\n' - '\n' - '[Difficulty]\n' - 'HPDrainRate:4\n' - 'CircleSize:4\n' - 'OverallDifficulty:5\n' - 'ApproachRate:10\n' - 'SliderMultiplier:3.3\n' - 'SliderTickRate:1\n' - '\n' - '[Events]\n' - '//Background and Video events\n' - '//Break Periods\n' - '//Storyboard Layer 0 (Background)\n' - '//Storyboard Layer 1 (Fail)\n' - 
'//Storyboard Layer 2 (Pass)\n' - '//Storyboard Layer 3 (Foreground)\n' - '//Storyboard Layer 4 (Overlay)\n' - '//Storyboard Sound Samples\n' - '\n' - '[TimingPoints]\n' - '0,140.0,4,1,0,100,1,0\n' - '\n' - '\n' - '[HitObjects]\n') - # remove the clumps - #print(self.beatmap) - - #print(self.beatmap) - - - #print(len(osumap)) - #input('banana') - import shutil, os - if os.path.exists('beat_manipulator/temp'): shutil.rmtree('beat_manipulator/temp') - os.mkdir('beat_manipulator/temp') - hitmap=[] - import random - for difficulty in difficulties: - for i in range(4): - #print(i) - this_difficulty=_process(song, beatmap, spikes, difficulty) - hitmap.append(this_difficulty) - - for k in range(len(hitmap)): - osumap=np.vstack((hitmap[k],np.zeros(len(hitmap[k])),np.zeros(len(hitmap[k])))).T - difficulty= difficulties[k] - for i in range(len(osumap)-1): - if i==0:continue - dist=(osumap[i,0]-osumap[i-1,0])*(1-(difficulty**0.3)) - if dist<1000: dist=0.005 - elif dist<2000: dist=0.01 - elif dist<3000: dist=0.015 - elif dist<4000: dist=0.02 - elif dist<5000: dist=0.25 - elif dist<6000: dist=0.35 - elif dist<7000: dist=0.45 - elif dist<8000: dist=0.55 - elif dist<9000: dist=0.65 - elif dist<10000: dist=0.75 - elif dist<12500: dist=0.85 - elif dist<15000: dist=0.95 - elif dist<20000: dist=1 - #elif dist<30000: dist=0.8 - prev_x=osumap[i-1,1] - prev_y=osumap[i-1,2] - if prev_x>0: prev_x=prev_x-dist*0.1 - elif prev_x<0: prev_x=prev_x+dist*0.1 - if prev_y>0: prev_y=prev_y-dist*0.1 - elif prev_y<0: prev_y=prev_y+dist*0.1 - dirx=random.uniform(-dist,dist) - diry=dist-abs(dirx)*random.choice([-1, 1]) - if abs(prev_x+dirx)>1: dirx=-dirx - if abs(prev_y+diry)>1: diry=-diry - x=prev_x+dirx - y=prev_y+diry - #print(dirx,diry,x,y) - #print(x>1, x<1, y>1, y<1) - if x>1: x=0.8 - if x<-1: x=-0.8 - if y>1: y=0.8 - if y<-1: y=-0.8 - #print(dirx,diry,x,y) - osumap[i,1]=x - osumap[i,2]=y - - osumap[:,1]*=300 - osumap[:,1]+=300 - osumap[:,2]*=180 - osumap[:,2]+=220 - - file=osufile(artist, title, difficulty) - for j in osumap: - #print('285,70,'+str(int(int(i)*1000/self.samplerate))+',1,0') - file+=f'{int(j[1])},{int(j[2])},{str(int(int(j[0])*1000/song.sr))},1,0\n' - with open(f'beat_manipulator/temp/{artist} - {title} (BeatManipulator {difficulty} {lib}].osu', 'x', encoding="utf-8") as f: - f.write(file) - from . import io - import shutil, os - shutil.copyfile(song.path, 'beat_manipulator/temp/'+filename) - shutil.make_archive('beat_manipulator_osz', 'zip', 'beat_manipulator/temp') - outputname = io._outputfilename(path = output, filename = song.path, suffix = ' ('+lib + ')', ext = 'osz') - if not os.path.exists(outputname): - os.rename('beat_manipulator_osz.zip', outputname) - if log is True: print(f'Created `{outputname}`') - else: print(f'{outputname} already exists!') - shutil.rmtree('beat_manipulator/temp') - return outputname \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/datasets/coco.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/datasets/coco.py deleted file mode 100644 index ed4f7ccb20efa3b54c719783e279c381ca5d8587..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/datasets/coco.py +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import contextlib -import datetime -import io -import json -import logging -import numpy as np -import os -import shutil -import pycocotools.mask as mask_util -from fvcore.common.timer import Timer -from iopath.common.file_io import file_lock -from PIL import Image - -from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes -from detectron2.utils.file_io import PathManager - -from .. import DatasetCatalog, MetadataCatalog - -""" -This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". -""" - - -logger = logging.getLogger(__name__) - -__all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"] - - -def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None): - """ - Load a json file with COCO's instances annotation format. - Currently supports instance detection, instance segmentation, - and person keypoints annotations. - - Args: - json_file (str): full path to the json file in COCO instances annotation format. - image_root (str or path-like): the directory where the images in this json file exists. - dataset_name (str or None): the name of the dataset (e.g., coco_2017_train). - When provided, this function will also do the following: - - * Put "thing_classes" into the metadata associated with this dataset. - * Map the category ids into a contiguous range (needed by standard dataset format), - and add "thing_dataset_id_to_contiguous_id" to the metadata associated - with this dataset. - - This option should usually be provided, unless users need to load - the original json content and apply more processing manually. - extra_annotation_keys (list[str]): list of per-annotation keys that should also be - loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints", - "category_id", "segmentation"). The values for these keys will be returned as-is. - For example, the densepose annotations are loaded in this way. - - Returns: - list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See - `Using Custom Datasets `_ ) when `dataset_name` is not None. - If `dataset_name` is None, the returned `category_ids` may be - incontiguous and may not conform to the Detectron2 standard format. - - Notes: - 1. This function does not read the image files. - The results do not have the "image" field. - """ - from pycocotools.coco import COCO - - timer = Timer() - json_file = PathManager.get_local_path(json_file) - with contextlib.redirect_stdout(io.StringIO()): - coco_api = COCO(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) - - id_map = None - if dataset_name is not None: - meta = MetadataCatalog.get(dataset_name) - cat_ids = sorted(coco_api.getCatIds()) - cats = coco_api.loadCats(cat_ids) - # The categories in a custom json file may not be sorted. - thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] - meta.thing_classes = thing_classes - - # In COCO, certain category ids are artificially removed, - # and by convention they are always ignored. - # We deal with COCO's id issue and translate - # the category ids to contiguous ids in [0, 80). - - # It works by looking at the "categories" field in the json, therefore - # if users' own json also have incontiguous ids, we'll - # apply this mapping as well but print a warning. 
- if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): - if "coco" not in dataset_name: - logger.warning( - """ -Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. -""" - ) - id_map = {v: i for i, v in enumerate(cat_ids)} - meta.thing_dataset_id_to_contiguous_id = id_map - - # sort indices for reproducible results - img_ids = sorted(coco_api.imgs.keys()) - # imgs is a list of dicts, each looks something like: - # {'license': 4, - # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', - # 'file_name': 'COCO_val2014_000000001268.jpg', - # 'height': 427, - # 'width': 640, - # 'date_captured': '2013-11-17 05:57:24', - # 'id': 1268} - imgs = coco_api.loadImgs(img_ids) - # anns is a list[list[dict]], where each dict is an annotation - # record for an object. The inner list enumerates the objects in an image - # and the outer list enumerates over images. Example of anns[0]: - # [{'segmentation': [[192.81, - # 247.09, - # ... - # 219.03, - # 249.06]], - # 'area': 1035.749, - # 'iscrowd': 0, - # 'image_id': 1268, - # 'bbox': [192.81, 224.8, 74.73, 33.43], - # 'category_id': 16, - # 'id': 42986}, - # ...] - anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] - total_num_valid_anns = sum([len(x) for x in anns]) - total_num_anns = len(coco_api.anns) - if total_num_valid_anns < total_num_anns: - logger.warning( - f"{json_file} contains {total_num_anns} annotations, but only " - f"{total_num_valid_anns} of them match to images in the file." - ) - - if "minival" not in json_file: - # The popular valminusminival & minival annotations for COCO2014 contain this bug. - # However the ratio of buggy annotations there is tiny and does not affect accuracy. - # Therefore we explicitly white-list them. - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( - json_file - ) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) - - dataset_dicts = [] - - ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) - - num_instances_without_valid_segmentation = 0 - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - record["file_name"] = os.path.join(image_root, img_dict["file_name"]) - record["height"] = img_dict["height"] - record["width"] = img_dict["width"] - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - # Check that the image_id in this annotation is the same as - # the image_id we're looking at. - # This fails only when the data parsing logic or the annotation file is buggy. - - # The original COCO valminusminival2014 & minival2014 annotation files - # actually contains bugs that, together with certain ways of using COCO API, - # can trigger this assertion. - assert anno["image_id"] == image_id - - assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' - - obj = {key: anno[key] for key in ann_keys if key in anno} - if "bbox" in obj and len(obj["bbox"]) == 0: - raise ValueError( - f"One annotation of image {image_id} contains empty 'bbox' value! " - "This json does not have valid COCO format." 
- ) - - segm = anno.get("segmentation", None) - if segm: # either list[list[float]] or dict(RLE) - if isinstance(segm, dict): - if isinstance(segm["counts"], list): - # convert to compressed RLE - segm = mask_util.frPyObjects(segm, *segm["size"]) - else: - # filter out invalid polygons (< 3 points) - segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] - if len(segm) == 0: - num_instances_without_valid_segmentation += 1 - continue # ignore this instance - obj["segmentation"] = segm - - keypts = anno.get("keypoints", None) - if keypts: # list[int] - for idx, v in enumerate(keypts): - if idx % 3 != 2: - # COCO's segmentation coordinates are floating points in [0, H or W], - # but keypoint coordinates are integers in [0, H-1 or W-1] - # Therefore we assume the coordinates are "pixel indices" and - # add 0.5 to convert to floating point coordinates. - keypts[idx] = v + 0.5 - obj["keypoints"] = keypts - - obj["bbox_mode"] = BoxMode.XYWH_ABS - if id_map: - annotation_category_id = obj["category_id"] - try: - obj["category_id"] = id_map[annotation_category_id] - except KeyError as e: - raise KeyError( - f"Encountered category_id={annotation_category_id} " - "but this id does not exist in 'categories' of the json file." - ) from e - objs.append(obj) - record["annotations"] = objs - dataset_dicts.append(record) - - if num_instances_without_valid_segmentation > 0: - logger.warning( - "Filtered out {} instances without valid segmentation. ".format( - num_instances_without_valid_segmentation - ) - + "There might be issues in your dataset generation process. Please " - "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" - ) - return dataset_dicts - - -def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): - """ - Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are - treated as ground truth annotations and all files under "image_root" with "image_ext" extension - as input images. Ground truth and input images are matched using file paths relative to - "gt_root" and "image_root" respectively without taking into account file extensions. - This works for COCO as well as some other datasets. - - Args: - gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation - annotations are stored as images with integer values in pixels that represent - corresponding semantic labels. - image_root (str): the directory where the input images are. - gt_ext (str): file extension for ground truth annotations. - image_ext (str): file extension for input images. - - Returns: - list[dict]: - a list of dicts in detectron2 standard format without instance-level - annotation. - - Notes: - 1. This function does not read the image and ground truth files. - The results do not have the "image" and "sem_seg" fields. - """ - - # We match input images with ground truth based on their relative filepaths (without file - # extensions) starting from 'image_root' and 'gt_root' respectively. 
- def file2id(folder_path, file_path): - # extract relative path starting from `folder_path` - image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) - # remove file extension - image_id = os.path.splitext(image_id)[0] - return image_id - - input_files = sorted( - (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), - key=lambda file_path: file2id(image_root, file_path), - ) - gt_files = sorted( - (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), - key=lambda file_path: file2id(gt_root, file_path), - ) - - assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) - - # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images - if len(input_files) != len(gt_files): - logger.warn( - "Directory {} and {} has {} and {} files, respectively.".format( - image_root, gt_root, len(input_files), len(gt_files) - ) - ) - input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] - gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] - intersect = list(set(input_basenames) & set(gt_basenames)) - # sort, otherwise each worker may obtain a list[dict] in different order - intersect = sorted(intersect) - logger.warn("Will use their intersection of {} files.".format(len(intersect))) - input_files = [os.path.join(image_root, f + image_ext) for f in intersect] - gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] - - logger.info( - "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) - ) - - dataset_dicts = [] - for (img_path, gt_path) in zip(input_files, gt_files): - record = {} - record["file_name"] = img_path - record["sem_seg_file_name"] = gt_path - dataset_dicts.append(record) - - return dataset_dicts - - -def convert_to_coco_dict(dataset_name): - """ - Convert an instance detection/segmentation or keypoint detection dataset - in detectron2's standard format into COCO json format. - - Generic dataset description can be found here: - https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset - - COCO data format description can be found here: - http://cocodataset.org/#format-data - - Args: - dataset_name (str): - name of the source dataset - Must be registered in DatastCatalog and in detectron2's standard format. 
- Must have corresponding metadata "thing_classes" - Returns: - coco_dict: serializable dict in COCO json format - """ - - dataset_dicts = DatasetCatalog.get(dataset_name) - metadata = MetadataCatalog.get(dataset_name) - - # unmap the category mapping ids for COCO - if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): - reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} - reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa - else: - reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa - - categories = [ - {"id": reverse_id_mapper(id), "name": name} - for id, name in enumerate(metadata.thing_classes) - ] - - logger.info("Converting dataset dicts into COCO format") - coco_images = [] - coco_annotations = [] - - for image_id, image_dict in enumerate(dataset_dicts): - coco_image = { - "id": image_dict.get("image_id", image_id), - "width": int(image_dict["width"]), - "height": int(image_dict["height"]), - "file_name": str(image_dict["file_name"]), - } - coco_images.append(coco_image) - - anns_per_image = image_dict.get("annotations", []) - for annotation in anns_per_image: - # create a new dict with only COCO fields - coco_annotation = {} - - # COCO requirement: XYWH box format for axis-align and XYWHA for rotated - bbox = annotation["bbox"] - if isinstance(bbox, np.ndarray): - if bbox.ndim != 1: - raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") - bbox = bbox.tolist() - if len(bbox) not in [4, 5]: - raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.") - from_bbox_mode = annotation["bbox_mode"] - to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS - bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) - - # COCO requirement: instance area - if "segmentation" in annotation: - # Computing areas for instances by counting the pixels - segmentation = annotation["segmentation"] - # TODO: check segmentation type: RLE, BinaryMask or Polygon - if isinstance(segmentation, list): - polygons = PolygonMasks([segmentation]) - area = polygons.area()[0].item() - elif isinstance(segmentation, dict): # RLE - area = mask_util.area(segmentation).item() - else: - raise TypeError(f"Unknown segmentation type {type(segmentation)}!") - else: - # Computing areas using bounding boxes - if to_bbox_mode == BoxMode.XYWH_ABS: - bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) - area = Boxes([bbox_xy]).area()[0].item() - else: - area = RotatedBoxes([bbox]).area()[0].item() - - if "keypoints" in annotation: - keypoints = annotation["keypoints"] # list[int] - for idx, v in enumerate(keypoints): - if idx % 3 != 2: - # COCO's segmentation coordinates are floating points in [0, H or W], - # but keypoint coordinates are integers in [0, H-1 or W-1] - # For COCO format consistency we substract 0.5 - # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163 - keypoints[idx] = v - 0.5 - if "num_keypoints" in annotation: - num_keypoints = annotation["num_keypoints"] - else: - num_keypoints = sum(kp > 0 for kp in keypoints[2::3]) - - # COCO requirement: - # linking annotations to images - # "id" field must start with 1 - coco_annotation["id"] = len(coco_annotations) + 1 - coco_annotation["image_id"] = coco_image["id"] - coco_annotation["bbox"] = [round(float(x), 3) for x in bbox] - coco_annotation["area"] = float(area) - coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0)) - coco_annotation["category_id"] = 
int(reverse_id_mapper(annotation["category_id"])) - - # Add optional fields - if "keypoints" in annotation: - coco_annotation["keypoints"] = keypoints - coco_annotation["num_keypoints"] = num_keypoints - - if "segmentation" in annotation: - seg = coco_annotation["segmentation"] = annotation["segmentation"] - if isinstance(seg, dict): # RLE - counts = seg["counts"] - if not isinstance(counts, str): - # make it json-serializable - seg["counts"] = counts.decode("ascii") - - coco_annotations.append(coco_annotation) - - logger.info( - "Conversion finished, " - f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}" - ) - - info = { - "date_created": str(datetime.datetime.now()), - "description": "Automatically generated COCO json file for Detectron2.", - } - coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None} - if len(coco_annotations) > 0: - coco_dict["annotations"] = coco_annotations - return coco_dict - - -def convert_to_coco_json(dataset_name, output_file, allow_cached=True): - """ - Converts dataset into COCO format and saves it to a json file. - dataset_name must be registered in DatasetCatalog and in detectron2's standard format. - - Args: - dataset_name: - reference from the config file to the catalogs - must be registered in DatasetCatalog and in detectron2's standard format - output_file: path of json file that will be saved to - allow_cached: if json file is already present then skip conversion - """ - - # TODO: The dataset or the conversion script *may* change, - # a checksum would be useful for validating the cached data - - PathManager.mkdirs(os.path.dirname(output_file)) - with file_lock(output_file): - if PathManager.exists(output_file) and allow_cached: - logger.warning( - f"Using previously cached COCO format annotations at '{output_file}'. " - "You need to clear the cache file if your dataset has been modified." - ) - else: - logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)") - coco_dict = convert_to_coco_dict(dataset_name) - - logger.info(f"Caching COCO format annotations at '{output_file}' ...") - tmp_file = output_file + ".tmp" - with PathManager.open(tmp_file, "w") as f: - json.dump(coco_dict, f) - shutil.move(tmp_file, output_file) - - -def register_coco_instances(name, metadata, json_file, image_root): - """ - Register a dataset in COCO's json annotation format for - instance detection, instance segmentation and keypoint detection. - (i.e., Type 1 and 2 in http://cocodataset.org/#format-data. - `instances*.json` and `person_keypoints*.json` in the dataset). - - This is an example of how to register a new dataset. - You can do something similar to this function, to register new datasets. - - Args: - name (str): the name that identifies a dataset, e.g. "coco_2014_train". - metadata (dict): extra metadata associated with this dataset. You can - leave it as an empty dict. - json_file (str): path to the json instance annotation file. - image_root (str or path-like): directory which contains all the images. - """ - assert isinstance(name, str), name - assert isinstance(json_file, (str, os.PathLike)), json_file - assert isinstance(image_root, (str, os.PathLike)), image_root - # 1. register a function which returns dicts - DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name)) - - # 2. 
Optionally, add metadata about this dataset, - # since they might be useful in evaluation, visualization or logging - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata - ) - - -if __name__ == "__main__": - """ - Test the COCO json dataset loader. - - Usage: - python -m detectron2.data.datasets.coco \ - path/to/json path/to/image_root dataset_name - - "dataset_name" can be "coco_2014_minival_100", or other - pre-registered ones - """ - from detectron2.utils.logger import setup_logger - from detectron2.utils.visualizer import Visualizer - import detectron2.data.datasets # noqa # add pre-defined metadata - import sys - - logger = setup_logger(name=__name__) - assert sys.argv[3] in DatasetCatalog.list() - meta = MetadataCatalog.get(sys.argv[3]) - - dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3]) - logger.info("Done loading {} samples.".format(len(dicts))) - - dirname = "coco-data-vis" - os.makedirs(dirname, exist_ok=True) - for d in dicts: - img = np.array(Image.open(d["file_name"])) - visualizer = Visualizer(img, metadata=meta) - vis = visualizer.draw_dataset_dict(d) - fpath = os.path.join(dirname, os.path.basename(d["file_name"])) - vis.save(fpath) diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/transform/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/transform/__init__.py deleted file mode 100644 index 369e1b278899b225d55bfc729514873b4259c7b9..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/transform/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from .image import ImageResizeTransform diff --git a/spaces/cahodk/live-ml5-facemesh-p5js/index.html b/spaces/cahodk/live-ml5-facemesh-p5js/index.html deleted file mode 100644 index 161ad643319020a2bfeacc26ff284eebd85e0e6e..0000000000000000000000000000000000000000 --- a/spaces/cahodk/live-ml5-facemesh-p5js/index.html +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - - PoseNet example using p5.js - - - - - - - - - - -

- PoseNet example using p5.js
- Loading model...
- - - - \ No newline at end of file diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/tools/export_onnx.py b/spaces/chendl/compositional_test/multimodal/YOLOX/tools/export_onnx.py deleted file mode 100644 index 8703166a4ee487d2d4b713b42c6f8c55879281db..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/tools/export_onnx.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. - -import argparse -import os -from loguru import logger - -import torch -from torch import nn - -from yolox.exp import get_exp -from yolox.models.network_blocks import SiLU -from yolox.utils import replace_module - - -def make_parser(): - parser = argparse.ArgumentParser("YOLOX onnx deploy") - parser.add_argument( - "--output-name", type=str, default="yolox.onnx", help="output name of models" - ) - parser.add_argument( - "--input", default="images", type=str, help="input node name of onnx model" - ) - parser.add_argument( - "--output", default="output", type=str, help="output node name of onnx model" - ) - parser.add_argument( - "-o", "--opset", default=11, type=int, help="onnx opset version" - ) - parser.add_argument("--batch-size", type=int, default=1, help="batch size") - parser.add_argument( - "--dynamic", action="store_true", help="whether the input shape should be dynamic or not" - ) - parser.add_argument("--no-onnxsim", action="store_true", help="use onnxsim or not") - parser.add_argument( - "-f", - "--exp_file", - default=None, - type=str, - help="experiment description file", - ) - parser.add_argument("-expn", "--experiment-name", type=str, default=None) - parser.add_argument("-n", "--name", type=str, default=None, help="model name") - parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path") - parser.add_argument( - "opts", - help="Modify config options using the command-line", - default=None, - nargs=argparse.REMAINDER, - ) - parser.add_argument( - "--decode_in_inference", - action="store_true", - help="decode in inference or not" - ) - - return parser - - -@logger.catch -def main(): - args = make_parser().parse_args() - logger.info("args value: {}".format(args)) - exp = get_exp(args.exp_file, args.name) - exp.merge(args.opts) - - if not args.experiment_name: - args.experiment_name = exp.exp_name - - model = exp.get_model() - if args.ckpt is None: - file_name = os.path.join(exp.output_dir, args.experiment_name) - ckpt_file = os.path.join(file_name, "best_ckpt.pth") - else: - ckpt_file = args.ckpt - - # load the model state dict - ckpt = torch.load(ckpt_file, map_location="cpu") - - model.eval() - if "model" in ckpt: - ckpt = ckpt["model"] - model.load_state_dict(ckpt) - model = replace_module(model, nn.SiLU, SiLU) - model.head.decode_in_inference = args.decode_in_inference - - logger.info("loading checkpoint done.") - dummy_input = torch.randn(args.batch_size, 3, exp.test_size[0], exp.test_size[1]) - - torch.onnx._export( - model, - dummy_input, - args.output_name, - input_names=[args.input], - output_names=[args.output], - dynamic_axes={args.input: {0: 'batch'}, - args.output: {0: 'batch'}} if args.dynamic else None, - opset_version=args.opset, - ) - logger.info("generated onnx model named {}".format(args.output_name)) - - if not args.no_onnxsim: - import onnx - from onnxsim import simplify - - # use onnx-simplifier to reduce reduent model. 
- onnx_model = onnx.load(args.output_name) - model_simp, check = simplify(onnx_model) - assert check, "Simplified ONNX model could not be validated" - onnx.save(model_simp, args.output_name) - logger.info("generated simplified onnx model named {}".format(args.output_name)) - - -if __name__ == "__main__": - main() diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/cocoeval/cocoeval.cpp b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/cocoeval/cocoeval.cpp deleted file mode 100644 index 2e63bc9952918060f55999ec100b283d83616b46..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/cocoeval/cocoeval.cpp +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -#include "cocoeval.h" -#include -#include -#include -#include - -using namespace pybind11::literals; - -namespace COCOeval { - -// Sort detections from highest score to lowest, such that -// detection_instances[detection_sorted_indices[t]] >= -// detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match -// original COCO API -void SortInstancesByDetectionScore( - const std::vector& detection_instances, - std::vector* detection_sorted_indices) { - detection_sorted_indices->resize(detection_instances.size()); - std::iota( - detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); - std::stable_sort( - detection_sorted_indices->begin(), - detection_sorted_indices->end(), - [&detection_instances](size_t j1, size_t j2) { - return detection_instances[j1].score > detection_instances[j2].score; - }); -} - -// Partition the ground truth objects based on whether or not to ignore them -// based on area -void SortInstancesByIgnore( - const std::array& area_range, - const std::vector& ground_truth_instances, - std::vector* ground_truth_sorted_indices, - std::vector* ignores) { - ignores->clear(); - ignores->reserve(ground_truth_instances.size()); - for (auto o : ground_truth_instances) { - ignores->push_back( - o.ignore || o.area < area_range[0] || o.area > area_range[1]); - } - - ground_truth_sorted_indices->resize(ground_truth_instances.size()); - std::iota( - ground_truth_sorted_indices->begin(), - ground_truth_sorted_indices->end(), - 0); - std::stable_sort( - ground_truth_sorted_indices->begin(), - ground_truth_sorted_indices->end(), - [&ignores](size_t j1, size_t j2) { - return (int)(*ignores)[j1] < (int)(*ignores)[j2]; - }); -} - -// For each IOU threshold, greedily match each detected instance to a ground -// truth instance (if possible) and store the results -void MatchDetectionsToGroundTruth( - const std::vector& detection_instances, - const std::vector& detection_sorted_indices, - const std::vector& ground_truth_instances, - const std::vector& ground_truth_sorted_indices, - const std::vector& ignores, - const std::vector>& ious, - const std::vector& iou_thresholds, - const std::array& area_range, - ImageEvaluation* results) { - // Initialize memory to store return data matches and ignore - const int num_iou_thresholds = iou_thresholds.size(); - const int num_ground_truth = ground_truth_sorted_indices.size(); - const int num_detections = detection_sorted_indices.size(); - std::vector ground_truth_matches( - num_iou_thresholds * num_ground_truth, 0); - std::vector& detection_matches = results->detection_matches; - std::vector& detection_ignores = results->detection_ignores; - std::vector& ground_truth_ignores = results->ground_truth_ignores; - 
detection_matches.resize(num_iou_thresholds * num_detections, 0); - detection_ignores.resize(num_iou_thresholds * num_detections, false); - ground_truth_ignores.resize(num_ground_truth); - for (auto g = 0; g < num_ground_truth; ++g) { - ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]]; - } - - for (auto t = 0; t < num_iou_thresholds; ++t) { - for (auto d = 0; d < num_detections; ++d) { - // information about best match so far (match=-1 -> unmatched) - double best_iou = std::min(iou_thresholds[t], 1 - 1e-10); - int match = -1; - for (auto g = 0; g < num_ground_truth; ++g) { - // if this ground truth instance is already matched and not a - // crowd, it cannot be matched to another detection - if (ground_truth_matches[t * num_ground_truth + g] > 0 && - !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) { - continue; - } - - // if detected instance matched to a regular ground truth - // instance, we can break on the first ground truth instance - // tagged as ignore (because they are sorted by the ignore tag) - if (match >= 0 && !ground_truth_ignores[match] && - ground_truth_ignores[g]) { - break; - } - - // if IOU overlap is the best so far, store the match appropriately - if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) { - best_iou = ious[d][ground_truth_sorted_indices[g]]; - match = g; - } - } - // if match was made, store id of match for both detection and - // ground truth - if (match >= 0) { - detection_ignores[t * num_detections + d] = ground_truth_ignores[match]; - detection_matches[t * num_detections + d] = - ground_truth_instances[ground_truth_sorted_indices[match]].id; - ground_truth_matches[t * num_ground_truth + match] = - detection_instances[detection_sorted_indices[d]].id; - } - - // set unmatched detections outside of area range to ignore - const InstanceAnnotation& detection = - detection_instances[detection_sorted_indices[d]]; - detection_ignores[t * num_detections + d] = - detection_ignores[t * num_detections + d] || - (detection_matches[t * num_detections + d] == 0 && - (detection.area < area_range[0] || detection.area > area_range[1])); - } - } - - // store detection score results - results->detection_scores.resize(detection_sorted_indices.size()); - for (size_t d = 0; d < detection_sorted_indices.size(); ++d) { - results->detection_scores[d] = - detection_instances[detection_sorted_indices[d]].score; - } -} - -std::vector EvaluateImages( - const std::vector>& area_ranges, - int max_detections, - const std::vector& iou_thresholds, - const ImageCategoryInstances>& image_category_ious, - const ImageCategoryInstances& - image_category_ground_truth_instances, - const ImageCategoryInstances& - image_category_detection_instances) { - const int num_area_ranges = area_ranges.size(); - const int num_images = image_category_ground_truth_instances.size(); - const int num_categories = - image_category_ious.size() > 0 ? image_category_ious[0].size() : 0; - std::vector detection_sorted_indices; - std::vector ground_truth_sorted_indices; - std::vector ignores; - std::vector results_all( - num_images * num_area_ranges * num_categories); - - // Store results for each image, category, and area range combination. 
Results - // for each IOU threshold are packed into the same ImageEvaluation object - for (auto i = 0; i < num_images; ++i) { - for (auto c = 0; c < num_categories; ++c) { - const std::vector& ground_truth_instances = - image_category_ground_truth_instances[i][c]; - const std::vector& detection_instances = - image_category_detection_instances[i][c]; - - SortInstancesByDetectionScore( - detection_instances, &detection_sorted_indices); - if ((int)detection_sorted_indices.size() > max_detections) { - detection_sorted_indices.resize(max_detections); - } - - for (size_t a = 0; a < area_ranges.size(); ++a) { - SortInstancesByIgnore( - area_ranges[a], - ground_truth_instances, - &ground_truth_sorted_indices, - &ignores); - - MatchDetectionsToGroundTruth( - detection_instances, - detection_sorted_indices, - ground_truth_instances, - ground_truth_sorted_indices, - ignores, - image_category_ious[i][c], - iou_thresholds, - area_ranges[a], - &results_all - [c * num_area_ranges * num_images + a * num_images + i]); - } - } - } - - return results_all; -} - -// Convert a python list to a vector -template -std::vector list_to_vec(const py::list& l) { - std::vector v(py::len(l)); - for (int i = 0; i < (int)py::len(l); ++i) { - v[i] = l[i].cast(); - } - return v; -} - -// Helper function to Accumulate() -// Considers the evaluation results applicable to a particular category, area -// range, and max_detections parameter setting, which begin at -// evaluations[evaluation_index]. Extracts a sorted list of length n of all -// applicable detection instances concatenated across all images in the dataset, -// which are represented by the outputs evaluation_indices, detection_scores, -// image_detection_indices, and detection_sorted_indices--all of which are -// length n. evaluation_indices[i] stores the applicable index into -// evaluations[] for instance i, which has detection score detection_score[i], -// and is the image_detection_indices[i]'th of the list of detections -// for the image containing i. 
detection_sorted_indices[] defines a sorted -// permutation of the 3 other outputs -int BuildSortedDetectionList( - const std::vector& evaluations, - const int64_t evaluation_index, - const int64_t num_images, - const int max_detections, - std::vector* evaluation_indices, - std::vector* detection_scores, - std::vector* detection_sorted_indices, - std::vector* image_detection_indices) { - assert(evaluations.size() >= evaluation_index + num_images); - - // Extract a list of object instances of the applicable category, area - // range, and max detections requirements such that they can be sorted - image_detection_indices->clear(); - evaluation_indices->clear(); - detection_scores->clear(); - image_detection_indices->reserve(num_images * max_detections); - evaluation_indices->reserve(num_images * max_detections); - detection_scores->reserve(num_images * max_detections); - int num_valid_ground_truth = 0; - for (auto i = 0; i < num_images; ++i) { - const ImageEvaluation& evaluation = evaluations[evaluation_index + i]; - - for (int d = 0; - d < (int)evaluation.detection_scores.size() && d < max_detections; - ++d) { // detected instances - evaluation_indices->push_back(evaluation_index + i); - image_detection_indices->push_back(d); - detection_scores->push_back(evaluation.detection_scores[d]); - } - for (auto ground_truth_ignore : evaluation.ground_truth_ignores) { - if (!ground_truth_ignore) { - ++num_valid_ground_truth; - } - } - } - - // Sort detections by decreasing score, using stable sort to match - // python implementation - detection_sorted_indices->resize(detection_scores->size()); - std::iota( - detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); - std::stable_sort( - detection_sorted_indices->begin(), - detection_sorted_indices->end(), - [&detection_scores](size_t j1, size_t j2) { - return (*detection_scores)[j1] > (*detection_scores)[j2]; - }); - - return num_valid_ground_truth; -} - -// Helper function to Accumulate() -// Compute a precision recall curve given a sorted list of detected instances -// encoded in evaluations, evaluation_indices, detection_scores, -// detection_sorted_indices, image_detection_indices (see -// BuildSortedDetectionList()). Using vectors precisions and recalls -// and temporary storage, output the results into precisions_out, recalls_out, -// and scores_out, which are large buffers containing many precion/recall curves -// for all possible parameter settings, with precisions_out_index and -// recalls_out_index defining the applicable indices to store results. 
-void ComputePrecisionRecallCurve( - const int64_t precisions_out_index, - const int64_t precisions_out_stride, - const int64_t recalls_out_index, - const std::vector& recall_thresholds, - const int iou_threshold_index, - const int num_iou_thresholds, - const int num_valid_ground_truth, - const std::vector& evaluations, - const std::vector& evaluation_indices, - const std::vector& detection_scores, - const std::vector& detection_sorted_indices, - const std::vector& image_detection_indices, - std::vector* precisions, - std::vector* recalls, - std::vector* precisions_out, - std::vector* scores_out, - std::vector* recalls_out) { - assert(recalls_out->size() > recalls_out_index); - - // Compute precision/recall for each instance in the sorted list of detections - int64_t true_positives_sum = 0, false_positives_sum = 0; - precisions->clear(); - recalls->clear(); - precisions->reserve(detection_sorted_indices.size()); - recalls->reserve(detection_sorted_indices.size()); - assert(!evaluations.empty() || detection_sorted_indices.empty()); - for (auto detection_sorted_index : detection_sorted_indices) { - const ImageEvaluation& evaluation = - evaluations[evaluation_indices[detection_sorted_index]]; - const auto num_detections = - evaluation.detection_matches.size() / num_iou_thresholds; - const auto detection_index = iou_threshold_index * num_detections + - image_detection_indices[detection_sorted_index]; - assert(evaluation.detection_matches.size() > detection_index); - assert(evaluation.detection_ignores.size() > detection_index); - const int64_t detection_match = - evaluation.detection_matches[detection_index]; - const bool detection_ignores = - evaluation.detection_ignores[detection_index]; - const auto true_positive = detection_match > 0 && !detection_ignores; - const auto false_positive = detection_match == 0 && !detection_ignores; - if (true_positive) { - ++true_positives_sum; - } - if (false_positive) { - ++false_positives_sum; - } - - const double recall = - static_cast(true_positives_sum) / num_valid_ground_truth; - recalls->push_back(recall); - const int64_t num_valid_detections = - true_positives_sum + false_positives_sum; - const double precision = num_valid_detections > 0 - ? static_cast(true_positives_sum) / num_valid_detections - : 0.0; - precisions->push_back(precision); - } - - (*recalls_out)[recalls_out_index] = !recalls->empty() ? 
recalls->back() : 0; - - for (int64_t i = static_cast(precisions->size()) - 1; i > 0; --i) { - if ((*precisions)[i] > (*precisions)[i - 1]) { - (*precisions)[i - 1] = (*precisions)[i]; - } - } - - // Sample the per instance precision/recall list at each recall threshold - for (size_t r = 0; r < recall_thresholds.size(); ++r) { - // first index in recalls >= recall_thresholds[r] - std::vector::iterator low = std::lower_bound( - recalls->begin(), recalls->end(), recall_thresholds[r]); - size_t precisions_index = low - recalls->begin(); - - const auto results_ind = precisions_out_index + r * precisions_out_stride; - assert(results_ind < precisions_out->size()); - assert(results_ind < scores_out->size()); - if (precisions_index < precisions->size()) { - (*precisions_out)[results_ind] = (*precisions)[precisions_index]; - (*scores_out)[results_ind] = - detection_scores[detection_sorted_indices[precisions_index]]; - } else { - (*precisions_out)[results_ind] = 0; - (*scores_out)[results_ind] = 0; - } - } -} -py::dict Accumulate( - const py::object& params, - const std::vector& evaluations) { - const std::vector recall_thresholds = - list_to_vec(params.attr("recThrs")); - const std::vector max_detections = - list_to_vec(params.attr("maxDets")); - const int num_iou_thresholds = py::len(params.attr("iouThrs")); - const int num_recall_thresholds = py::len(params.attr("recThrs")); - const int num_categories = params.attr("useCats").cast() == 1 - ? py::len(params.attr("catIds")) - : 1; - const int num_area_ranges = py::len(params.attr("areaRng")); - const int num_max_detections = py::len(params.attr("maxDets")); - const int num_images = py::len(params.attr("imgIds")); - - std::vector precisions_out( - num_iou_thresholds * num_recall_thresholds * num_categories * - num_area_ranges * num_max_detections, - -1); - std::vector recalls_out( - num_iou_thresholds * num_categories * num_area_ranges * - num_max_detections, - -1); - std::vector scores_out( - num_iou_thresholds * num_recall_thresholds * num_categories * - num_area_ranges * num_max_detections, - -1); - - // Consider the list of all detected instances in the entire dataset in one - // large list. evaluation_indices, detection_scores, - // image_detection_indices, and detection_sorted_indices all have the same - // length as this list, such that each entry corresponds to one detected - // instance - std::vector evaluation_indices; // indices into evaluations[] - std::vector detection_scores; // detection scores of each instance - std::vector detection_sorted_indices; // sorted indices of all - // instances in the dataset - std::vector - image_detection_indices; // indices into the list of detected instances in - // the same image as each instance - std::vector precisions, recalls; - - for (auto c = 0; c < num_categories; ++c) { - for (auto a = 0; a < num_area_ranges; ++a) { - for (auto m = 0; m < num_max_detections; ++m) { - // The COCO PythonAPI assumes evaluations[] (the return value of - // COCOeval::EvaluateImages() is one long list storing results for each - // combination of category, area range, and image id, with categories in - // the outermost loop and images in the innermost loop. 
- const int64_t evaluations_index = - c * num_area_ranges * num_images + a * num_images; - int num_valid_ground_truth = BuildSortedDetectionList( - evaluations, - evaluations_index, - num_images, - max_detections[m], - &evaluation_indices, - &detection_scores, - &detection_sorted_indices, - &image_detection_indices); - - if (num_valid_ground_truth == 0) { - continue; - } - - for (auto t = 0; t < num_iou_thresholds; ++t) { - // recalls_out is a flattened vectors representing a - // num_iou_thresholds X num_categories X num_area_ranges X - // num_max_detections matrix - const int64_t recalls_out_index = - t * num_categories * num_area_ranges * num_max_detections + - c * num_area_ranges * num_max_detections + - a * num_max_detections + m; - - // precisions_out and scores_out are flattened vectors - // representing a num_iou_thresholds X num_recall_thresholds X - // num_categories X num_area_ranges X num_max_detections matrix - const int64_t precisions_out_stride = - num_categories * num_area_ranges * num_max_detections; - const int64_t precisions_out_index = t * num_recall_thresholds * - num_categories * num_area_ranges * num_max_detections + - c * num_area_ranges * num_max_detections + - a * num_max_detections + m; - - ComputePrecisionRecallCurve( - precisions_out_index, - precisions_out_stride, - recalls_out_index, - recall_thresholds, - t, - num_iou_thresholds, - num_valid_ground_truth, - evaluations, - evaluation_indices, - detection_scores, - detection_sorted_indices, - image_detection_indices, - &precisions, - &recalls, - &precisions_out, - &scores_out, - &recalls_out); - } - } - } - } - - time_t rawtime; - struct tm local_time; - std::array buffer; - time(&rawtime); -#ifdef _WIN32 - localtime_s(&local_time, &rawtime); -#else - localtime_r(&rawtime, &local_time); -#endif - strftime( - buffer.data(), 200, "%Y-%m-%d %H:%num_max_detections:%S", &local_time); - return py::dict( - "params"_a = params, - "counts"_a = std::vector({num_iou_thresholds, - num_recall_thresholds, - num_categories, - num_area_ranges, - num_max_detections}), - "date"_a = buffer, - "precision"_a = precisions_out, - "recall"_a = recalls_out, - "scores"_a = scores_out); -} - -} // namespace COCOeval diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/network_blocks.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/network_blocks.py deleted file mode 100644 index 68aacfc33208eab072422e0647742006984dfdfd..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/network_blocks.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/env python -# -*- encoding: utf-8 -*- -# Copyright (c) Megvii Inc. All rights reserved. 
- -import torch -import torch.nn as nn - - -class SiLU(nn.Module): - """export-friendly version of nn.SiLU()""" - - @staticmethod - def forward(x): - return x * torch.sigmoid(x) - - -def get_activation(name="silu", inplace=True): - if name == "silu": - module = nn.SiLU(inplace=inplace) - elif name == "relu": - module = nn.ReLU(inplace=inplace) - elif name == "lrelu": - module = nn.LeakyReLU(0.1, inplace=inplace) - else: - raise AttributeError("Unsupported act type: {}".format(name)) - return module - - -class BaseConv(nn.Module): - """A Conv2d -> Batchnorm -> silu/leaky relu block""" - - def __init__( - self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu" - ): - super().__init__() - # same padding - pad = (ksize - 1) // 2 - self.conv = nn.Conv2d( - in_channels, - out_channels, - kernel_size=ksize, - stride=stride, - padding=pad, - groups=groups, - bias=bias, - ) - self.bn = nn.BatchNorm2d(out_channels) - self.act = get_activation(act, inplace=True) - - def forward(self, x): - return self.act(self.bn(self.conv(x))) - - def fuseforward(self, x): - return self.act(self.conv(x)) - - -class DWConv(nn.Module): - """Depthwise Conv + Conv""" - - def __init__(self, in_channels, out_channels, ksize, stride=1, act="silu"): - super().__init__() - self.dconv = BaseConv( - in_channels, - in_channels, - ksize=ksize, - stride=stride, - groups=in_channels, - act=act, - ) - self.pconv = BaseConv( - in_channels, out_channels, ksize=1, stride=1, groups=1, act=act - ) - - def forward(self, x): - x = self.dconv(x) - return self.pconv(x) - - -class Bottleneck(nn.Module): - # Standard bottleneck - def __init__( - self, - in_channels, - out_channels, - shortcut=True, - expansion=0.5, - depthwise=False, - act="silu", - ): - super().__init__() - hidden_channels = int(out_channels * expansion) - Conv = DWConv if depthwise else BaseConv - self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) - self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act) - self.use_add = shortcut and in_channels == out_channels - - def forward(self, x): - y = self.conv2(self.conv1(x)) - if self.use_add: - y = y + x - return y - - -class ResLayer(nn.Module): - "Residual layer with `in_channels` inputs." 
- - def __init__(self, in_channels: int): - super().__init__() - mid_channels = in_channels // 2 - self.layer1 = BaseConv( - in_channels, mid_channels, ksize=1, stride=1, act="lrelu" - ) - self.layer2 = BaseConv( - mid_channels, in_channels, ksize=3, stride=1, act="lrelu" - ) - - def forward(self, x): - out = self.layer2(self.layer1(x)) - return x + out - - -class SPPBottleneck(nn.Module): - """Spatial pyramid pooling layer used in YOLOv3-SPP""" - - def __init__( - self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation="silu" - ): - super().__init__() - hidden_channels = in_channels // 2 - self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation) - self.m = nn.ModuleList( - [ - nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) - for ks in kernel_sizes - ] - ) - conv2_channels = hidden_channels * (len(kernel_sizes) + 1) - self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation) - - def forward(self, x): - x = self.conv1(x) - x = torch.cat([x] + [m(x) for m in self.m], dim=1) - x = self.conv2(x) - return x - - -class CSPLayer(nn.Module): - """C3 in yolov5, CSP Bottleneck with 3 convolutions""" - - def __init__( - self, - in_channels, - out_channels, - n=1, - shortcut=True, - expansion=0.5, - depthwise=False, - act="silu", - ): - """ - Args: - in_channels (int): input channels. - out_channels (int): output channels. - n (int): number of Bottlenecks. Default value: 1. - """ - # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - hidden_channels = int(out_channels * expansion) # hidden channels - self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) - self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) - self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act) - module_list = [ - Bottleneck( - hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act - ) - for _ in range(n) - ] - self.m = nn.Sequential(*module_list) - - def forward(self, x): - x_1 = self.conv1(x) - x_2 = self.conv2(x) - x_1 = self.m(x_1) - x = torch.cat((x_1, x_2), dim=1) - return self.conv3(x) - - -class Focus(nn.Module): - """Focus width and height information into channel space.""" - - def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="silu"): - super().__init__() - self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act) - - def forward(self, x): - # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) - patch_top_left = x[..., ::2, ::2] - patch_top_right = x[..., ::2, 1::2] - patch_bot_left = x[..., 1::2, ::2] - patch_bot_right = x[..., 1::2, 1::2] - x = torch.cat( - ( - patch_top_left, - patch_bot_left, - patch_top_right, - patch_bot_right, - ), - dim=1, - ) - return self.conv(x) diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/minify_dataset.py b/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/minify_dataset.py deleted file mode 100644 index e6095cecc8e99f231b80a3779b594cc29fd0ddda..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/minify_dataset.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from pathlib import Path - -import fire - - -def minify(src_dir: str, dest_dir: str, n: int): - """Write first n lines of each file f in src_dir to dest_dir/f""" - src_dir = Path(src_dir) - dest_dir = Path(dest_dir) - dest_dir.mkdir(exist_ok=True) - for path in src_dir.iterdir(): - new = [x.rstrip() for x in list(path.open().readlines())][:n] - dest_path = dest_dir.joinpath(path.name) - print(dest_path) - dest_path.open("w").write("\n".join(new)) - - -if __name__ == "__main__": - fire.Fire(minify) diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/onnx/summarization/README.md b/spaces/chendl/compositional_test/transformers/examples/research_projects/onnx/summarization/README.md deleted file mode 100644 index c43b0450ea2c4bfacb2e9f5e2af2b6b41d6b340d..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/onnx/summarization/README.md +++ /dev/null @@ -1,43 +0,0 @@ - - -# Bart + Beam Search to ONNX - -Author: [@fatcat-z](https://github.com/fatcat-z) - -This folder contains an example of exporting Bart + Beam Search generation (`BartForConditionalGeneration`) to ONNX. - -Beam Search contains a for-loop workflow, so we need to make them TorchScript-compatible for exporting to ONNX. This example shows how to make a Bart model be TorchScript-compatible by wrapping up it into a new model. In addition, some changes were made to the `beam_search()` function to make it TorchScript-compatible. - - -## How to run the example - -To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. 
To do this, execute the following steps in a new virtual environment: - -```bash -git clone https://github.com/huggingface/transformers -cd transformers -pip install '.[onnxruntime]' -``` -Then cd in this example folder and run -```bash -pip install -r requirements.txt -``` - -Now you can run the example command below to get the example ONNX file: - -```bash -python run_onnx_exporter.py --model_name_or_path facebook/bart-base -``` diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh b/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh deleted file mode 100644 index 6a1bafbdc9c8c944e407bb766a1e5fe6177b0404..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -export PYTHONPATH="../":"${PYTHONPATH}" - -export BS=32 -export GAS=1 - -python finetune.py \ - --learning_rate=3e-5 \ - --fp16 \ - --gpus 1 \ - --do_train \ - --do_predict \ - --val_check_interval 0.25 \ - --n_val 500 \ - --num_train_epochs 2 \ - --freeze_encoder --freeze_embeds --data_dir cnn_dm \ - --max_target_length 142 --val_max_target_length=142 \ - --train_batch_size=$BS --eval_batch_size=$BS --gradient_accumulation_steps=$GAS \ - --model_name_or_path sshleifer/student_cnn_12_6 \ - --tokenizer_name facebook/bart-large \ - --warmup_steps 500 \ - --output_dir distilbart-cnn-12-6 \ - "$@" - diff --git a/spaces/chenman/Meina-MeinaMix/app.py b/spaces/chenman/Meina-MeinaMix/app.py deleted file mode 100644 index 811eafa7aa381ad173de55aa27fb9c091aa0199f..0000000000000000000000000000000000000000 --- a/spaces/chenman/Meina-MeinaMix/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Meina/MeinaMix").launch() \ No newline at end of file diff --git a/spaces/chlab/interactive_kinematic_planet_detector/utils/__init__.py b/spaces/chlab/interactive_kinematic_planet_detector/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/client_ws.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/client_ws.py deleted file mode 100644 index 9a8ba84ca5082ad6d672c3837d4810e467a8080e..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/client_ws.py +++ /dev/null @@ -1,300 +0,0 @@ -"""WebSocket client for asyncio.""" - -import asyncio -from typing import Any, Optional, cast - -import async_timeout - -from .client_exceptions import ClientError -from .client_reqrep import ClientResponse -from .helpers import call_later, set_result -from .http import ( - WS_CLOSED_MESSAGE, - WS_CLOSING_MESSAGE, - WebSocketError, - WSCloseCode, - WSMessage, - WSMsgType, -) -from .http_websocket import WebSocketWriter # WSMessage -from .streams import EofStream, FlowControlDataQueue -from .typedefs import ( - DEFAULT_JSON_DECODER, - DEFAULT_JSON_ENCODER, - JSONDecoder, - JSONEncoder, -) - - -class ClientWebSocketResponse: - def __init__( - self, - reader: "FlowControlDataQueue[WSMessage]", - writer: WebSocketWriter, - protocol: Optional[str], - response: ClientResponse, - timeout: float, - autoclose: bool, - autoping: bool, - loop: asyncio.AbstractEventLoop, - *, 
- receive_timeout: Optional[float] = None, - heartbeat: Optional[float] = None, - compress: int = 0, - client_notakeover: bool = False, - ) -> None: - self._response = response - self._conn = response.connection - - self._writer = writer - self._reader = reader - self._protocol = protocol - self._closed = False - self._closing = False - self._close_code: Optional[int] = None - self._timeout = timeout - self._receive_timeout = receive_timeout - self._autoclose = autoclose - self._autoping = autoping - self._heartbeat = heartbeat - self._heartbeat_cb: Optional[asyncio.TimerHandle] = None - if heartbeat is not None: - self._pong_heartbeat = heartbeat / 2.0 - self._pong_response_cb: Optional[asyncio.TimerHandle] = None - self._loop = loop - self._waiting: Optional[asyncio.Future[bool]] = None - self._exception: Optional[BaseException] = None - self._compress = compress - self._client_notakeover = client_notakeover - - self._reset_heartbeat() - - def _cancel_heartbeat(self) -> None: - if self._pong_response_cb is not None: - self._pong_response_cb.cancel() - self._pong_response_cb = None - - if self._heartbeat_cb is not None: - self._heartbeat_cb.cancel() - self._heartbeat_cb = None - - def _reset_heartbeat(self) -> None: - self._cancel_heartbeat() - - if self._heartbeat is not None: - self._heartbeat_cb = call_later( - self._send_heartbeat, self._heartbeat, self._loop - ) - - def _send_heartbeat(self) -> None: - if self._heartbeat is not None and not self._closed: - # fire-and-forget a task is not perfect but maybe ok for - # sending ping. Otherwise we need a long-living heartbeat - # task in the class. - self._loop.create_task(self._writer.ping()) - - if self._pong_response_cb is not None: - self._pong_response_cb.cancel() - self._pong_response_cb = call_later( - self._pong_not_received, self._pong_heartbeat, self._loop - ) - - def _pong_not_received(self) -> None: - if not self._closed: - self._closed = True - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._exception = asyncio.TimeoutError() - self._response.close() - - @property - def closed(self) -> bool: - return self._closed - - @property - def close_code(self) -> Optional[int]: - return self._close_code - - @property - def protocol(self) -> Optional[str]: - return self._protocol - - @property - def compress(self) -> int: - return self._compress - - @property - def client_notakeover(self) -> bool: - return self._client_notakeover - - def get_extra_info(self, name: str, default: Any = None) -> Any: - """extra info from connection transport""" - conn = self._response.connection - if conn is None: - return default - transport = conn.transport - if transport is None: - return default - return transport.get_extra_info(name, default) - - def exception(self) -> Optional[BaseException]: - return self._exception - - async def ping(self, message: bytes = b"") -> None: - await self._writer.ping(message) - - async def pong(self, message: bytes = b"") -> None: - await self._writer.pong(message) - - async def send_str(self, data: str, compress: Optional[int] = None) -> None: - if not isinstance(data, str): - raise TypeError("data argument must be str (%r)" % type(data)) - await self._writer.send(data, binary=False, compress=compress) - - async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None: - if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError("data argument must be byte-ish (%r)" % type(data)) - await self._writer.send(data, binary=True, compress=compress) - - async def send_json( - self, - 
data: Any, - compress: Optional[int] = None, - *, - dumps: JSONEncoder = DEFAULT_JSON_ENCODER, - ) -> None: - await self.send_str(dumps(data), compress=compress) - - async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool: - # we need to break `receive()` cycle first, - # `close()` may be called from different task - if self._waiting is not None and not self._closed: - self._reader.feed_data(WS_CLOSING_MESSAGE, 0) - await self._waiting - - if not self._closed: - self._cancel_heartbeat() - self._closed = True - try: - await self._writer.close(code, message) - except asyncio.CancelledError: - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._response.close() - raise - except Exception as exc: - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._exception = exc - self._response.close() - return True - - if self._closing: - self._response.close() - return True - - while True: - try: - async with async_timeout.timeout(self._timeout): - msg = await self._reader.read() - except asyncio.CancelledError: - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._response.close() - raise - except Exception as exc: - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._exception = exc - self._response.close() - return True - - if msg.type == WSMsgType.CLOSE: - self._close_code = msg.data - self._response.close() - return True - else: - return False - - async def receive(self, timeout: Optional[float] = None) -> WSMessage: - while True: - if self._waiting is not None: - raise RuntimeError("Concurrent call to receive() is not allowed") - - if self._closed: - return WS_CLOSED_MESSAGE - elif self._closing: - await self.close() - return WS_CLOSED_MESSAGE - - try: - self._waiting = self._loop.create_future() - try: - async with async_timeout.timeout(timeout or self._receive_timeout): - msg = await self._reader.read() - self._reset_heartbeat() - finally: - waiter = self._waiting - self._waiting = None - set_result(waiter, True) - except (asyncio.CancelledError, asyncio.TimeoutError): - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - raise - except EofStream: - self._close_code = WSCloseCode.OK - await self.close() - return WSMessage(WSMsgType.CLOSED, None, None) - except ClientError: - self._closed = True - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - return WS_CLOSED_MESSAGE - except WebSocketError as exc: - self._close_code = exc.code - await self.close(code=exc.code) - return WSMessage(WSMsgType.ERROR, exc, None) - except Exception as exc: - self._exception = exc - self._closing = True - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - await self.close() - return WSMessage(WSMsgType.ERROR, exc, None) - - if msg.type == WSMsgType.CLOSE: - self._closing = True - self._close_code = msg.data - if not self._closed and self._autoclose: - await self.close() - elif msg.type == WSMsgType.CLOSING: - self._closing = True - elif msg.type == WSMsgType.PING and self._autoping: - await self.pong(msg.data) - continue - elif msg.type == WSMsgType.PONG and self._autoping: - continue - - return msg - - async def receive_str(self, *, timeout: Optional[float] = None) -> str: - msg = await self.receive(timeout) - if msg.type != WSMsgType.TEXT: - raise TypeError(f"Received message {msg.type}:{msg.data!r} is not str") - return cast(str, msg.data) - - async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes: - msg = await self.receive(timeout) - if msg.type != WSMsgType.BINARY: - raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes") - return 
cast(bytes, msg.data) - - async def receive_json( - self, - *, - loads: JSONDecoder = DEFAULT_JSON_DECODER, - timeout: Optional[float] = None, - ) -> Any: - data = await self.receive_str(timeout=timeout) - return loads(data) - - def __aiter__(self) -> "ClientWebSocketResponse": - return self - - async def __anext__(self) -> WSMessage: - msg = await self.receive() - if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED): - raise StopAsyncIteration - return msg diff --git a/spaces/cihyFjudo/fairness-paper-search/Cypheros TS-Doctor 1.22 Portable PORTABLE.md b/spaces/cihyFjudo/fairness-paper-search/Cypheros TS-Doctor 1.22 Portable PORTABLE.md deleted file mode 100644 index 4e7cd4afc66b41bdae12adc7081215e48701c9a8..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Cypheros TS-Doctor 1.22 Portable PORTABLE.md +++ /dev/null @@ -1,6 +0,0 @@ -

Cypheros TS-Doctor 1.22 Portable


Download Zip ››››› https://tinurli.com/2uwjIP



- - aaccfb2cb3
-
-
-

diff --git a/spaces/cihyFjudo/fairness-paper-search/Download Film Il Risveglio Del Dinosauro 3 Full Movie.md b/spaces/cihyFjudo/fairness-paper-search/Download Film Il Risveglio Del Dinosauro 3 Full Movie.md deleted file mode 100644 index de6d14cb14c35a6fe33b8671271e53bb36c7d06e..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Download Film Il Risveglio Del Dinosauro 3 Full Movie.md +++ /dev/null @@ -1,6 +0,0 @@ -

download film Il risveglio del dinosauro 3 full movie


Download Zip 🔗 https://tinurli.com/2uwkBM



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/cihyFjudo/fairness-paper-search/Nobel Font Free Download Mac The Ultimate Guide to Installing and Using This Classic Font.md b/spaces/cihyFjudo/fairness-paper-search/Nobel Font Free Download Mac The Ultimate Guide to Installing and Using This Classic Font.md deleted file mode 100644 index 5056188b060c3b81cc24066ea5707589ac7ec64d..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Nobel Font Free Download Mac The Ultimate Guide to Installing and Using This Classic Font.md +++ /dev/null @@ -1,24 +0,0 @@ -
-

Click to view font family "Nobel".Nobel BoldNobel Condensed BoldNobel Condensed RegularNobel LightNobel Regular Italic About the font Nobel RegularBe aware that the Nobel Regular font is free for personal knowledge and use only. However, you need to contact the author for commercial use or for any support.You can use the Nobel Regular to create interesting designs, covers, shop and store name and logos.Also, the Nobel Regular font is perfect for branding projects, housewares designs, product packaging, or simply as a stylish text overlay on any background image.FamilyNobelSub-familyRegularVersion001.000AuthorCompanySiteCopyrightLicenceFor personal use onlyLicence MaisFontesFor personal use onlyMost wanted:fontes gratis, baixar fontes gratis, font ttf, fontes para word gratis, fonts free Typography Nobel RegularTo evaluate the typeface, in this section there is a preview of which we select 31 special characters or with accents, 26 letters of the alphabet in upper and lower case and the numbering from 0 to 10. The letters will be the same after installed in your operating system, either for viewing or for printing. Nobel Regular font authorFurthermore, about all the content of this source, we also provide some additional information from the author and/or company. Therefore, if you need to clarify doubts about the license for personal or commercial use, please contact the author. Author not found. License informationThe Nobel Regular font provided is for typography style knowledge only. The download is completely free for personal use and the font cannot be used for commercial purposes.Therefore, if you wish to use this font for commercial purposes, you must purchase a license or contact the author for permission to use it. How to install the Nobel Regular fontYou can install the Nobel Regular font on any operating system. For safety and to ensure that there is no Malware or malicious software, downloading the source file é compressed in ZIP format. Fonts are in OTF (OpenType) or TTF (TrueType) format.

  • Click here to install the font on Microsoft Windows (all versions).
  • Click here to install the font on MAC OS.
Content related to Nobel RegularWe found new special content and prepared with all dedication! The content below is related to the source Nobel Regular. Click on the topic you want to learn more! Download Nobel FontsThe elegance of a good font can make a great visual impact to the end user. Discover Nobel fonts and download for free. Download variations of Nobel RegularAccording to the Nobel Regular font family, below, we have listed other fonts that may be useful for your project. We have made an improved selection especially for you.Random fonts: Click to load 3 other fontsNobel Bold Download this fontNobel Condensed Bold Download this fontNobel Condensed Regular Download this fontNobel Light Download this fontNobel Regular Italic Download this font Leave your feedback for the Nobel Regular fontFinally, it's very important that we know your feedback about the Nobel Regular font. Also tell us what type of project you used. Sharing your opinion and ideas will help many other participants in the MaisFontes community to improve the arts.



-

Nobel Font Free Download Mac


DOWNLOAD ->>->>->> https://tinurli.com/2uwiTB



-

There were 6 fonts included in the font family, but only this font was selected for the work of the Guggenheim Museum, paired with the Bangers font. This font would give a vintage look to your designs. This font family is also included in Google Fonts as well as Adobe Fonts. You can download this font for free from our website, but only for personal projects. Brandon Grotesque Font and Nobel Font are the fonts most similar to the Verlag font.

-


-

If you are looking for a free version of this typeface, we are providing a freeware font that you can use in your personal projects. You just need to click on the download button below to get the files for your operating system.

-

If you're on Windows and have access to your fonts library through the control or command center, you can simply find a free download of the font that you need and copy the files into your fonts library. Restart Adobe and you should have the new fonts.
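As a rough illustration of the copy step described above, here is a minimal Python sketch; the download location, the per-user fonts folder, and the file name are assumptions, and simply copying the file may not register the font with Windows, so treat this as an illustration rather than a full installer.

```python
import shutil
from pathlib import Path

# Assumed locations -- adjust to wherever you unpacked the ZIP and to your
# own user-level fonts folder; both paths here are illustrative only.
downloaded = Path.home() / "Downloads" / "Nobel-Regular.otf"
user_fonts = Path.home() / "AppData" / "Local" / "Microsoft" / "Windows" / "Fonts"

user_fonts.mkdir(parents=True, exist_ok=True)
shutil.copy2(downloaded, user_fonts / downloaded.name)
print(f"Copied {downloaded.name} to {user_fonts}")

# Note: copying alone may not register the font with Windows; right-clicking
# the .otf and choosing "Install" (or using the Fonts control panel) does.
```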

-

-

A total of 6 fonts are available in this font family, and the Guggenheim Museum used this font in combination with the Bangers font. Its light black weight is the best known for pairing. This would be an ideal font for giving a traditional look to your designs. Moreover, it can be used freely within Adobe Fonts and it is also available in Google Fonts.

-

It has 192 glyphs and provides more than 250 stylish characters that are very suitable for titling and headlines. Brandon Grotesque Font and Nobel Font are the fonts most similar to the Verlag font. We are providing a free version of this font family on this website, and you can use it in all your graphic designs.

-

This font family is free to use in any type of design project with your Adobe account, just as you can use many other typefaces in the font library. Without an Adobe account, however, you should contact its designer about a license.

-

NOBEL-REGULAR.OTF, Nobel Regular, nobel, Nobel Regular, nobel, Nobel-Regular.otf, Windows, OTF, font The fonts presented on this website are their authors' property, and are either freeware, shareware, demo versions or public domain.

-

Nobel-Bold NOBEL-BOLD.OTF , Nobel Bold , nobel , Nobel Bold , nobel , Nobel-Bold.otf , Windows , OTF , font The fonts presented on this website are their authors' property, and are either freeware, shareware, demo versions or public domain.

-

DTL Nobel font family: browse a full collection of fonts from the DTL Nobel font family. This family contains 8 fonts in styles such as T Bold, T Condensed Bold, T Light, T Regular, T Light Italic, T Condensed Regular, T Euro and T Italic.

-

Fortunately, there are a lot of excellent font repositories that do go to great lengths to curate their collections. One of our favorites is Google Fonts, which offers over 900 font families in several languages, all of them free:

-

The great thing about WhatsApp is that it's constantly updating its offering. When we first collated this list, WhatsApp was best for personal use as it didn't have any business options available. Since launching WhatsApp for business it can now cater to a new clientele, allowing companies to communicate and respond with customers in real-time, provided they download the free WhatsApp for business App. We'd still recommend using WhatsApp for personal use though, given that the service is only available for Android users. However, given the ever-evolving nature of WhatsApp for Business, it may still be worth giving it a go.

-

Additionally, Viber offers a similar service as FaceTime and WhatsApp, in that you can call anyone internationally for free, as long as they are also using the Viber app. Granted, it's not as popular as WhatsApp or FaceTime, but it's free to download, which could make for an easy and affordable alternative.

-

Viber's international calling app is probably best suited for personal use. Given the more expensive rates for international calling and the fact that most of your clients probably don't have the Viber app, it's safe to say it's not ideal to run a business on it. However, if you're trying to make personal calls, it's safe to say it would be much easier to get them to download the app to avoid the cost. Still, you can always use our free VoIP comparison tool to find something a system that's more tailored to your needs.

-

All rights for the fonts given on this website are reserved by their owners (authors, designers). The license given on the font page only represents received data. For detailed information, please read the files (e.g., readme.txt) from the archive or visit the website given by the author (designer), or contact them if you have any doubt.
If there is no reported author (designer) or license, it means that there is no information on the given font, but it does not mean that the font is free.

-

By creating a free account at Free-Ebooks(Opens in a new window), you can download up to five free titles each month. If you want more, a $50 VIP plan grants you an unlimited number of free books for life. But the free account is a good way to get started. Just watch out for the special offers and promotions as you sign up for your account.

-

Select a specific book to see customer reviews and other information. Click the Get for free button to download an ebook or audiobook. You can read or play the book directly at the website or snag the free Google Books app for iOS/iPadOS(Opens in a new window) or Android(Opens in a new window) to access the title on your mobile device.

-

Yes, you can write a book in Microsoft Word, and it may even help you write your book faster and increase your writing focus, since you can save time formatting and setting up your book layout. You can write a book in Word by downloading a free book template, by using the standard book template Word offers, or by setting up your own (which is more time-consuming).

-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Remo Software Activation Key 23.md b/spaces/cihyFjudo/fairness-paper-search/Remo Software Activation Key 23.md deleted file mode 100644 index 7c4332b53170c5bbba95c52d94fdd72931ec3683..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Remo Software Activation Key 23.md +++ /dev/null @@ -1,33 +0,0 @@ -
-

If the LAN cable is connected to a switch/hub in the network, and if the IP address is set accordingly on the TNC, then the transmission of data to and from the TNC can be started through the use of the machine's IP address with TNCremo, for example.
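As a small illustration of that prerequisite, the sketch below only checks that the control answers at the configured IP address before you point TNCremo at it; the address and port are placeholders, and this is not part of TNCremo or the HEIDENHAIN tools themselves.

```python
import socket

TNC_IP = "192.168.1.50"   # placeholder -- use the IP address configured on the control
PORT = 19000              # placeholder port; the actual service port depends on the control

def reachable(host: str, port: int, timeout: float = 2.0) -> bool:
    """Return True if a TCP connection to host:port succeeds within the timeout."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print("TNC reachable" if reachable(TNC_IP, PORT) else "TNC not reachable")
```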

-

There are several possibilities depending on what is needed:

- TNCremo PLUS -
This allows you to display a live image of your control.
For more information, please refer to the following link:
-shop-floor/connected-machining

- TeleService -
This lets you display a live image of your control and operate it.

- StateMonitor -
This gives you fundamental information about machine availability, whether a program is running, or whether an error is shown. For more information, please refer to the following link:
-portal.com/en/software/machine-data-collection/

- SRI State Reporting Interface -
With its State Reporting Interface (SRI) software option, HEIDENHAIN offers an interface for easily making machine operating statuses available to a higher-level MES or ERP system.

-

remo software activation key 23


Download Zip https://tinurli.com/2uwk3g



-

Newer controls (TNC 4xx or later) feature a GoldCap capacitor, which ensures that the data are buffered while the battery is being changed. However, the battery should not be removed for an extended period of time. Older controls do not have a capacitor, and their data are not saved when the control is switched off.

-

The software of the test version and full version is identical. It is enabled via a hardware dongle that is ordinarily built into the programming station keyboard. Whether this kind of dongle or an external dongle is connected depends on the given driver. The driver can be downloaded from our website free of charge but is dependent on the model. If you are unsure, then simply install both drivers.

-

The postprocessor must always be purchased from the CAM system manufacturer. Whether 2D, 3D (inclined machining), or 3D simultaneous machining is required depends on the machine, the NC software, and, of course, on the range of parts to be machined.

-

We try to list the most commonly encountered free software licenses on this page, but cannot list them all; we'll try our best to answer questions about free software licenses whether or not they are listed here. The licenses are more or less in alphabetical order within each section.

-

If you have questions about free software licenses, you can email us. Because our resources are limited, we do not answer questions that are meant to assist proprietary software development or distribution, and you'll likely get an answer faster if you ask a specific question that isn't already covered here or in our FAQ. We welcome knowledgeable volunteers who want to help answer licensing questions.

-

If you are contemplating writing a new license, please also contact us. The proliferation of different free software licenses is a significant problem in the free software community today, both for users and developers. We will do our best to help you find an existing free software license that meets your needs.

-

Please note that GPLv3 is not compatible with GPLv2 by itself. However, most software released under GPLv2 allows you to use the terms of later versions of the GPL as well. When this is the case, you can use the code under GPLv3 to make the desired combination. To learn more about compatibility between GNU licenses, please see our FAQ.

-

-

Please note that GPLv2 is, by itself, not compatible with GPLv3. However, most software released under GPLv2 allows you to use the terms of later versions of the GPL as well. When this is the case, you can use the code under GPLv3 to make the desired combination. To learn more about compatibility between GNU licenses, please see our FAQ.

-

This is the latest version of the LGPL: a free software license, but not a strong copyleft license, because it permits linking with nonfree modules. It is compatible with GPLv3. We recommend it for special circumstances only.

-

Please note that LGPLv3 is not compatible with GPLv2 by itself. However, most software released under GPLv2 allows you to use the terms of later versions of the GPL as well. When this is the case, you can use the code under GPLv3 to make the desired combination. To learn more about compatibility between GNU licenses, please see our FAQ.

-

This is the previous version of the LGPL: a free software license, but not a strong copyleft license, because it permits linking with nonfree modules. It is compatible with GPLv2 and GPLv3. We generally recommend the latest version of the LGPL, for special circumstances only. To learn more about how LGPLv2.1 is compatible with other GNU licenses, please see our FAQ.

-

This is a free software, copyleft license. Its terms effectively consist of the terms of GPLv3, with an additional paragraph in section 13 to allow users who interact with the licensed software over a network to receive the source for that program. We recommend that developers consider using the GNU AGPL for any software which will commonly be run over a network.

-

This is a lax, permissive free software license, compatible with the GNU GPL, which we recommend GNU packages use for README and other small supporting files. All developers can feel free to use it in similar situations.

-

This is a free software license, compatible with both GPLv2 and GPLv3. It is based on the modified BSD license, and adds a term expressly stating it does not grant you any patent licenses. Because of this, we encourage you to be careful about using software under this license; you should first consider whether the licensor might want to sue you for patent infringement. If the developer is refusing users patent licenses to set up a trap for you, it would be wise to avoid the program.

-

The eCos license version 2.0 is a GPL-compatible free software license. It consists of the GPL, plus an exception allowing linking to software not under the GPL. This license has the same disadvantages as the LGPL.

-

This is a free software license, and it is compatible with GPLv3. It is based on the Apache License 2.0; the scope of the patent license has changed so that when an organization's employee works on a project, the organization does not have to license all of its patents to recipients. This patent license and the indemnification clause in section 9 make this license incompatible with GPLv2.

-

This is a free software license, and compatible with the GNU GPL. The authors have assured us that developers who document changes as required by the GPL will also comply with the similar requirement in this license.

-

In the United States, these licenses are supposed to be interpreted based on what the author seems to intend. So they probably mean what they appear to mean. That would make them non-copyleft free software licenses and compatible with the GNU GPL. However, an unlucky choice of wording could give them a different meaning.

-

It's important to understand that the condition to distribute files under the MPL's terms only applies to the party that first creates and distributes the Larger Work. If it applied to their recipients as well, it would be a further restriction and incompatible with the GPL and AGPL. That said, when you make contributions to an existing project, we usually recommend that you keep your changes under the same license, even when you're not required to do so. If you receive a work under a GNU license where some files are also under the MPL, you should only remove the MPL from those files when there's a strong reason to justify it.

-

Software under previous versions of the MPL can be upgraded to version 2.0, but any software that isn't already available under one of the listed GNU licenses must be marked as Incompatible With Secondary Licenses. This means that software that's only available under previous versions of the MPL is still incompatible with the GPL and AGPL.

-

Being in the public domain is not a license; rather, it means the material is not copyrighted and no license is needed. Practically speaking, though, if a work is in the public domain, it might as well have an all-permissive non-copyleft free software license. Public domain material is compatible with the GNU GPL.

-

Previous versions of the SGI Free Software License B were not free software licenses, despite their name. However, they all included clauses that allow you to upgrade to new versions of the license, if you choose to do so. As a result, if a piece of software was released under any version of the SGI Free License B, you can use it under the terms of this free version.

-

If you want to use files covered by this License Agreement in your own software, that shouldn't be any problem, but we recommend that you also include a full copy of its text. Some of the files contain alternative license terms which are nonfree, or no licensing information at all, so including a copy of the License Agreement will help avoid confusion when others want to distribute your software. Of course, you'll also need to follow the conditions in this License Agreement for distributing the files, but those are very straightforward.

-

Please do not use this License Agreement for your own software. If you want to use a lax permissive license for your project, please use the Expat license for a small program and the Apache 2.0 license for a substantial program. These are far more common, and widely recognized in the free software community.

-

This is a lax, permissive non-copyleft free software license, compatible with the GNU GPL. The license does provide the ability to license patents along with the software work; however, we still recommend the Apache 2.0 license for avoiding patent treachery when choosing to put your work under a lax license.

-

The WxWidgets license is a GPL-compatible free software license. It consists of the GNU Lesser GPL 2.0 or any later version, plus an additional permission allowing binary distributions that use the library to be licensed under terms of the distributor's choice (including proprietary). It is a weak copyleft, even weaker than the LGPL, so we recommend it only in special circumstances.

-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/[Korean nuclear fusion reactor achieves 100 millionC for 30 seconds](1).md b/spaces/cihyFjudo/fairness-paper-search/[Korean nuclear fusion reactor achieves 100 millionC for 30 seconds](1).md deleted file mode 100644 index 495fd373c441a9baa84e9144738088c123427078..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/[Korean nuclear fusion reactor achieves 100 millionC for 30 seconds](1).md +++ /dev/null @@ -1,16 +0,0 @@ -
-

Select the 3D polyline to join: pick a 3D polyline
Select the other 3D polyline to join: pick a 3D polyline that has a common endpoint with the first
3 segments added to the polyline.
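The prompts above come from a 3D-polyline join routine. As a rough illustration of what such a join has to do internally, here is a minimal Python sketch (not AutoLISP, and not the routine quoted above) that merges two polylines sharing an endpoint; it assumes each polyline is simply a list of (x, y, z) vertex tuples, and the tolerance value is an assumption you would tune for your data.

```python
from typing import List, Tuple

Point = Tuple[float, float, float]

def join_3d_polylines(a: List[Point], b: List[Point], tol: float = 1e-6) -> List[Point]:
    """Join two 3D polylines that share an endpoint, returning one vertex list.

    The second polyline is reversed if necessary so its matching endpoint
    meets the end of the first; the duplicated shared vertex is dropped.
    """
    def same(p: Point, q: Point) -> bool:
        return all(abs(pi - qi) <= tol for pi, qi in zip(p, q))

    if same(a[-1], b[0]):          # tail of a meets head of b
        return a + b[1:]
    if same(a[-1], b[-1]):         # tail of a meets tail of b
        return a + list(reversed(b))[1:]
    if same(a[0], b[-1]):          # head of a meets tail of b
        return b + a[1:]
    if same(a[0], b[0]):           # head of a meets head of b
        return list(reversed(b)) + a[1:]
    raise ValueError("polylines do not share an endpoint within tolerance")

# Example: two short tracks meeting at (1, 1, 5)
left = [(0.0, 0.0, 0.0), (1.0, 1.0, 5.0)]
right = [(1.0, 1.0, 5.0), (2.0, 0.5, 5.5), (3.0, 0.0, 6.0)]
print(join_3d_polylines(left, right))
```

For GPS-logged tracks with thousands of vertices, as in the question further down, the same endpoint-matching idea applies; a looser tolerance helps absorb GPS noise.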

-

Join 3d Polyline Autocad


Download https://tinurli.com/2uwhZM



-

The lines are created (in a .dxf) by a GPS unit in the field, and represent tracks needing to be mapped. Typically we would drive out a track and then drive back on the same track; this gives me two 3D polylines. Because of GPS errors, it is better practice to log the roads/tracks twice. The lines can have from 2000 to 10000 vertices, so manually drawing is not an option. Any ideas? Thanks

-

Create a 3d object away from the two 3d objects you need to join, type SUBTRACT, select the two 3d objects as regions to subtract from, then select the created 3d object away from the two as the region to subtract to...

press enter

-

@James Maeding
Hi.
I'm using V21-2.02-01.
I've drawn 2 3DPOLYs that have 3 vertices each, with different Z values, but one vertex of each was the same (X, Y, Z).
I used the JOIN command and it did join them into one entity, keeping the joined entity as a 3DPOLY.
I did another test, drawing a POLYLINE (x, y and elevation) and a 3DPOLY with different Z values and one joint vertex; when using the JOIN command the joined object turned into a 3DPOLY and kept the Z values (the elevation of the 2D POLYLINE turned into Z).

-

-

Try to incorporate this small trick into your work and you will see a great improvement in the speed and quality of your drawings! If you want to learn more tricks concerning polylines, you can look at these:
Create Polyline around the Outside of Multiple Objects!
Edit Vertexes and use Linetype Polylines!
Coordinates of polyline? But How and Why?
Split polylines into equal segments!

-

This application joins and cleans up polylines and line entities to create closed boundaries and to close gaps that are not visible. The app includes 9 functions, each with additional settings and sub-routines for quick and automatic joining, trimming, cleaning and closing, plus 9 user-defined settings.

1. Join Polylines Automatically
2. Checks Polyline Closure
3. Trims Extended Objects
4. Trims and Joins selected lines
5. Joins Multiple Polyline Endpoints
6. Auto Link
7. Joins Broken Tapered Polyline(s)
8. Closes Opened Polygon(s)
9. Trims Lines and Polylines in/out Polygon(s)


$50.00 to purchase from within the app using a Paypal account OR make a one-time purchase with a Debit or Credit Card through Paypal (no Paypal account required)

Note: This app uses a custom installer (and not the standard App Store installer).

-

There are various polyline types in AutoCAD®, the most common of which is usually the LWPolyline (lightweight polyline). It can be confusing because the LWPolyline and the old Polyline entity used way back in the early days of AutoCAD® are both shown as a "Polyline" in the Properties window. Chances are though, unless you're working on some nasty generated drawing, you're probably using LWPolylines.

-

The third polyline type is the 3D polyline, which as the name implies is a 3D version. Both the LWPolyline and the Polyline only permit the creation of geometry on a flat plane (UCS), but the 3D polyline allows points anywhere in 3D space.

-

Converting between the various types therefore has obvious difficulty, because going from 3D to 2D means you're going to have to remove some of the 3D information from the polyline, and AutoCAD® could interpret how to do this in many ways. Therefore historically there have not been native commands to convert between the polyline types.
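As a rough sketch of just one of those possible interpretations, the snippet below simply projects every vertex onto a single elevation and throws the rest of the Z information away; the vertex-list representation and the function name are assumptions for illustration, not an AutoCAD API.

```python
from typing import List, Tuple

Point3D = Tuple[float, float, float]

def flatten_polyline(vertices: List[Point3D], elevation: float = 0.0) -> List[Point3D]:
    """Project every vertex onto a single elevation, discarding its own Z.

    This is only one of several defensible interpretations (keep the first
    vertex's Z, average the Z values, project onto the current UCS, ...),
    which is exactly why there is no single obvious conversion.
    """
    return [(x, y, elevation) for x, y, _z in vertices]

track = [(0.0, 0.0, 10.0), (5.0, 2.0, 12.5), (9.0, 4.0, 11.0)]
print(flatten_polyline(track))  # [(0.0, 0.0, 0.0), (5.0, 2.0, 0.0), (9.0, 4.0, 0.0)]
```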

-

The FLATTEN command is an option for making the polyline flat (i.e., visibly the same as before, but drawn as a 2D line on whatever UCS you're working on). I have had troubles with FLATTEN in the past however, because I think it does some odd stuff sometimes with merging lines and approximating... so I tend to avoid it where possible.

-

The tool will then ask you to select your first line segment - which, if drawn using the line tool, will be a single line - and it will then ask to convert this segment to a polyline as shown below - say Yes

-
-
\ No newline at end of file diff --git a/spaces/cjayic/sovits-overwatch2/modules.py b/spaces/cjayic/sovits-overwatch2/modules.py deleted file mode 100644 index eb2d8e5f84c94a4f578280cef3d6327ac0d6a773..0000000000000000000000000000000000000000 --- a/spaces/cjayic/sovits-overwatch2/modules.py +++ /dev/null @@ -1,449 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class 
WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = 
F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class 
ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x - -# modules from StarGANv2-VC - -class DownSample(nn.Module): - def __init__(self, layer_type): - super().__init__() - self.layer_type = layer_type - - def forward(self, x): - if self.layer_type == 'none': - return x - elif self.layer_type == 'timepreserve': - return F.avg_pool2d(x, (2, 1)) - elif self.layer_type == 'half': - return F.avg_pool2d(x, 2) - else: - raise RuntimeError('Got unexpected donwsampletype %s, expected is [none, timepreserve, half]' % self.layer_type) - -class ResBlock3(nn.Module): - def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), - normalize=False, downsample='none'): - super().__init__() - self.actv = actv - self.normalize = normalize - self.downsample = DownSample(downsample) - self.learned_sc = dim_in != dim_out - self._build_weights(dim_in, dim_out) - - def _build_weights(self, dim_in, dim_out): - self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1) - self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1) - if self.normalize: - self.norm1 = nn.InstanceNorm2d(dim_in, affine=True) - self.norm2 = nn.InstanceNorm2d(dim_in, affine=True) - if self.learned_sc: - self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False) - - def _shortcut(self, x): - if self.learned_sc: - x = self.conv1x1(x) - if self.downsample: - x = self.downsample(x) - return x - - def _residual(self, x): - if self.normalize: - x = self.norm1(x) - x = self.actv(x) - x = self.conv1(x) - x = self.downsample(x) - if self.normalize: - x = self.norm2(x) - x = self.actv(x) - x = self.conv2(x) - return x - - def forward(self, x): - x = self._shortcut(x) + self._residual(x) - return x / math.sqrt(2) # unit variance diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/transformPen.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/transformPen.py deleted file mode 100644 index 2e572f612e6a29d0a782a0b278deaed9f98f5127..0000000000000000000000000000000000000000 --- 
a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/transformPen.py +++ /dev/null @@ -1,111 +0,0 @@ -from fontTools.pens.filterPen import FilterPen, FilterPointPen - - -__all__ = ["TransformPen", "TransformPointPen"] - - -class TransformPen(FilterPen): - - """Pen that transforms all coordinates using a Affine transformation, - and passes them to another pen. - """ - - def __init__(self, outPen, transformation): - """The 'outPen' argument is another pen object. It will receive the - transformed coordinates. The 'transformation' argument can either - be a six-tuple, or a fontTools.misc.transform.Transform object. - """ - super(TransformPen, self).__init__(outPen) - if not hasattr(transformation, "transformPoint"): - from fontTools.misc.transform import Transform - - transformation = Transform(*transformation) - self._transformation = transformation - self._transformPoint = transformation.transformPoint - self._stack = [] - - def moveTo(self, pt): - self._outPen.moveTo(self._transformPoint(pt)) - - def lineTo(self, pt): - self._outPen.lineTo(self._transformPoint(pt)) - - def curveTo(self, *points): - self._outPen.curveTo(*self._transformPoints(points)) - - def qCurveTo(self, *points): - if points[-1] is None: - points = self._transformPoints(points[:-1]) + [None] - else: - points = self._transformPoints(points) - self._outPen.qCurveTo(*points) - - def _transformPoints(self, points): - transformPoint = self._transformPoint - return [transformPoint(pt) for pt in points] - - def closePath(self): - self._outPen.closePath() - - def endPath(self): - self._outPen.endPath() - - def addComponent(self, glyphName, transformation): - transformation = self._transformation.transform(transformation) - self._outPen.addComponent(glyphName, transformation) - - -class TransformPointPen(FilterPointPen): - """PointPen that transforms all coordinates using a Affine transformation, - and passes them to another PointPen. - - >>> from fontTools.pens.recordingPen import RecordingPointPen - >>> rec = RecordingPointPen() - >>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5)) - >>> v = iter(rec.value) - >>> pen.beginPath(identifier="contour-0") - >>> next(v) - ('beginPath', (), {'identifier': 'contour-0'}) - >>> pen.addPoint((100, 100), "line") - >>> next(v) - ('addPoint', ((190, 205), 'line', False, None), {}) - >>> pen.endPath() - >>> next(v) - ('endPath', (), {}) - >>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0") - >>> next(v) - ('addComponent', ('a', ), {'identifier': 'component-0'}) - """ - - def __init__(self, outPointPen, transformation): - """The 'outPointPen' argument is another point pen object. - It will receive the transformed coordinates. - The 'transformation' argument can either be a six-tuple, or a - fontTools.misc.transform.Transform object. 
- """ - super().__init__(outPointPen) - if not hasattr(transformation, "transformPoint"): - from fontTools.misc.transform import Transform - - transformation = Transform(*transformation) - self._transformation = transformation - self._transformPoint = transformation.transformPoint - - def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): - self._outPen.addPoint( - self._transformPoint(pt), segmentType, smooth, name, **kwargs - ) - - def addComponent(self, baseGlyphName, transformation, **kwargs): - transformation = self._transformation.transform(transformation) - self._outPen.addComponent(baseGlyphName, transformation, **kwargs) - - -if __name__ == "__main__": - from fontTools.pens.basePen import _TestPen - - pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0)) - pen.moveTo((0, 0)) - pen.lineTo((0, 100)) - pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) - pen.closePath() diff --git a/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/suncc/stdatomic.h b/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/suncc/stdatomic.h deleted file mode 100644 index 0cf89e0f78d79d48b411f6e4a8450030aee757d7..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/suncc/stdatomic.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef COMPAT_ATOMICS_SUNCC_STDATOMIC_H -#define COMPAT_ATOMICS_SUNCC_STDATOMIC_H - -#include -#include -#include -#include - -#define ATOMIC_FLAG_INIT 0 - -#define ATOMIC_VAR_INIT(value) (value) - -#define atomic_init(obj, value) \ -do { \ - *(obj) = (value); \ -} while(0) - -#define kill_dependency(y) ((void)0) - -#define atomic_thread_fence(order) \ - __machine_rw_barrier(); - -#define atomic_signal_fence(order) \ - ((void)0) - -#define atomic_is_lock_free(obj) 0 - -typedef intptr_t atomic_flag; -typedef intptr_t atomic_bool; -typedef intptr_t atomic_char; -typedef intptr_t atomic_schar; -typedef intptr_t atomic_uchar; -typedef intptr_t atomic_short; -typedef intptr_t atomic_ushort; -typedef intptr_t atomic_int; -typedef intptr_t atomic_uint; -typedef intptr_t atomic_long; -typedef intptr_t atomic_ulong; -typedef intptr_t atomic_llong; -typedef intptr_t atomic_ullong; -typedef intptr_t atomic_wchar_t; -typedef intptr_t atomic_int_least8_t; -typedef intptr_t atomic_uint_least8_t; -typedef intptr_t atomic_int_least16_t; -typedef intptr_t atomic_uint_least16_t; -typedef intptr_t atomic_int_least32_t; -typedef intptr_t atomic_uint_least32_t; -typedef intptr_t atomic_int_least64_t; -typedef intptr_t atomic_uint_least64_t; -typedef intptr_t atomic_int_fast8_t; -typedef intptr_t atomic_uint_fast8_t; -typedef intptr_t atomic_int_fast16_t; -typedef intptr_t atomic_uint_fast16_t; -typedef intptr_t atomic_int_fast32_t; -typedef intptr_t atomic_uint_fast32_t; -typedef intptr_t atomic_int_fast64_t; -typedef intptr_t atomic_uint_fast64_t; -typedef intptr_t atomic_intptr_t; -typedef intptr_t atomic_uintptr_t; -typedef intptr_t atomic_size_t; -typedef intptr_t atomic_ptrdiff_t; -typedef intptr_t atomic_intmax_t; -typedef intptr_t atomic_uintmax_t; - -static inline void atomic_store(intptr_t *object, intptr_t desired) -{ - *object = desired; - __machine_rw_barrier(); -} - -#define atomic_store_explicit(object, desired, order) \ - atomic_store(object, desired) - -static inline intptr_t atomic_load(intptr_t *object) -{ - __machine_rw_barrier(); - return *object; -} - -#define atomic_load_explicit(object, order) \ - atomic_load(object) - -#define atomic_exchange(object, desired) \ - atomic_swap_ptr(object, desired) - -#define atomic_exchange_explicit(object, desired, order) \ - atomic_exchange(object, desired) - -static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *expected, - intptr_t desired) -{ - intptr_t old = *expected; - *expected = (intptr_t)atomic_cas_ptr(object, (void *)old, (void *)desired); - return *expected == old; -} - -#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \ - atomic_compare_exchange_strong(object, expected, desired) - -#define atomic_compare_exchange_weak(object, expected, desired) \ - atomic_compare_exchange_strong(object, expected, desired) - -#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \ - atomic_compare_exchange_weak(object, expected, desired) - -static inline intptr_t atomic_fetch_add(intptr_t *object, intptr_t operand) -{ - return atomic_add_ptr_nv(object, operand) - operand; -} - -#define atomic_fetch_sub(object, operand) \ - atomic_fetch_add(object, -(operand)) - -static inline intptr_t atomic_fetch_or(intptr_t *object, intptr_t 
operand) -{ - intptr_t old; - do { - old = atomic_load(object); - } while (!atomic_compare_exchange_strong(object, old, old | operand)); - return old; -} - -static inline intptr_t atomic_fetch_xor(intptr_t *object, intptr_t operand) -{ - intptr_t old; - do { - old = atomic_load(object); - } while (!atomic_compare_exchange_strong(object, old, old ^ operand)); - return old; -} - -static inline intptr_t atomic_fetch_and(intptr_t *object, intptr_t operand) -{ - intptr_t old; - do { - old = atomic_load(object); - } while (!atomic_compare_exchange_strong(object, old, old & operand)); - return old; -} - -#define atomic_fetch_add_explicit(object, operand, order) \ - atomic_fetch_add(object, operand) - -#define atomic_fetch_sub_explicit(object, operand, order) \ - atomic_fetch_sub(object, operand) - -#define atomic_fetch_or_explicit(object, operand, order) \ - atomic_fetch_or(object, operand) - -#define atomic_fetch_xor_explicit(object, operand, order) \ - atomic_fetch_xor(object, operand) - -#define atomic_fetch_and_explicit(object, operand, order) \ - atomic_fetch_and(object, operand) - -#define atomic_flag_test_and_set(object) \ - atomic_exchange(object, 1) - -#define atomic_flag_test_and_set_explicit(object, order) \ - atomic_flag_test_and_set(object) - -#define atomic_flag_clear(object) \ - atomic_store(object, 0) - -#define atomic_flag_clear_explicit(object, order) \ - atomic_flag_clear(object) - -#endif /* COMPAT_ATOMICS_SUNCC_STDATOMIC_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/doc/style.min.css b/spaces/colakin/video-generater/public/ffmpeg/doc/style.min.css deleted file mode 100644 index 6843fda57d03d5ed37c7d952248e6b042755e2df..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/doc/style.min.css +++ /dev/null @@ -1,23 +0,0 @@ -/*! -The MIT License (MIT) - -Copyright (c) 2014 Barbara Lepage - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - */body{background-color:#313131;color:#e6e6e6;text-align:justify}body, h1, h2, h3, h4, h5, h6{font-family:"Lucida Grande","Lucida Sans Unicode","Lucida Sans","Helvetica Neue",Helvetica,Verdana,Tahoma,sans-serif}a{color:#4cae4c}a strong{color:#e6e6e6}a:hover{color:#7fc77f}a:hover strong{color:#4cae4c}main{width:100% ! 
important;min-height:600px;margin:auto}h1, h2, h3, h4{font-weight:bold;text-align:left}h1, h2, h3{color:#bebebe}h1 strong, h2 strong, h3 strong{color:#e6e6e6}h4, h5, h6{color:#3c8b3c}h1{border-bottom:4px #bebebe solid;padding:20px 2%}h3{border-bottom:2px #bebebe solid;padding:15px 1%}h4{border-bottom:1px solid #e6e6e6;padding:10px 0;margin:20px 0;color:#e6e6e6}.list-group .list-group-item{background-color:#3e3e3e;border-color:black}.list-group.list-group-big .list-group-item{padding:25px}.list-group a.list-group-item{color:#7fc77f}.list-group a.list-group-item:hover{background-color:#313131;color:#4cae4c}.well{background-color:#242424;border-color:black;color:#bebebe}.well strong{color:#e6e6e6}.well code{background-color:#313131}.well hr{border-color:#3c8b3c}.well h3{margin:5px 0 15px 0;border:0;padding:0}.well a{color:#4cae4c}.well a.btn{color:white}.well small{display:block;padding:0 10px;font-style:italic}.well.example{padding-top:40px;margin-bottom:130px}.well.example pre{margin:50px;margin-bottom:30px;font-size:1.5em}.well.example .btn{margin-right:50px;margin-bottom:20px}.well.well-with-icon{min-height:136px}.well.well-with-icon .pull-right,.well.well-with-icon .pull-left{background-color:#4cae4c;color:#e6e6e6;padding:10px;border-radius:5px;margin:5px}.well.well-with-icon .pull-right{margin-left:20px}.well.well-with-icon .pull-left{margin-right:20px}a.well{display:block}a.well:hover{text-decoration:none;opacity:0.8}.info, .warning{margin:10px;padding:10px;background-color:#3e3e3e;color:#e6e6e6}.info code, .warning code{background-color:#313131}.info{border-left:10px #4cae4c solid}.warning{border-left:10px #ae4c4c solid}.with-icon{padding:30px}.with-icon .pull-left{padding-right:30px}.with-icon .pull-right{padding-left:30px}dd{margin-left:20px}code{background-color:#242424;color:#7fc77f;display:inline-block;margin:5px}.table{margin:20px 0;border-radius:4px}.table th,.table td,.table tr{border:1px solid #171717}.table tr th{background-color:#3e3e3e;border-bottom:2px solid #e6e6e6}.table tr:nth-child(odd){background-color:#242424}#sidebar-wrapper, .navbar{background-color:#171717;overflow-x:hidden}#sidebar-wrapper .sidebar-brand img,#sidebar-wrapper .navbar-brand img, .navbar .sidebar-brand img, .navbar .navbar-brand img{opacity:0.6;margin-right:8px}#sidebar-wrapper .sidebar-brand:hover,#sidebar-wrapper .navbar-brand:hover, .navbar .sidebar-brand:hover, .navbar .navbar-brand:hover{color:#fff}#sidebar-wrapper .sidebar-brand:hover img,#sidebar-wrapper .navbar-brand:hover img, .navbar .sidebar-brand:hover img, .navbar .navbar-brand:hover img{opacity:1}#sidebar-wrapper .sidebar-nav li ul, .navbar .sidebar-nav li ul{list-style-type:none;padding:0}#sidebar-wrapper .sidebar-nav li ul li, .navbar .sidebar-nav li ul li{line-height:20px}#sidebar-wrapper .sidebar-nav li ul li a, .navbar .sidebar-nav li ul li a{padding-left:20px}.content-header{height:auto;background-color:#242424}.content-header h1{color:#e6e6e6;display:block;margin:0;margin-bottom:20px;line-height:normal;border-bottom:none}#download h4, #index h4{margin-top:180px}#download h4.first, #index h4.first{margin-top:20px}#download h4.first small, #index h4.first small{color:inherit;font-size:1em}#download .btn-download-wrapper, #index .btn-download-wrapper{text-align:center;margin:160px auto}#download .btn-download-wrapper .btn, #index .btn-download-wrapper .btn{font-size:3em;padding:3%;display:inline-block;margin-bottom:5px}#download .btn-download-wrapper small, #index .btn-download-wrapper small{display:block;font-size:0.4em}#download 
h2.description, #index h2.description{color:#e6e6e6;font-size:2em;font-weight:bold;margin:120px 50px;line-height:2em}#download h2.description .label, #index h2.description .label{font-size:0.5em}#download .btn-download-wrapper{margin:40px auto}#download .os-selector{text-align:center;color:#e6e6e6;margin:30px 0}#download .os-selector a.btn-build{color:#e6e6e6;display:block;padding:20px;border-radius:2px}#download .os-selector .btn-build[href="#build-linux"]{background-color:#e43}#download .os-selector .btn-build[href="#build-linux"]:hover{color:#e43;background-color:#e6e6e6}#download .os-selector .btn-build[href="#build-windows"]{background-color:#06a}#download .os-selector .btn-build[href="#build-windows"]:hover{color:#06a;background-color:#e6e6e6}#download .os-selector .btn-build[href="#build-mac"]{background-color:darkgrey}#download .os-selector .btn-build[href="#build-mac"]:hover{color:darkgrey;background-color:#e6e6e6}#download .os-selector .tab-content{margin-top:20px}#download .os-selector #build-linux h3{color:#e43}#download .os-selector #build-windows h3{color:#06a}#download .os-selector #build-mac h3{color:darkgrey}footer{background-color:#242424;border-top:1px #101010 solid;padding:20px 0%}footer a{display:block}footer img[alt="FFmpeg"]{width:50%;display:block;margin:auto} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vorbisdsp_init_arm.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vorbisdsp_init_arm.c deleted file mode 100644 index acda34f46841fffe720fb7d2f609f4256766d972..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vorbisdsp_init_arm.c +++ /dev/null @@ -1,37 +0,0 @@ -/* - * ARM NEON optimised DSP functions - * Copyright (c) 2008 Mans Rullgard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/attributes.h" -#include "libavutil/cpu.h" -#include "libavutil/arm/cpu.h" -#include "libavcodec/vorbisdsp.h" - -void ff_vorbis_inverse_coupling_neon(float *mag, float *ang, - ptrdiff_t blocksize); - -av_cold void ff_vorbisdsp_init_arm(VorbisDSPContext *c) -{ - int cpu_flags = av_get_cpu_flags(); - - if (have_neon(cpu_flags)) { - c->vorbis_inverse_coupling = ff_vorbis_inverse_coupling_neon; - } -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cfhddsp.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cfhddsp.c deleted file mode 100644 index a141db5246999c35e52d251f54d3125483bbb710..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cfhddsp.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2015-2016 Kieran Kunhya - * - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/attributes.h" -#include "libavutil/common.h" - -#include "cfhddsp.h" - -static av_always_inline void filter(int16_t *output, ptrdiff_t out_stride, - const int16_t *low, ptrdiff_t low_stride, - const int16_t *high, ptrdiff_t high_stride, - int len, int clip) -{ - int16_t tmp; - int i; - - tmp = (11*low[0*low_stride] - 4*low[1*low_stride] + low[2*low_stride] + 4) >> 3; - output[(2*0+0)*out_stride] = (tmp + high[0*high_stride]) >> 1; - if (clip) - output[(2*0+0)*out_stride] = av_clip_uintp2_c(output[(2*0+0)*out_stride], clip); - - tmp = ( 5*low[0*low_stride] + 4*low[1*low_stride] - low[2*low_stride] + 4) >> 3; - output[(2*0+1)*out_stride] = (tmp - high[0*high_stride]) >> 1; - if (clip) - output[(2*0+1)*out_stride] = av_clip_uintp2_c(output[(2*0+1)*out_stride], clip); - - for (i = 1; i < len - 1; i++) { - tmp = (low[(i-1)*low_stride] - low[(i+1)*low_stride] + 4) >> 3; - output[(2*i+0)*out_stride] = (tmp + low[i*low_stride] + high[i*high_stride]) >> 1; - if (clip) - output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip); - - tmp = (low[(i+1)*low_stride] - low[(i-1)*low_stride] + 4) >> 3; - output[(2*i+1)*out_stride] = (tmp + low[i*low_stride] - high[i*high_stride]) >> 1; - if (clip) - output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip); - } - - tmp = ( 5*low[i*low_stride] + 4*low[(i-1)*low_stride] - low[(i-2)*low_stride] + 4) >> 3; - output[(2*i+0)*out_stride] = (tmp + high[i*high_stride]) >> 1; - if (clip) - output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip); - - tmp = (11*low[i*low_stride] - 4*low[(i-1)*low_stride] + low[(i-2)*low_stride] + 4) >> 3; - output[(2*i+1)*out_stride] = (tmp - high[i*high_stride]) >> 1; - if (clip) - output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip); -} - -static void vert_filter(int16_t *output, ptrdiff_t out_stride, - const int16_t *low, ptrdiff_t low_stride, - const int16_t *high, ptrdiff_t high_stride, - int width, int height) -{ - for (int i = 0; i < width; i++) { - filter(output, out_stride, low, low_stride, high, high_stride, height, 0); - low++; - high++; - output++; - } -} - -static void horiz_filter(int16_t *output, ptrdiff_t ostride, - const int16_t *low, ptrdiff_t lstride, - const int16_t *high, ptrdiff_t hstride, - int width, int height) -{ - for (int i = 0; i < height; i++) { - filter(output, 1, low, 1, high, 1, width, 0); - low += lstride; - high += hstride; - output += ostride * 2; - } -} - -static void horiz_filter_clip(int16_t *output, const int16_t *low, const int16_t *high, - int width, int clip) -{ - filter(output, 1, low, 1, high, 1, width, clip); -} - -static void horiz_filter_clip_bayer(int16_t *output, const int16_t *low, const int16_t *high, - int width, int clip) -{ - 
filter(output, 2, low, 1, high, 1, width, clip); -} - -av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer) -{ - c->horiz_filter = horiz_filter; - c->vert_filter = vert_filter; - - if (bayer) - c->horiz_filter_clip = horiz_filter_clip_bayer; - else - c->horiz_filter_clip = horiz_filter_clip; - -#if ARCH_X86 - ff_cfhddsp_init_x86(c, depth, bayer); -#endif -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/codec_par.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/codec_par.h deleted file mode 100644 index f51d27c5908449354badf3e00cf5e3002a4f97ac..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/codec_par.h +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Codec parameters public API - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_CODEC_PAR_H -#define AVCODEC_CODEC_PAR_H - -#include - -#include "libavutil/avutil.h" -#include "libavutil/channel_layout.h" -#include "libavutil/rational.h" -#include "libavutil/pixfmt.h" - -#include "codec_id.h" - -/** - * @addtogroup lavc_core - * @{ - */ - -enum AVFieldOrder { - AV_FIELD_UNKNOWN, - AV_FIELD_PROGRESSIVE, - AV_FIELD_TT, ///< Top coded_first, top displayed first - AV_FIELD_BB, ///< Bottom coded first, bottom displayed first - AV_FIELD_TB, ///< Top coded first, bottom displayed first - AV_FIELD_BT, ///< Bottom coded first, top displayed first -}; - -/** - * This struct describes the properties of an encoded stream. - * - * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must - * be allocated with avcodec_parameters_alloc() and freed with - * avcodec_parameters_free(). - */ -typedef struct AVCodecParameters { - /** - * General type of the encoded data. - */ - enum AVMediaType codec_type; - /** - * Specific type of the encoded data (the codec used). - */ - enum AVCodecID codec_id; - /** - * Additional information about the codec (corresponds to the AVI FOURCC). - */ - uint32_t codec_tag; - - /** - * Extra binary data needed for initializing the decoder, codec-dependent. - * - * Must be allocated with av_malloc() and will be freed by - * avcodec_parameters_free(). The allocated size of extradata must be at - * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding - * bytes zeroed. - */ - uint8_t *extradata; - /** - * Size of the extradata content in bytes. - */ - int extradata_size; - - /** - * - video: the pixel format, the value corresponds to enum AVPixelFormat. - * - audio: the sample format, the value corresponds to enum AVSampleFormat. - */ - int format; - - /** - * The average bitrate of the encoded data (in bits per second). - */ - int64_t bit_rate; - - /** - * The number of bits per sample in the codedwords. - * - * This is basically the bitrate per sample. 
It is mandatory for a bunch of - * formats to actually decode them. It's the number of bits for one sample in - * the actual coded bitstream. - * - * This could be for example 4 for ADPCM - * For PCM formats this matches bits_per_raw_sample - * Can be 0 - */ - int bits_per_coded_sample; - - /** - * This is the number of valid bits in each output sample. If the - * sample format has more bits, the least significant bits are additional - * padding bits, which are always 0. Use right shifts to reduce the sample - * to its actual size. For example, audio formats with 24 bit samples will - * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32. - * To get the original sample use "(int32_t)sample >> 8"." - * - * For ADPCM this might be 12 or 16 or similar - * Can be 0 - */ - int bits_per_raw_sample; - - /** - * Codec-specific bitstream restrictions that the stream conforms to. - */ - int profile; - int level; - - /** - * Video only. The dimensions of the video frame in pixels. - */ - int width; - int height; - - /** - * Video only. The aspect ratio (width / height) which a single pixel - * should have when displayed. - * - * When the aspect ratio is unknown / undefined, the numerator should be - * set to 0 (the denominator may have any value). - */ - AVRational sample_aspect_ratio; - - /** - * Video only. The order of the fields in interlaced video. - */ - enum AVFieldOrder field_order; - - /** - * Video only. Additional colorspace characteristics. - */ - enum AVColorRange color_range; - enum AVColorPrimaries color_primaries; - enum AVColorTransferCharacteristic color_trc; - enum AVColorSpace color_space; - enum AVChromaLocation chroma_location; - - /** - * Video only. Number of delayed frames. - */ - int video_delay; - -#if FF_API_OLD_CHANNEL_LAYOUT - /** - * Audio only. The channel layout bitmask. May be 0 if the channel layout is - * unknown or unspecified, otherwise the number of bits set must be equal to - * the channels field. - * @deprecated use ch_layout - */ - attribute_deprecated - uint64_t channel_layout; - /** - * Audio only. The number of audio channels. - * @deprecated use ch_layout.nb_channels - */ - attribute_deprecated - int channels; -#endif - /** - * Audio only. The number of audio samples per second. - */ - int sample_rate; - /** - * Audio only. The number of bytes per coded audio frame, required by some - * formats. - * - * Corresponds to nBlockAlign in WAVEFORMATEX. - */ - int block_align; - /** - * Audio only. Audio frame size, if known. Required by some formats to be static. - */ - int frame_size; - - /** - * Audio only. The amount of padding (in samples) inserted by the encoder at - * the beginning of the audio. I.e. this number of leading decoded samples - * must be discarded by the caller to get the original audio without leading - * padding. - */ - int initial_padding; - /** - * Audio only. The amount of padding (in samples) appended by the encoder to - * the end of the audio. I.e. this number of decoded samples must be - * discarded by the caller from the end of the stream to get the original - * audio without any trailing padding. - */ - int trailing_padding; - /** - * Audio only. Number of samples to skip after a discontinuity. - */ - int seek_preroll; - - /** - * Audio only. The channel layout and number of channels. - */ - AVChannelLayout ch_layout; -} AVCodecParameters; - -/** - * Allocate a new AVCodecParameters and set its fields to default values - * (unknown/invalid/0). The returned struct must be freed with - * avcodec_parameters_free(). 
- */ -AVCodecParameters *avcodec_parameters_alloc(void); - -/** - * Free an AVCodecParameters instance and everything associated with it and - * write NULL to the supplied pointer. - */ -void avcodec_parameters_free(AVCodecParameters **par); - -/** - * Copy the contents of src to dst. Any allocated fields in dst are freed and - * replaced with newly allocated duplicates of the corresponding fields in src. - * - * @return >= 0 on success, a negative AVERROR code on failure. - */ -int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src); - -/** - * This function is the same as av_get_audio_frame_duration(), except it works - * with AVCodecParameters instead of an AVCodecContext. - */ -int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes); - -/** - * @} - */ - -#endif // AVCODEC_CODEC_PAR_H diff --git a/spaces/compasspathways/Sentiment3D/app.py b/spaces/compasspathways/Sentiment3D/app.py deleted file mode 100644 index 5e238f5e106f457a54d6a70f8dec022210072454..0000000000000000000000000000000000000000 --- a/spaces/compasspathways/Sentiment3D/app.py +++ /dev/null @@ -1,77 +0,0 @@ -import gradio as gr -import pandas as pd -from sentiment3d import Sentiment3D - -s3d = Sentiment3D() - -TITLE = "COMPASS Pathways: 3D Sentiment Model" -EXAMPLES = [ - "This is so awesome!", - "You're driving me up the wall!", - "I'm so lonely I could cry.", - "I'm not feeling very sad at all.", - "A day without sunshine is like, you know, night.", - "Yes, that's how I feel [laughing].", - "Yes, that's how I feel [sobbing].", - "Now I hear what you're sayin' 😀", - "Now I hear what you're sayin' 🙁", -] - - -def sentiment(text, state): - sent = s3d(text) - res = dict(text=text, valence=sent['valence'], arousal=sent['arousal'], confidence=sent['confidence'], words=len(text.split())) - #if clear_history: - # state = [] - if state == None: - state = [] - state.append(res) - df = pd.DataFrame(state) - res_txt = [ - f"{r['text']}: \n valence = {r['valence']:0.3f}, arousal = {r['arousal']:0.3f}, confidence = {r['confidence']:0.3f}" - for r in state - ] - return "\n".join(res_txt), df, df, df, state - - -iface = gr.Interface( - fn=sentiment, - inputs=[gr.Textbox(lines=1, placeholder="Text for 3d sentiment..."), "state"], - outputs=[ - gr.Textbox(lines=5, max_lines=5, label="Results"), - gr.ScatterPlot( - x="valence", - y="arousal", - tooltip="text", - size="words", - size_legend_position="none", - interactive=False, - x_lim=[-1.05, 1.05], - y_lim=[-1.05, 1.05], - ), - gr.ScatterPlot( - x="valence", - y="confidence", - tooltip="text", - size="words", - size_legend_position="none", - interactive=False, - x_lim=[-1.05, 1.05], - y_lim=[-1.05, 1.05], - ), - gr.ScatterPlot( - x="arousal", - y="confidence", - tooltip="text", - size="words", - size_legend_position="none", - interactive=False, - x_lim=[-1.05, 1.05], - y_lim=[-1.05, 1.05], - ), - "state", - ], - title=TITLE, - examples=EXAMPLES -) -iface.launch() \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Talking Tom Hero Dash Full APK - The Most Fun and Addictive Endless Runner Game.md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy Talking Tom Hero Dash Full APK - The Most Fun and Addictive Endless Runner Game.md deleted file mode 100644 index 19dd80db7f2fd9c267f41f723fe7d338ac5d0b70..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Talking Tom Hero Dash Full APK - The Most Fun and Addictive Endless Runner Game.md +++ /dev/null @@ -1,120 +0,0 @@ - -

Talking Tom Hero Dash Full APK: A Fun and Action-Packed Game for Android

-

Do you love running games? Do you enjoy playing with cute and funny characters? Do you want to experience an epic adventure with amazing graphics and sound effects? If you answered yes to any of these questions, then you should try Talking Tom Hero Dash, one of the most popular games on Google Play Store.

-

-

Talking Tom Hero Dash is a fun and action-packed game that features your favorite characters from the Talking Tom series. You can play as Tom, Angela, Hank, Ginger, or Ben, each with their own superpowers and vehicles. Your mission is to run through different worlds, collect coins and power-ups, defeat raccoon bosses, save your friends, and rebuild your city.

-

Talking Tom Hero Dash is more than just a running game. It also has elements of strategy, customization, humor, and social interaction. You can unlock new outfits and vehicles for your heroes, upgrade your city with cool buildings and decorations, watch funny videos of your heroes' antics, and share your achievements with your friends online.

-

If you want to enjoy all these features and more, you should download Talking Tom Hero Dash Full APK, which is the full version of the game that includes all the updates and features. You can download it for free from a trusted source and install it on your Android device in a few simple steps. Here's how:

-

How to Download Talking Tom Hero Dash Full APK

-

Step 1: Enable Unknown Sources on Your Device

-

Before you can install the APK file, you need to allow your device to install apps from sources other than Google Play Store. To do this, go to your device's settings and look for the security or privacy option. Then, find the option that says "Unknown sources" or "Allow installation of apps from unknown sources" and enable it. You may see a warning message that says installing apps from unknown sources can harm your device, but don't worry, as long as you download the APK file from a reliable source, you should be fine.

-

Step 2: Download the APK File from a Trusted Source

-

Now that you have enabled unknown sources, you can download the APK file from a trusted source. There are many websites that offer APK files for free, but not all of them are safe and reliable. Some of them may contain malware or viruses that can harm your device or steal your personal information. To avoid this, you should only download the APK file from a reputable website that has positive reviews and ratings from other users. One such website is APKPure.com, which is one of the most popular and trusted sources for APK files. You can download Talking Tom Hero Dash Full APK from this link: https://apkpure.com/talking-tom-hero-dash-run-game/com.outfit7.herodash/download?from=details.

-

Step 3: Install and Launch the Game

-

Once you have downloaded the APK file, you can install it on your device by tapping on it and following the instructions on the screen. The installation process may take a few minutes, depending on your device's speed and memory. After the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You may see a message that asks you to grant some permissions to the game, such as access to your storage, camera, microphone, or location. You can allow or deny these permissions as you wish, but keep in mind that some features of the game may not work properly if you deny them.
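If you have a computer handy, the same install can also be done over USB with Android's platform tools instead of tapping the file on the phone. The snippet below is only a rough sketch of that approach: it assumes adb is installed and USB debugging is enabled on the phone, and the APK filename is a made-up placeholder rather than the exact name of the file you will download.

```python
import subprocess

APK_PATH = "talking-tom-hero-dash-full.apk"  # placeholder name for the downloaded APK file

def sideload(apk_path: str) -> None:
    # Show connected devices; your phone must appear here with USB debugging enabled.
    subprocess.run(["adb", "devices"], check=True)
    # Install the APK (-r reinstalls and keeps existing data if the game is already present).
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload(APK_PATH)
```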

-

How to Play Talking Tom Hero Dash

-

Choose Your Hero

-

When you start the game, you will see a screen that shows you the five heroes that you can play as: Tom, Angela, Hank, Ginger, or Ben. Each hero has their own superpower and vehicle that they can use in the game. For example, Tom can fly with his jetpack, Angela can use her pink scooter, Hank can shoot lasers with his glasses, Ginger can throw fireballs with his slingshot, and Ben can hack computers with his laptop. You can choose any hero that you like by tapping on their picture. You can also switch heroes anytime during the game by tapping on the hero icon at the top left corner of the screen.

-


-

Run, Jump, and Slide Through Different Worlds

-

The main gameplay of Talking Tom Hero Dash is similar to other running games such as Subway Surfers, Temple Run, or Sonic Dash. You have to control your hero's movements by swiping left or right to change lanes, swiping up to jump over obstacles, and swiping down to slide under them. You have to avoid crashing into obstacles such as cars, buses, trains, barrels, fences, or raccoons. If you crash into an obstacle three times in a row, you will lose the game and have to start over.

-

The game has different worlds that you can run through, such as city streets, tropical islands, Chinese temples, snowy mountains, or space stations. Each world has its own theme and challenges that make it unique and fun. You can unlock new worlds by collecting enough stars in each level. Stars are awarded based on how far you run, how many coins you collect, how many raccoons you defeat, and how many friends you save.

-

Collect Coins, Power-Ups, and Rewards

-

As you run through the worlds, you will see coins scattered along the way. You can collect these coins by running over them or using power-ups such as magnets that attract them to you. Coins are used to buy new outfits and vehicles for your heroes or upgrade your city with new buildings and decorations.

-

You will also see power-ups that appear randomly on the road. These power-ups can help you run faster, avoid obstacles, or defeat raccoons. Some of the power-ups are shields that protect you from crashes, rockets that launch you into the air, or helmets that make you invincible. You can activate these power-ups by tapping on them when they appear on the screen.

-

Besides coins and power-ups, you will also see rewards that you can collect by completing certain tasks or achievements. These rewards include gems, chests, stickers, or cards. Gems are used to revive your hero if you lose the game or to unlock new worlds faster. Chests contain random items such as coins, gems, power-ups, or outfits. Stickers are used to decorate your city with graffiti or posters. Cards are used to unlock new heroes or upgrade their abilities.

-

Defeat Raccoon Bosses and Save Your Friends

-

The main villains of Talking Tom Hero Dash are the raccoons, who have invaded your city and kidnapped your friends. You have to fight against them and rescue your friends in each world. The raccoons come in different sizes and colors, and they have different weapons and attacks. Some of them throw bombs, some of them shoot lasers, and some of them ride motorcycles or helicopters. You have to dodge their attacks and hit them with your superpower or power-ups until they are defeated.

-

At the end of each world, you will face a raccoon boss, who is bigger and stronger than the regular raccoons. The raccoon boss has a health bar that shows how much damage you have done to him. You have to hit him multiple times with your superpower or power-ups until his health bar is empty. Once you defeat the raccoon boss, you will free one of your friends who has been trapped in a cage. You can then play as that friend in the next world.

-

Customize Your Heroes and Their Vehicles

-

One of the most fun features of Talking Tom Hero Dash is that you can customize your heroes and their vehicles with different outfits and accessories. You can change their clothes, hats, glasses, shoes, masks, or capes. You can also change their vehicles' colors, wheels, stickers, or lights. You can buy these items with coins or gems, or get them from chests or cards. You can also mix and match different items to create your own unique style.

-

Customizing your heroes and their vehicles not only makes them look cool, but also gives them some advantages in the game. For example, some outfits give you extra coins or gems, some vehicles give you extra speed or power-ups, and some combinations give you special effects or bonuses. You can see these benefits by tapping on the item's icon before buying it.

-

How Does Talking Tom Hero Dash Compare with Other Games?

-

Talking Tom Hero Dash is not the only running game available on Google Play Store. There are many other games that have similar gameplay and features, such as Subway Surfers, Temple Run, or Sonic Dash. How does Talking Tom Hero Dash compare with these games? Here is a table that shows some of the differences and similarities between them:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Game | Graphics | Gameplay | Ratings |
| --- | --- | --- | --- |
| Talking Tom Hero Dash | 3D cartoon style with bright colors and detailed animations | Run through different worlds with various themes and challenges; collect coins and power-ups; defeat raccoon bosses and save your friends; customize your heroes and their vehicles; rebuild your city | 4.5 stars out of 5 based on 2 million reviews |
| Subway Surfers | 3D cartoon style with vibrant colors and smooth animations | Run through different cities with diverse cultures and landmarks; collect coins and power-ups; dodge trains and obstacles; unlock new characters and boards; join seasonal events | 4.4 stars out of 5 based on 36 million reviews |
| Temple Run | 3D realistic style with dark colors and dynamic shadows | Run through ancient temples with mysterious secrets and dangers; collect coins and power-ups; avoid traps and obstacles; unlock new characters and abilities; escape from evil monkeys | 4.1 stars out of 5 based on 5 million reviews |
| Sonic Dash | 3D cartoon style with vivid colors and fast-paced animations | Run through iconic locations from the Sonic universe; collect rings and power-ups; dash and spin to destroy enemies; unlock new characters and skills; join epic boss battles | 4.4 stars out of 5 based on 5 million reviews |
-

As you can see, Talking Tom Hero Dash has some advantages and disadvantages compared to other games. It has better graphics, more gameplay options, and a higher rating than Temple Run, and its rating also sits slightly above Subway Surfers and Sonic Dash; its main drawbacks against those two are less variety, more ads, and a much smaller player base. Ultimately, the choice of which game to play depends on your personal preference and taste. However, if you are looking for a fun and action-packed game that features your favorite Talking Tom characters, then you should definitely give Talking Tom Hero Dash a try.

-

Conclusion

-

Talking Tom Hero Dash is a fun and action-packed game that you can download and play on your Android device for free. It features your favorite Talking Tom characters as superheroes who run through different worlds, collect coins and power-ups, defeat raccoon bosses, save their friends, and rebuild their city. You can also customize your heroes and their vehicles with different outfits and accessories, and share your achievements with your friends online. Talking Tom Hero Dash is a game that will keep you entertained and engaged for hours.

-

If you want to download Talking Tom Hero Dash Full APK, which is the full version of the game that includes all the updates and features, you can follow the simple steps that we have explained in this article. You just need to enable unknown sources on your device, download the APK file from a trusted source, install and launch the game, and enjoy. It's that easy.

-

So what are you waiting for? Download Talking Tom Hero Dash Full APK today and join the adventure with your favorite Talking Tom heroes. You won't regret it.

-

FAQs

-

Here are some frequently asked questions that you might have about Talking Tom Hero Dash or APK files:

-
    -
  • What is an APK file?
  • -
  • An APK file is an Android Package file that contains all the files and data needed to install an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac.
  • -
  • Why do I need to download an APK file instead of installing the app from Google Play Store?
  • -
  • Sometimes, you might want to download an APK file instead of installing the app from Google Play Store for various reasons. For example, you might want to access an app that is not available in your region or country, or you might want to get the latest version of an app that has not been updated on Google Play Store yet, or you might want to get the full version of an app that has some features locked or restricted on Google Play Store.
  • -
  • Is it safe to download and install APK files?
  • -
  • It depends on where you download the APK files from. Some websites may offer APK files that are infected with malware or viruses that can harm your device or steal your personal information. To avoid this, you should only download APK files from reputable websites that have positive reviews and ratings from other users. You should also scan the APK files with an antivirus software before installing them.
  • -
  • How do I update an app that I installed from an APK file?
  • -
  • If you installed an app from an APK file, you will not receive automatic updates from Google Play Store. You will have to manually download and install the new version of the APK file from the same source that you got it from. Alternatively, you can uninstall the app and install it from Google Play Store if it is available there.
  • -
  • How do I uninstall an app that I installed from an APK file?
  • -
  • You can uninstall an app that you installed from an APK file in the same way that you uninstall any other app on your device. You can go to your device's settings and look for the apps or applications option. Then, find the app that you want to uninstall and tap on it. You will see an option that says "Uninstall" or "Remove". Tap on it and confirm your action.
  • -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Texas Holdem Poker Online Mod Apk Play Free Poker Games Anytime Anywhere.md b/spaces/congsaPfin/Manga-OCR/logs/Texas Holdem Poker Online Mod Apk Play Free Poker Games Anytime Anywhere.md deleted file mode 100644 index 105961295b4c5aeef998bb3a56fa9a8ce05567cf..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Texas Holdem Poker Online Mod Apk Play Free Poker Games Anytime Anywhere.md +++ /dev/null @@ -1,125 +0,0 @@ - -

Texas Holdem Poker Online Mod APK: How to Play and Win

-

Do you love playing poker but don't have the time or money to visit a casino? Do you want to enjoy the thrill of winning big without risking your real cash? If you answered yes to any of these questions, then you should try Texas Holdem Poker Online Mod APK. This is a modified version of the popular card game that lets you play online with other players from around the world. You can also get unlimited money, chips, and coins to bet as much as you want. In this article, we will show you how to download, install, and play Texas Holdem Poker Online Mod APK. We will also give you some tips on how to win more often and have more fun.

-

Introduction

-

What is Texas Holdem Poker Online Mod APK?

-

Texas Holdem Poker Online Mod APK is a hacked version of the original game that gives you access to some features that are not available in the official app. For example, you can get unlimited money, chips, and coins to play with. You can also unlock all the game modes, tables, and tournaments that are otherwise restricted or require in-app purchases. You can also enjoy faster loading times, smoother gameplay, and better graphics. With Texas Holdem Poker Online Mod APK, you can experience the best of online poker without spending a dime.

-

-

Why should you play Texas Holdem Poker Online Mod APK?

-

There are many reasons why you should play Texas Holdem Poker Online Mod APK. Here are some of them:

-
    -
  • You can play online with millions of other players from different countries and regions.
  • -
  • You can choose from various game modes, such as cash games, tournaments, sit and go, and more.
  • -
  • You can customize your avatar, profile, and table settings according to your preferences.
  • -
  • You can chat with other players, send gifts, and make friends.
  • -
  • You can learn the rules and strategies of Texas Holdem Poker from tutorials, tips, and guides.
  • -
  • You can improve your skills, confidence, and ranking by playing regularly.
  • -
  • You can have fun, relax, and relieve stress by playing poker anytime, anywhere.
  • -
-

How to download and install Texas Holdem Poker Online Mod APK?

-

Step 1: Find a reliable source for the mod apk file

-

The first step to download and install Texas Holdem Poker Online Mod APK is to find a trustworthy website that offers the mod apk file. You can use Google or any other search engine to look for one. However, be careful not to download from shady or suspicious sites that may contain viruses or malware. One of the best sources for the mod apk file is Apkloli.com, which is a website that offers many popular card games. You can get the free download of Texas Holdem Poker Online Mod APK version v123.1.18 here.

-

Step 2: Enable unknown sources on your device

-

The next step is to enable unknown sources on your device. This is necessary because the mod apk file is not from the official Google Play Store. To do this, go to your device's settings and look for the security or privacy option. Then, find the unknown sources option and toggle it on. This will allow you to install apps from sources other than the Google Play Store.

-

Step 3: Download and install the mod apk file

-

The final step is to download and install the mod apk file. To do this, go to the website where you found the mod apk file and click on the download button. Wait for the file to be downloaded to your device. Then, locate the file in your device's storage and tap on it. Follow the instructions on the screen to complete the installation process. Once the installation is done, you can launch the app and start playing Texas Holdem Poker Online Mod APK.
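Because modded APK files are a common way to spread malware, it is also worth comparing the file you downloaded against a checksum published by the website, when one is provided, before you install it. Here is a minimal Python sketch of that check; the filename and the expected hash are placeholders rather than values taken from any real download page.

```python
import hashlib

APK_PATH = "texas-holdem-poker-online-mod.apk"  # placeholder filename
EXPECTED_SHA256 = "replace-with-the-checksum-published-by-the-download-site"  # placeholder

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Hash the file in chunks so even a large APK does not need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if sha256_of(APK_PATH) == EXPECTED_SHA256:
    print("Checksum matches - this is the file the site published.")
else:
    print("Checksum mismatch - do not install this file.")
```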

-

How to play Texas Holdem Poker Online Mod APK?

-

Step 1: Create an account or log in with Facebook

-

When you open the app for the first time, you will be asked to create an account or log in with Facebook. You can choose either option depending on your preference. Creating an account will require you to enter your username, email, and password. Logging in with Facebook will require you to grant permission to access your profile information. Either way, you will be able to access all the features of Texas Holdem Poker Online Mod APK once you are logged in.

-

Step 2: Choose a game mode and a table

-

The next step is to choose a game mode and a table that suits your skill level and budget. You can choose from various game modes, such as cash games, tournaments, sit and go, and more. You can also filter the tables by stakes, players, speed, and type. You can join any table that has an empty seat or create your own table and invite your friends. You can also switch tables anytime you want.

-

Step 3: Learn the basic rules and strategies of Texas Holdem Poker

-

The last step is to learn the basic rules and strategies of Texas Holdem Poker. If you are new to poker, you can check out the tutorials, tips, and guides that are available in the app. You can also practice with free chips or play money before you bet with real money. The basic rules of Texas Holdem Poker are as follows:

-
    -
  • Each player is dealt two cards face down, called hole cards.
  • -
  • A round of betting takes place, starting with the player to the left of the big blind.
  • -
  • Three cards are dealt face up on the table, called the flop.
  • -
  • Another round of betting takes place, starting with the player to the left of the dealer.
  • -
  • A fourth card is dealt face up on the table, called the turn.
  • -
  • Another round of betting takes place, starting with the player to the left of the dealer.
  • -
  • A fifth card is dealt face up on the table, called the river.
  • -
  • A final round of betting takes place, starting with the player to the left of the dealer.
  • -
  • The remaining players show their cards and compare their best five-card poker hand.
  • -
  • The player with the best hand wins the pot.
  • -
-

The basic strategies of Texas Holdem Poker are as follows:

-


-
    -
  • Know when to fold, call, raise, or bluff depending on your cards and position.
  • -
  • Pay attention to your opponents' actions and patterns and try to guess their cards and intentions.
  • -
  • Use probability and odds to calculate your chances of winning or losing.
  • -
  • Avoid tilting or letting your emotions affect your decisions.
  • -
  • Manage your bankroll wisely and don't bet more than you can afford to lose.
  • -
-

How to win Texas Holdem Poker Online Mod APK?

-

Tip 1: Use the unlimited money feature to your advantage

-

One of the benefits of playing Texas Holdem Poker Online Mod APK is that you can get unlimited money, chips, and coins to play with. This means that you can bet as much as you want without worrying about running out of money. You can also use this feature to intimidate your opponents or make them think that you have a strong hand. However, be careful not to abuse this feature or make unrealistic bets that will make you look suspicious or foolish.

-

Tip 2: Bluff smartly and read your opponents' tells

-

Another way to win Texas Holdem Poker Online Mod APK is to bluff smartly and read your opponents' tells. Bluffing is a technique of making your opponents think that you have a better hand than you actually have. This can make them fold or call with weaker hands. However, bluffing is not always effective and can backfire if your opponents call your bluff or have a better hand. Therefore, you should bluff smartly and sparingly, and only when you have a good reason to do so. Some factors that can help you decide when to bluff are:

- Your position: Bluffing from a late position can be more successful than bluffing from an early position, as you have more information about your opponents' actions and cards.
- Your image: Bluffing from a tight image can be more successful than bluffing from a loose image, as your opponents will respect your bets and assume that you have a strong hand.
- Your cards: Bluffing with some outs or potential to improve can be more successful than bluffing with nothing, as you have a chance to win even if you get called.
- Your opponents: Bluffing against weak or timid opponents can be more successful than bluffing against strong or aggressive opponents, as they are more likely to fold or be scared by your bets.

Reading your opponents' tells is another skill that can help you win Texas Holdem Poker Online Mod APK. Tells are clues or signs that reveal your opponents' emotions, intentions, or cards. They can be verbal or non-verbal, such as facial expressions, body language, gestures, tone of voice, chat messages, betting patterns, and so on. By reading your opponents' tells, you can gain an edge over them and make better decisions. Some examples of common tells are:

- A player who looks away or avoids eye contact may be bluffing or hiding something.
- A player who smiles or laughs nervously may be nervous or unsure about their hand.
- A player who shakes or trembles may be excited or holding a strong hand.
- A player who bets quickly or aggressively may be confident or trying to intimidate you.
- A player who bets slowly or hesitantly may be weak or trying to trap you.
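The "your cards" factor above is easy to put a rough number on with the classic rule of four and two: multiply your outs by four with two cards still to come, or by two with one card to come, to estimate your chance of improving. The sketch below is just that back-of-the-envelope estimate written in Python; it is a rule-of-thumb aid, not a feature of the game itself.

```python
def estimate_equity(outs: int, cards_to_come: int) -> int:
    """Rule of four and two: outs * 4 with two cards to come (on the flop),
    outs * 2 with one card to come (on the turn). Returns an approximate win %."""
    multiplier = 4 if cards_to_come == 2 else 2
    return min(outs * multiplier, 100)

# A flush draw has 9 outs: a semi-bluff on the flop still wins about 36% of the
# time when called, while a bluff with no outs wins essentially never.
print(estimate_equity(outs=9, cards_to_come=2))  # ~36
print(estimate_equity(outs=9, cards_to_come=1))  # ~18
```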

Tip 3: Practice regularly and learn from your mistakes

-

The final tip to win Texas Holdem Poker Online Mod APK is to practice regularly and learn from your mistakes. Poker is a game of skill and luck, and the only way to improve your skill is to play more and study more. You can practice with free chips or play money before you play with real money. You can also watch videos, read books, join forums, and follow the blogs of professional poker players, and you can review your own games to analyze your strengths and weaknesses. By practicing regularly and learning from your mistakes, you can become a better poker player and win more often.

-

Conclusion

-

Summary of the main points

-

In conclusion, Texas Holdem Poker Online Mod APK is a modified version of the popular card game that lets you play online with other players from around the world. You can also get unlimited money, chips, and coins to bet as much as you want. To download and install Texas Holdem Poker Online Mod APK, you need to find a reliable source for the mod apk file, enable unknown sources on your device, and follow the instructions on the screen. To play Texas Holdem Poker Online Mod APK, you need to create an account or log in with Facebook, choose a game mode and a table, and learn the basic rules and strategies of Texas Holdem Poker. To win Texas Holdem Poker Online Mod APK, you need to use the unlimited money feature to your advantage, bluff smartly and read your opponents' tells, and practice regularly and learn from your mistakes.

-

Call to action

-

If you are ready to play Texas Holdem Poker Online Mod APK and have fun, then don't wait any longer. Download the app now and join the millions of poker fans who are enjoying the game every day. You will not regret it!

-

FAQs

-

Q: Is Texas Holdem Poker Online Mod APK safe to use?

-

A: Yes, Texas Holdem Poker Online Mod APK is safe to use as long as you download it from a reputable website that offers virus-free and malware-free files. However, you should always be careful when downloading any app from unknown sources and scan it with antivirus software before installing it.

-

Q: Is Texas Holdem Poker Online Mod APK legal to use?

-

A: Yes, Texas Holdem Poker Online Mod APK is legal to use as long as you do not use it for illegal purposes or violate any terms of service of the original app. However, you should always check the laws and regulations of your country or region before playing online poker with real money.

-

Q: How can I update Texas Holdem Poker Online Mod APK?

-

A: To update Texas Holdem Poker Online Mod APK, you need to download the latest version of the mod apk file from the same website where you downloaded it before. Then, you need to uninstall the old version of the app and install the new version. You can also check the website for any updates or notifications about the mod apk file.

-

Q: How can I contact the developer of Texas Holdem Poker Online Mod APK?

-

A: To contact the developer of Texas Holdem Poker Online Mod APK, you can visit their official website or their social media pages. You can also send them an email or a message through the app. However, you should not expect a quick or positive response from them, as they may not support or endorse the mod apk file.

-

Q: How can I get more free chips and coins in Texas Holdem Poker Online Mod APK?

-

A: To get more free chips and coins in Texas Holdem Poker Online Mod APK, you can use the unlimited money feature that comes with the mod apk file. You can also claim daily bonuses, complete achievements, invite friends, watch ads, and participate in events and promotions. You can also buy more chips and coins with real money if you want to support the original app.

-

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Benefits of Downloading My Clash Royale Matches and Sharing Them with Friends.md b/spaces/congsaPfin/Manga-OCR/logs/The Benefits of Downloading My Clash Royale Matches and Sharing Them with Friends.md deleted file mode 100644 index a7cc03057325194995707ce3c4f023ba5751becf..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/The Benefits of Downloading My Clash Royale Matches and Sharing Them with Friends.md +++ /dev/null @@ -1,161 +0,0 @@ -
-

How to Download Your Clash Royale Matches

-

Clash Royale is a popular real-time strategy game that pits you against other players from around the world in fast-paced card battles. You can collect and upgrade dozens of cards featuring your favorite Clash characters, spells, and defenses, as well as new ones like the Royales. You can also join or form a clan with other players to share cards and battle in clan wars for big rewards.

-

If you are a fan of Clash Royale, you might want to download your matches and watch them later. Maybe you want to analyze your mistakes and improve your skills, or maybe you want to relive your epic victories and show them off to your friends. Whatever your reason, downloading your matches is not as hard as you might think. In this article, we will show you two methods to download your Clash Royale matches easily and quickly.

-

download my clash royale matches


Download ○○○ https://urlca.com/2uO4G7



-

Before we start, you need to know how to find your player tag and access your battle log. Your player tag is a unique code that identifies your account in Clash Royale. You can find it by tapping on your name in the top left corner of the screen. Your battle log is where you can see all your recent battles, whether they are wins, losses, draws, or challenges. You can access it by tapping on the battle button in the bottom right corner of the screen.
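If you are comfortable with a little scripting, there is also a data-only route worth knowing about: Supercell's official developer API can return a player's recent battles as JSON. The snippet below is only a rough sketch — it assumes you have registered for a free API key at developer.clashroyale.com, and the exact endpoint path and response fields may differ from what is shown here.

```python
# Rough sketch: fetch a player's recent battle log as JSON from the official API.
# Assumes an API key from developer.clashroyale.com; endpoint details may vary.
import urllib.parse

import requests

API_TOKEN = "YOUR_API_TOKEN"   # placeholder token
PLAYER_TAG = "#2PP"            # placeholder player tag

encoded_tag = urllib.parse.quote(PLAYER_TAG)  # the '#' must be URL-encoded as %23
url = f"https://api.clashroyale.com/v1/players/{encoded_tag}/battlelog"
headers = {"Authorization": f"Bearer {API_TOKEN}"}

response = requests.get(url, headers=headers, timeout=30)
response.raise_for_status()

for battle in response.json():
    # Each entry describes one recent battle (type, time, decks, crowns, etc.).
    print(battle.get("type"), battle.get("battleTime"))
```

Note that this gives you battle data rather than a watchable video, so it complements rather than replaces the two methods described below.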

-

Methods to Download Your Matches

-

Using a Screen Recorder App

-

A screen recorder app is an app that allows you to record everything that happens on your device's screen. You can use it to record your Clash Royale matches by simply launching the app, starting the recording, opening Clash Royale, and playing or watching your match. When you are done, you can stop the recording and save it to your device's storage.

-

Some of the pros of using a screen recorder app are:

-

-
  • You can record any match you want, whether it is yours or someone else's.
  • You can record in high quality and resolution.
  • You can record with sound and voice commentary.
  • You can edit your recordings within the app or with another app.

Some of the cons of using a screen recorder app are:

-
  • You need to have enough storage space on your device to save your recordings.
  • You need to have a compatible device and operating system to run the app.
  • You might experience some lag or performance issues while recording.
  • You might need to pay for some features or remove ads from the app.

Some examples of screen recorder apps for Android and iOS devices are:

| App Name | Price | Rating | Features |
| --- | --- | --- | --- |
| AZ Screen Recorder | Free with in-app purchases | 4.5/5 stars on Google Play Store | Record in HD and Full HD quality; record with internal or external sound; record with facecam; edit videos with trimming, cutting, merging, adding text, etc.; live stream to YouTube, Facebook, Twitch, etc. |
| DU Recorder | Free with in-app purchases | 4.3/5 stars on Google Play Store | Record in 1080p, 12Mbps, 60FPS quality; record with internal or external sound; record with facecam; edit videos with trimming, cropping, adding music, etc.; live stream to YouTube, Facebook, Twitch, etc. |
| Screen Recorder & Video Editor | Free with in-app purchases | 4.7/5 stars on App Store | Record in 1080p quality; record with microphone sound; record with facecam; edit videos with trimming, splitting, adding filters, etc.; share videos to YouTube, Instagram, TikTok, etc. |
| TechSmith Capture | Free | 4.6/5 stars on App Store | Record in high quality; record with microphone sound; import videos from your device or cloud storage; share videos to Camtasia or Snagit for further editing |

Using a Third-Party Website

-

A third-party website is a website that allows you to download your Clash Royale matches by entering your player tag or the URL of the match you want to download. You can find the URL of the match by tapping on the share button in the battle log and then copying it to your clipboard. You can then paste it into the website and download the match as a video file.
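If a site gives you a direct link to the rendered video, the final download step is just an ordinary HTTP request, so it can also be scripted. The sketch below is illustrative only — the URL is a placeholder, not a real endpoint of any of the sites listed later in this article.

```python
# Minimal sketch: save an already-rendered replay video from a direct link.
# The URL is a placeholder, not a real endpoint of any particular website.
import requests

video_url = "https://example.com/replays/ABC123.mp4"  # hypothetical direct link

response = requests.get(video_url, stream=True, timeout=60)
response.raise_for_status()

with open("clash_royale_match.mp4", "wb") as output_file:
    # Stream the file in chunks so large videos do not have to fit in memory.
    for chunk in response.iter_content(chunk_size=8192):
        output_file.write(chunk)

print("Saved clash_royale_match.mp4")
```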

-

Some of the pros of using a third-party website are:

-
  • You do not need to install any app on your device or use any storage space.
  • You can download any match you want, whether it is yours or someone else's.
  • You can download in different formats and resolutions.
  • You can download multiple matches at once.

Some of the cons of using a third-party website are:

-
  • You need to have a stable internet connection to access the website and download the matches.
  • You might encounter some ads or pop-ups on the website.
  • You might need to wait for some time for the match to be processed and downloaded.
  • You might not be able to download some matches due to technical issues or legal restrictions.

Some examples of third-party websites for downloading Clash Royale matches are:

-
| Website Name | Price | Rating | Features |
| --- | --- | --- | --- |
| RoyaleAPI Replay Downloader | Free | N/A | Download matches from RoyaleAPI.com by entering your player tag or the match URL; download in MP4 format and 720p resolution; download multiple matches at once by entering multiple URLs separated by commas; view match details and statistics on RoyaleAPI.com |
| RoyaleReplay.com | Free with ads | N/A | Download matches from Clash Royale by entering your player tag or the match URL; download in MP4 format and 480p resolution; download one match at a time; view match details and statistics on RoyaleReplay.com |
| RoyaleTube.net | Free with ads | N/A | Download matches from Clash Royale by entering your player tag or the match URL; download in MP4 format and 360p resolution; download one match at a time |
-
-

Conclusion

-

Downloading your Clash Royale matches can be a fun and useful way to enjoy the game and improve your skills. You can use either a screen recorder app or a third-party website to download your matches easily and quickly. However, you should also be aware of the pros and cons of each method and choose the one that suits your needs and preferences best.

-

Here are some tips and tricks for improving your gameplay and enjoying your matches:

-
  • Watch your own replays and learn from your mistakes and successes.
  • Watch other players' replays and learn from their strategies and techniques.
  • Experiment with different decks and cards and find the ones that work best for you.
  • Join or form a clan and participate in clan wars and friendly battles.
  • Have fun and don't get too frustrated or angry when you lose.

FAQs

-

Q1: Can I download my matches directly from the Clash Royale app?

-

A1: No, you cannot download your matches directly from the Clash Royale app. You need to use a screen recorder app or a third-party website to do so.

-

Q2: How long are my matches stored in the battle log?

-

A2: Your matches are stored in the battle log for up to 25 days. After that, they will be deleted and you will not be able to download them anymore.

-

Q3: Can I download other players' matches from the leaderboards or tournaments?

-

A3: Yes, you can download other players' matches from the leaderboards or tournaments by tapping on their names and then on the replay button. You can use the same methods as described above to download their matches.

-

Q4: Can I edit or share my downloaded matches with others?

-

A4: Yes, you can edit or share your downloaded matches with others using any video editing or sharing app of your choice. You can also upload them to YouTube, Facebook, Instagram, or other social media platforms.

-

Q5: Can I get in trouble for downloading my matches or other players' matches?

-

A5: No, you cannot get in trouble for downloading your matches or other players' matches. However, you should respect the privacy and intellectual property rights of others and not use their matches for any malicious or illegal purposes.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/CVE-2020-2122 (brakeman) A Detailed Analysis of the JSON File Handling Flaw in Jenkins Plugin.md b/spaces/contluForse/HuggingGPT/assets/CVE-2020-2122 (brakeman) A Detailed Analysis of the JSON File Handling Flaw in Jenkins Plugin.md deleted file mode 100644 index 5875a9cc35ef6c21ab8ba9c433f75f59a5b56b89..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/CVE-2020-2122 (brakeman) A Detailed Analysis of the JSON File Handling Flaw in Jenkins Plugin.md +++ /dev/null @@ -1,6 +0,0 @@ -

CVE-2020-2122 (brakeman)


Download ○○○ https://ssurll.com/2uzw1Q



- - aaccfb2cb3
-
-
-

diff --git a/spaces/contluForse/HuggingGPT/assets/Download South Indian Recipe Book Pdf A Must-Have for Every Food Lover Who Wants to Try Something New and Exotic.md b/spaces/contluForse/HuggingGPT/assets/Download South Indian Recipe Book Pdf A Must-Have for Every Food Lover Who Wants to Try Something New and Exotic.md deleted file mode 100644 index 91a542e219279b4d2cb3e5f956ba7dbb447781b8..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Download South Indian Recipe Book Pdf A Must-Have for Every Food Lover Who Wants to Try Something New and Exotic.md +++ /dev/null @@ -1,19 +0,0 @@ - -

These recipe books feature just some of the food that you will be able to cook in your Air fryer. From French fries to spring rolls to even souffles, the possibilities are limitless! The Air fryer lets you fry, bake, grill and steam healthier, faster and more conveniently. We hope that you will enjoy using the Air fryer as many others have around the world, and that the recipes inside inspire you to cook healthy, well-balanced meals for you and your family.

-

The gowise air fryer recipes pdf will show you how to make the most of your gowise fryer. This gowise air fryer cookbook contains some delicious and creative recipes to get you started. From Chipotle tuna melt to creamy cheesecake, there are some amazing gowise air fryer recipes to help you please your family and guests.

-

Download South Indian Recipe Book Pdf


Download File === https://ssurll.com/2uzxjK



-

This philips air fryer recipe book contains numerous recipes for quick and healthy snacks and meals. You get over 30 philips air fryer recipes for side dishes, appetisers, entrees, desserts and kids meals. Philips airfryer recipe booklet will surely inspire you to cook wholesome meals for your loved ones.

-

Philips airfryer recipe booklet is full of inspiring recipes for healthy, low-fat food. There are Philips airfryer recipes that help you make full use of your favourite appliance for frying, grilling, baking and roasting. Just look into this amazing Philips air fryer recipe book and prepare great-tasting food for your family and friends.

-

Philips hd9220 air fryer recipe book is a collection of 25 healthy, quick and delicious recipes. In this philips airfryer recipe booklet, you will find recipes for both vegetarian and non-vegetarian appetizers. These Indian-inspired Philips recipes will let you treat your family and friends to some lip-smacking delicacies.

-

If you have bought a Tefal Actifry, your next step is to download the Tefal Actifry recipe book. The Tefal airfryer recipe book is probably one of the most comprehensive recipe books, complete with meal plans, health information, cooking tips and a whole lot of recipes. It is a collection of some of the best air fryer recipes, and you will find everything from basic everyday meal recipes to gourmet food recipes.

-

Looking for free air fryer recipes? Here is the best free air fryer book for Cooks Companion owners. This recipe book lets you explore your taste preferences with the best recipes from around the world. So, get ready to impress your family and friends with food that has less oil and more taste.

-

This free air fryer recipes pdf lets you do exactly what it claims: cook meals in a healthier way. With this philips airfryer recipe book download, you get access to more than 30 amazing recipes from around the world. Famous chef, restaurateur, author and television presenter Stacie Stewart has contributed 10 recipes for this book.

-

With this free air fryer recipe book, you can prepare some amazing dishes such as matcha cheesecake, pumpkin cookies and even croissants in your Cucina air fryer. It is a collection of 12 of the best air fryer recipes that help you create complete four-course meals in a much healthier way.

-

This one is not just an air fryer recipes pdf, but a complete operation and safety manual. It includes some basic recipes and cooking times and tips for specific foods. It can get you started with air frying, but for more recipes you will have to check out other airfryer recipe books.

-

-

This air fryer recipes pdf will get you started with your Glen air fryer. It is a collection of 9 simple air fryer recipes that give your favourite foods such as fish fingers and potato wedges a healthy makeover. The cookbook is very well illustrated with high-resolution pictures.

-

This air fryer cookbook pdf lets you prepare the most popular potato and meat dishes in your Habor Air Fryer. It also contains 4 dessert recipes. What's more, there is a comprehensive guide to cooking times and temperatures, so you can prepare all your favourite foods with ease, in a much healthier way.

-

South Indian masala vada, or parippu vada, also known as dal vada, goes low-oil here. Still as crunchy and tasty and super delish! If you are looking for air fryer South Indian recipes, then this is the one to try out.

-

We also have a similar blog post for beginner instant pot recipes. It has been downloaded by millions of instant pot readers and is perfect for instant pot inspiration. You can access it by clicking here.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/utils/misc.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/utils/misc.py deleted file mode 100644 index eb862a82bd47c8624db3dd5c6fb6ad8a03b62466..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/utils/misc.py +++ /dev/null @@ -1,17 +0,0 @@ -def add_prefix(inputs, prefix): - """Add prefix for dict. - - Args: - inputs (dict): The input dict with str keys. - prefix (str): The prefix to add. - - Returns: - - dict: The dict with keys updated with ``prefix``. - """ - - outputs = dict() - for name, value in inputs.items(): - outputs[f'{prefix}.{name}'] = value - - return outputs diff --git a/spaces/crashedice/signify/SOURCE/yolo_files/utils/wandb_logging/__init__.py b/spaces/crashedice/signify/SOURCE/yolo_files/utils/wandb_logging/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_api.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_api.py deleted file mode 100644 index 7754f084fc7b656a44dfb4e2a0b6d0a10f112eaf..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_api.py +++ /dev/null @@ -1,281 +0,0 @@ -from __future__ import annotations - -import contextlib -import logging -import os -import time -import warnings -from abc import ABC, abstractmethod -from dataclasses import dataclass -from threading import local -from typing import TYPE_CHECKING, Any - -from ._error import Timeout - -if TYPE_CHECKING: - from types import TracebackType - -_LOGGER = logging.getLogger("filelock") - - -# This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__ -# is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired -# again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak) -class AcquireReturnProxy: - """A context aware object that will release the lock file when exiting.""" - - def __init__(self, lock: BaseFileLock) -> None: - self.lock = lock - - def __enter__(self) -> BaseFileLock: - return self.lock - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - ) -> None: - self.lock.release() - - -@dataclass -class FileLockContext: - """A dataclass which holds the context for a ``BaseFileLock`` object.""" - - # The context is held in a separate class to allow optional use of thread local storage via the - # ThreadLocalFileContext class. - - #: The path to the lock file. - lock_file: str - - #: The default timeout value. - timeout: float - - #: The mode for the lock files - mode: int - - #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held - lock_file_fd: int | None = None - - #: The lock counter is used for implementing the nested locking mechanism. 
- lock_counter: int = 0 # When the lock is acquired is increased and the lock is only released, when this value is 0 - - -class ThreadLocalFileContext(FileLockContext, local): - """A thread local version of the ``FileLockContext`` class.""" - - -class BaseFileLock(ABC, contextlib.ContextDecorator): - """Abstract base class for a file lock object.""" - - def __init__( - self, - lock_file: str | os.PathLike[Any], - timeout: float = -1, - mode: int = 0o644, - thread_local: bool = True, # noqa: FBT001, FBT002 - ) -> None: - """ - Create a new lock object. - - :param lock_file: path to the file - :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in - the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it - to a negative value. A timeout of 0 means, that there is exactly one attempt to acquire the file lock. - :param mode: file permissions for the lockfile. - :param thread_local: Whether this object's internal context should be thread local or not. - If this is set to ``False`` then the lock will be reentrant across threads. - """ - self._is_thread_local = thread_local - - # Create the context. Note that external code should not work with the context directly and should instead use - # properties of this class. - kwargs: dict[str, Any] = { - "lock_file": os.fspath(lock_file), - "timeout": timeout, - "mode": mode, - } - self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs) - - def is_thread_local(self) -> bool: - """:return: a flag indicating if this lock is thread local or not""" - return self._is_thread_local - - @property - def lock_file(self) -> str: - """:return: path to the lock file""" - return self._context.lock_file - - @property - def timeout(self) -> float: - """ - :return: the default timeout value, in seconds - - .. versionadded:: 2.0.0 - """ - return self._context.timeout - - @timeout.setter - def timeout(self, value: float | str) -> None: - """ - Change the default timeout value. - - :param value: the new value, in seconds - """ - self._context.timeout = float(value) - - @abstractmethod - def _acquire(self) -> None: - """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file.""" - raise NotImplementedError - - @abstractmethod - def _release(self) -> None: - """Releases the lock and sets self._context.lock_file_fd to None.""" - raise NotImplementedError - - @property - def is_locked(self) -> bool: - """ - - :return: A boolean indicating if the lock file is holding the lock currently. - - .. versionchanged:: 2.0.0 - - This was previously a method and is now a property. - """ - return self._context.lock_file_fd is not None - - @property - def lock_counter(self) -> int: - """:return: The number of times this lock has been acquired (but not yet released).""" - return self._context.lock_counter - - def acquire( - self, - timeout: float | None = None, - poll_interval: float = 0.05, - *, - poll_intervall: float | None = None, - blocking: bool = True, - ) -> AcquireReturnProxy: - """ - Try to acquire the file lock. 
- - :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and - if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired - :param poll_interval: interval of trying to acquire the lock file - :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead - :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the - first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. - :raises Timeout: if fails to acquire lock within the timeout period - :return: a context object that will unlock the file when the context is exited - - .. code-block:: python - - # You can use this method in the context manager (recommended) - with lock.acquire(): - pass - - # Or use an equivalent try-finally construct: - lock.acquire() - try: - pass - finally: - lock.release() - - .. versionchanged:: 2.0.0 - - This method returns now a *proxy* object instead of *self*, - so that it can be used in a with statement without side effects. - - """ - # Use the default timeout, if no timeout is provided. - if timeout is None: - timeout = self._context.timeout - - if poll_intervall is not None: - msg = "use poll_interval instead of poll_intervall" - warnings.warn(msg, DeprecationWarning, stacklevel=2) - poll_interval = poll_intervall - - # Increment the number right at the beginning. We can still undo it, if something fails. - self._context.lock_counter += 1 - - lock_id = id(self) - lock_filename = self.lock_file - start_time = time.perf_counter() - try: - while True: - if not self.is_locked: - _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) - self._acquire() - if self.is_locked: - _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) - break - if blocking is False: - _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) - raise Timeout(lock_filename) # noqa: TRY301 - if 0 <= timeout < time.perf_counter() - start_time: - _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) - raise Timeout(lock_filename) # noqa: TRY301 - msg = "Lock %s not acquired on %s, waiting %s seconds ..." - _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) - time.sleep(poll_interval) - except BaseException: # Something did go wrong, so decrement the counter. - self._context.lock_counter = max(0, self._context.lock_counter - 1) - raise - return AcquireReturnProxy(lock=self) - - def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002 - """ - Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. Also - note, that the lock file itself is not automatically deleted. - - :param force: If true, the lock counter is ignored and the lock is released in every case/ - """ - if self.is_locked: - self._context.lock_counter -= 1 - - if self._context.lock_counter == 0 or force: - lock_id, lock_filename = id(self), self.lock_file - - _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) - self._release() - self._context.lock_counter = 0 - _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) - - def __enter__(self) -> BaseFileLock: - """ - Acquire the lock. 
- - :return: the lock object - """ - self.acquire() - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - ) -> None: - """ - Release the lock. - - :param exc_type: the exception type if raised - :param exc_value: the exception value if raised - :param traceback: the exception traceback if raised - """ - self.release() - - def __del__(self) -> None: - """Called when the lock object is deleted.""" - self.release(force=True) - - -__all__ = [ - "BaseFileLock", - "AcquireReturnProxy", -] diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/tests/abstract/get.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/tests/abstract/get.py deleted file mode 100644 index baa9aa4a915f618369b53a28fd106e000aed7b9c..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/tests/abstract/get.py +++ /dev/null @@ -1,377 +0,0 @@ -class AbstractGetTests: - def test_get_file_to_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 1a - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - assert local_fs.isdir(target) - - target_file2 = local_join(target, "file2") - target_subfile1 = local_join(target, "subfile1") - - # Copy from source directory - fs.get(fs_join(source, "file2"), target) - assert local_fs.isfile(target_file2) - - # Copy from sub directory - fs.get(fs_join(source, "subdir", "subfile1"), target) - assert local_fs.isfile(target_subfile1) - - # Remove copied files - local_fs.rm([target_file2, target_subfile1]) - assert not local_fs.exists(target_file2) - assert not local_fs.exists(target_subfile1) - - # Repeat with trailing slash on target - fs.get(fs_join(source, "file2"), target + "/") - assert local_fs.isdir(target) - assert local_fs.isfile(target_file2) - - fs.get(fs_join(source, "subdir", "subfile1"), target + "/") - assert local_fs.isfile(target_subfile1) - - def test_get_file_to_new_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 1b - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - fs.get( - fs_join(source, "subdir", "subfile1"), local_join(target, "newdir/") - ) # Note trailing slash - - assert local_fs.isdir(target) - assert local_fs.isdir(local_join(target, "newdir")) - assert local_fs.isfile(local_join(target, "newdir", "subfile1")) - - def test_get_file_to_file_in_existing_directory( - self, - fs, - fs_join, - fs_path, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 1c - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - fs.get(fs_join(source, "subdir", "subfile1"), local_join(target, "newfile")) - assert local_fs.isfile(local_join(target, "newfile")) - - def test_get_file_to_file_in_new_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 1d - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - fs.get( - fs_join(source, "subdir", "subfile1"), - local_join(target, "newdir", "newfile"), - ) - assert local_fs.isdir(local_join(target, "newdir")) - assert 
local_fs.isfile(local_join(target, "newdir", "newfile")) - - def test_get_directory_to_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 1e - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - for source_slash, target_slash in zip([False, True], [False, True]): - s = fs_join(source, "subdir") - if source_slash: - s += "/" - t = target + "/" if target_slash else target - - # Without recursive does nothing - # ERROR: erroneously creates new directory - # fs.get(s, t) - # assert fs.ls(target) == [] - - # With recursive - fs.get(s, t, recursive=True) - if source_slash: - assert local_fs.isfile(local_join(target, "subfile1")) - assert local_fs.isfile(local_join(target, "subfile2")) - assert local_fs.isdir(local_join(target, "nesteddir")) - assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile")) - - local_fs.rm( - [ - local_join(target, "subfile1"), - local_join(target, "subfile2"), - local_join(target, "nesteddir"), - ], - recursive=True, - ) - else: - assert local_fs.isdir(local_join(target, "subdir")) - assert local_fs.isfile(local_join(target, "subdir", "subfile1")) - assert local_fs.isfile(local_join(target, "subdir", "subfile2")) - assert local_fs.isdir(local_join(target, "subdir", "nesteddir")) - assert local_fs.isfile( - local_join(target, "subdir", "nesteddir", "nestedfile") - ) - - local_fs.rm(local_join(target, "subdir"), recursive=True) - assert local_fs.ls(target) == [] - - # Limit by maxdepth - # ERROR: maxdepth ignored here - - def test_get_directory_to_new_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 1f - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - for source_slash, target_slash in zip([False, True], [False, True]): - s = fs_join(source, "subdir") - if source_slash: - s += "/" - t = local_join(target, "newdir") - if target_slash: - t += "/" - - # Without recursive does nothing - # ERROR: erroneously creates new directory - # fs.get(s, t) - # assert fs.ls(target) == [] - - # With recursive - fs.get(s, t, recursive=True) - assert local_fs.isdir(local_join(target, "newdir")) - assert local_fs.isfile(local_join(target, "newdir", "subfile1")) - assert local_fs.isfile(local_join(target, "newdir", "subfile2")) - assert local_fs.isdir(local_join(target, "newdir", "nesteddir")) - assert local_fs.isfile( - local_join(target, "newdir", "nesteddir", "nestedfile") - ) - - local_fs.rm(local_join(target, "newdir"), recursive=True) - assert local_fs.ls(target) == [] - - # Limit by maxdepth - # ERROR: maxdepth ignored here - - def test_get_glob_to_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 1g - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - # for target_slash in [False, True]: - for target_slash in [False]: - t = target + "/" if target_slash else target - - # Without recursive - fs.get(fs_join(source, "subdir", "*"), t) - assert local_fs.isfile(local_join(target, "subfile1")) - assert local_fs.isfile(local_join(target, "subfile2")) - # assert not local_fs.isdir(local_join(target, "nesteddir")) # ERROR - assert not local_fs.isdir(local_join(target, "subdir")) - - # With recursive - - # Limit by maxdepth - - def test_get_glob_to_new_directory( - self, - fs, - fs_join, - 
fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 1h - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - for target_slash in [False, True]: - t = fs_join(target, "newdir") - if target_slash: - t += "/" - - # Without recursive - fs.get(fs_join(source, "subdir", "*"), t) - assert local_fs.isdir(local_join(target, "newdir")) - assert local_fs.isfile(local_join(target, "newdir", "subfile1")) - assert local_fs.isfile(local_join(target, "newdir", "subfile2")) - # ERROR - do not copy empty directory - # assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) - - local_fs.rm(local_join(target, "newdir"), recursive=True) - assert local_fs.ls(target) == [] - - # With recursive - fs.get(fs_join(source, "subdir", "*"), t, recursive=True) - assert local_fs.isdir(local_join(target, "newdir")) - assert local_fs.isfile(local_join(target, "newdir", "subfile1")) - assert local_fs.isfile(local_join(target, "newdir", "subfile2")) - assert local_fs.isdir(local_join(target, "newdir", "nesteddir")) - assert local_fs.isfile( - local_join(target, "newdir", "nesteddir", "nestedfile") - ) - - local_fs.rm(local_join(target, "newdir"), recursive=True) - assert local_fs.ls(target) == [] - - # Limit by maxdepth - # ERROR: this is not correct - - def test_get_list_of_files_to_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 2a - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - source_files = [ - fs_join(source, "file1"), - fs_join(source, "file2"), - fs_join(source, "subdir", "subfile1"), - ] - - for target_slash in [False, True]: - t = target + "/" if target_slash else target - - fs.get(source_files, t) - assert local_fs.isfile(local_join(target, "file1")) - assert local_fs.isfile(local_join(target, "file2")) - assert local_fs.isfile(local_join(target, "subfile1")) - - local_fs.rm(local_fs.find(target)) - assert local_fs.ls(target) == [] - - def test_get_list_of_files_to_new_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - local_fs, - local_join, - local_target, - ): - # Copy scenario 2b - source = fs_bulk_operations_scenario_0 - - target = local_target - local_fs.mkdir(target) - - source_files = [ - fs_join(source, "file1"), - fs_join(source, "file2"), - fs_join(source, "subdir", "subfile1"), - ] - - fs.get(source_files, local_join(target, "newdir") + "/") # Note trailing slash - assert local_fs.isdir(local_join(target, "newdir")) - assert local_fs.isfile(local_join(target, "newdir", "file1")) - assert local_fs.isfile(local_join(target, "newdir", "file2")) - assert local_fs.isfile(local_join(target, "newdir", "subfile1")) - - def test_get_directory_recursive( - self, fs, fs_join, fs_path, local_fs, local_join, local_target - ): - # https://github.com/fsspec/filesystem_spec/issues/1062 - # Recursive cp/get/put of source directory into non-existent target directory. 
- src = fs_join(fs_path, "src") - src_file = fs_join(src, "file") - fs.mkdir(src) - fs.touch(src_file) - - target = local_target - - # get without slash - assert not local_fs.exists(target) - for loop in range(2): - fs.get(src, target, recursive=True) - assert local_fs.isdir(target) - - if loop == 0: - assert local_fs.isfile(local_join(target, "file")) - assert not local_fs.exists(local_join(target, "src")) - else: - assert local_fs.isfile(local_join(target, "file")) - assert local_fs.isdir(local_join(target, "src")) - assert local_fs.isfile(local_join(target, "src", "file")) - - local_fs.rm(target, recursive=True) - - # get with slash - assert not local_fs.exists(target) - for loop in range(2): - fs.get(src + "/", target, recursive=True) - assert local_fs.isdir(target) - assert local_fs.isfile(local_join(target, "file")) - assert not local_fs.exists(local_join(target, "src")) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/_space_api.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/_space_api.py deleted file mode 100644 index 2384ef5829d0d2f4f6fdbfccd69ea7d3d50f9da9..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/_space_api.py +++ /dev/null @@ -1,101 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from dataclasses import dataclass -from enum import Enum -from typing import Dict, Optional - - -class SpaceStage(str, Enum): - """ - Enumeration of possible stage of a Space on the Hub. - - Value can be compared to a string: - ```py - assert SpaceStage.BUILDING == "BUILDING" - ``` - - Taken from https://github.com/huggingface/moon-landing/blob/main/server/repo_types/SpaceInfo.ts#L61 (private url). - """ - - # Copied from moon-landing > server > repo_types > SpaceInfo.ts (private repo) - NO_APP_FILE = "NO_APP_FILE" - CONFIG_ERROR = "CONFIG_ERROR" - BUILDING = "BUILDING" - BUILD_ERROR = "BUILD_ERROR" - RUNNING = "RUNNING" - RUNNING_BUILDING = "RUNNING_BUILDING" - RUNTIME_ERROR = "RUNTIME_ERROR" - DELETING = "DELETING" - STOPPED = "STOPPED" - PAUSED = "PAUSED" - - -class SpaceHardware(str, Enum): - """ - Enumeration of hardwares available to run your Space on the Hub. - - Value can be compared to a string: - ```py - assert SpaceHardware.CPU_BASIC == "cpu-basic" - ``` - - Taken from https://github.com/huggingface/moon-landing/blob/main/server/repo_types/SpaceInfo.ts#L73 (private url). - """ - - CPU_BASIC = "cpu-basic" - CPU_UPGRADE = "cpu-upgrade" - T4_SMALL = "t4-small" - T4_MEDIUM = "t4-medium" - A10G_SMALL = "a10g-small" - A10G_LARGE = "a10g-large" - A100_LARGE = "a100-large" - - -@dataclass -class SpaceRuntime: - """ - Contains information about the current runtime of a Space. - - Args: - stage (`str`): - Current stage of the space. Example: RUNNING. - hardware (`str` or `None`): - Current hardware of the space. 
Example: "cpu-basic". Can be `None` if Space - is `BUILDING` for the first time. - requested_hardware (`str` or `None`): - Requested hardware. Can be different than `hardware` especially if the request - has just been made. Example: "t4-medium". Can be `None` if no hardware has - been requested yet. - sleep_time (`int` or `None`): - Number of seconds the Space will be kept alive after the last request. By default (if value is `None`), the - Space will never go to sleep if it's running on an upgraded hardware, while it will go to sleep after 48 - hours on a free 'cpu-basic' hardware. For more details, see https://huggingface.co/docs/hub/spaces-gpus#sleep-time. - raw (`dict`): - Raw response from the server. Contains more information about the Space - runtime like number of replicas, number of cpu, memory size,... - """ - - stage: SpaceStage - hardware: Optional[SpaceHardware] - requested_hardware: Optional[SpaceHardware] - sleep_time: Optional[int] - raw: Dict - - def __init__(self, data: Dict) -> None: - self.stage = data["stage"] - self.hardware = data["hardware"]["current"] - self.requested_hardware = data["hardware"]["requested"] - self.sleep_time = data["gcTimeout"] - self.raw = data diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py deleted file mode 100644 index f8c40889bce7ec9b9645011b5e2ee8db37464b6a..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -from . import _base -from ._axes import * - -# Backcompat. -from ._axes import Axes as Subplot - - -class _SubplotBaseMeta(type): - def __instancecheck__(self, obj): - return (isinstance(obj, _base._AxesBase) - and obj.get_subplotspec() is not None) - - -class SubplotBase(metaclass=_SubplotBaseMeta): - pass - - -def subplot_class_factory(cls): return cls diff --git a/spaces/ddiddi/bhasha.dev/share_btn.py b/spaces/ddiddi/bhasha.dev/share_btn.py deleted file mode 100644 index a85c7c3c16e0bbeae3a8d880baf401903bca7337..0000000000000000000000000000000000000000 --- a/spaces/ddiddi/bhasha.dev/share_btn.py +++ /dev/null @@ -1,60 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - const gradioEl = document.querySelector('body > gradio-app'); - const imgEls = gradioEl.querySelectorAll('#gallery img'); - const promptTxt = gradioEl.querySelector('#prompt-text-input input').value; - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!imgEls.length){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const files = await Promise.all( - [...imgEls].map(async (imgEl) => { - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const fileName = 
`diffuse-the-rest-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - }) - ); - const urls = await Promise.all(files.map((f) => uploadFile(f))); - const htmlImgs = urls.map(url => ``); - const descriptionMd = `
-${htmlImgs.join(`\n`)} -
`; - const params = new URLSearchParams({ - title: promptTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/stabilityai/stable-diffusion/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_ddim_flax.py b/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_ddim_flax.py deleted file mode 100644 index db248c33077bf502e31cb2ab97141744b828b514..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_ddim_flax.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion -# and https://github.com/hojonathanho/diffusion - -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import flax -import jax.numpy as jnp - -from ..configuration_utils import ConfigMixin, register_to_config -from .scheduling_utils_flax import ( - CommonSchedulerState, - FlaxKarrasDiffusionSchedulers, - FlaxSchedulerMixin, - FlaxSchedulerOutput, - add_noise_common, - get_velocity_common, -) - - -@flax.struct.dataclass -class DDIMSchedulerState: - common: CommonSchedulerState - final_alpha_cumprod: jnp.ndarray - - # setable values - init_noise_sigma: jnp.ndarray - timesteps: jnp.ndarray - num_inference_steps: Optional[int] = None - - @classmethod - def create( - cls, - common: CommonSchedulerState, - final_alpha_cumprod: jnp.ndarray, - init_noise_sigma: jnp.ndarray, - timesteps: jnp.ndarray, - ): - return cls( - common=common, - final_alpha_cumprod=final_alpha_cumprod, - init_noise_sigma=init_noise_sigma, - timesteps=timesteps, - ) - - -@dataclass -class FlaxDDIMSchedulerOutput(FlaxSchedulerOutput): - state: DDIMSchedulerState - - -class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin): - """ - Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising - diffusion probabilistic models (DDPMs) with non-Markovian guidance. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - For more details, see the original paper: https://arxiv.org/abs/2010.02502 - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. 
- beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear`, `scaled_linear`, or `squaredcos_cap_v2`. - trained_betas (`jnp.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - clip_sample (`bool`, default `True`): - option to clip predicted sample between -1 and 1 for numerical stability. - set_alpha_to_one (`bool`, default `True`): - each diffusion step uses the value of alphas product at that step and at the previous one. For the final - step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, - otherwise it uses the value of alpha at step 0. - steps_offset (`int`, default `0`): - an offset added to the inference steps. You can use a combination of `offset=1` and - `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in - stable diffusion. - prediction_type (`str`, default `epsilon`): - indicates whether the model predicts the noise (epsilon), or the samples. One of `epsilon`, `sample`. - `v-prediction` is not supported for this scheduler. - dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): - the `dtype` used for params and computation. - """ - - _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] - - dtype: jnp.dtype - - @property - def has_state(self): - return True - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[jnp.ndarray] = None, - set_alpha_to_one: bool = True, - steps_offset: int = 0, - prediction_type: str = "epsilon", - dtype: jnp.dtype = jnp.float32, - ): - self.dtype = dtype - - def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDIMSchedulerState: - if common is None: - common = CommonSchedulerState.create(self) - - # At every step in ddim, we are looking into the previous alphas_cumprod - # For the final step, there is no previous alphas_cumprod because we are already at 0 - # `set_alpha_to_one` decides whether we set this parameter simply to one or - # whether we use the final alpha of the "non-previous" one. - final_alpha_cumprod = ( - jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] - ) - - # standard deviation of the initial noise distribution - init_noise_sigma = jnp.array(1.0, dtype=self.dtype) - - timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] - - return DDIMSchedulerState.create( - common=common, - final_alpha_cumprod=final_alpha_cumprod, - init_noise_sigma=init_noise_sigma, - timesteps=timesteps, - ) - - def scale_model_input( - self, state: DDIMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None - ) -> jnp.ndarray: - """ - Args: - state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. - sample (`jnp.ndarray`): input sample - timestep (`int`, optional): current timestep - - Returns: - `jnp.ndarray`: scaled input sample - """ - return sample - - def set_timesteps( - self, state: DDIMSchedulerState, num_inference_steps: int, shape: Tuple = () - ) -> DDIMSchedulerState: - """ - Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - state (`DDIMSchedulerState`): - the `FlaxDDIMScheduler` state data class instance. 
- num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - """ - step_ratio = self.config.num_train_timesteps // num_inference_steps - # creates integer timesteps by multiplying by ratio - # rounding to avoid issues when num_inference_step is power of 3 - timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] + self.config.steps_offset - - return state.replace( - num_inference_steps=num_inference_steps, - timesteps=timesteps, - ) - - def _get_variance(self, state: DDIMSchedulerState, timestep, prev_timestep): - alpha_prod_t = state.common.alphas_cumprod[timestep] - alpha_prod_t_prev = jnp.where( - prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod - ) - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev - - variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) - - return variance - - def step( - self, - state: DDIMSchedulerState, - model_output: jnp.ndarray, - timestep: int, - sample: jnp.ndarray, - eta: float = 0.0, - return_dict: bool = True, - ) -> Union[FlaxDDIMSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance. - model_output (`jnp.ndarray`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`jnp.ndarray`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than FlaxDDIMSchedulerOutput class - - Returns: - [`FlaxDDIMSchedulerOutput`] or `tuple`: [`FlaxDDIMSchedulerOutput`] if `return_dict` is True, otherwise a - `tuple`. When returning a tuple, the first element is the sample tensor. - - """ - if state.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf - # Ideally, read DDIM paper in-detail understanding - - # Notation ( -> - # - pred_noise_t -> e_theta(x_t, t) - # - pred_original_sample -> f_theta(x_t, t) or x_0 - # - std_dev_t -> sigma_t - # - eta -> η - # - pred_sample_direction -> "direction pointing to x_t" - # - pred_prev_sample -> "x_t-1" - - # 1. get previous step value (=t-1) - prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps - - alphas_cumprod = state.common.alphas_cumprod - final_alpha_cumprod = state.final_alpha_cumprod - - # 2. compute alphas, betas - alpha_prod_t = alphas_cumprod[timestep] - alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], final_alpha_cumprod) - - beta_prod_t = 1 - alpha_prod_t - - # 3. 
compute predicted original sample from predicted noise also called - # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - if self.config.prediction_type == "epsilon": - pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) - pred_epsilon = model_output - elif self.config.prediction_type == "sample": - pred_original_sample = model_output - pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) - elif self.config.prediction_type == "v_prediction": - pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output - pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" - " `v_prediction`" - ) - - # 4. compute variance: "sigma_t(η)" -> see formula (16) - # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) - variance = self._get_variance(state, timestep, prev_timestep) - std_dev_t = eta * variance ** (0.5) - - # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon - - # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction - - if not return_dict: - return (prev_sample, state) - - return FlaxDDIMSchedulerOutput(prev_sample=prev_sample, state=state) - - def add_noise( - self, - state: DDIMSchedulerState, - original_samples: jnp.ndarray, - noise: jnp.ndarray, - timesteps: jnp.ndarray, - ) -> jnp.ndarray: - return add_noise_common(state.common, original_samples, noise, timesteps) - - def get_velocity( - self, - state: DDIMSchedulerState, - sample: jnp.ndarray, - noise: jnp.ndarray, - timesteps: jnp.ndarray, - ) -> jnp.ndarray: - return get_velocity_common(state.common, sample, noise, timesteps) - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/deepghs/auto_image_censor/visual.py b/spaces/deepghs/auto_image_censor/visual.py deleted file mode 100644 index 8450323f15c1d0d8badd572825a894e60c161b13..0000000000000000000000000000000000000000 --- a/spaces/deepghs/auto_image_censor/visual.py +++ /dev/null @@ -1,44 +0,0 @@ -from functools import lru_cache -from typing import List - -import matplotlib.pyplot as plt -from PIL import Image -from hbutils.color import rnd_colors - - -@lru_cache() -def _get_complete_classes(): - from nudenet import open_model_session - _, classes = open_model_session() - return classes - - -@lru_cache() -def _get_color_map(): - _all_classes = _get_complete_classes() - colors = rnd_colors(len(_get_complete_classes()), rnd=0) - return {cls_: (str(c),) for c, cls_ in zip(colors, _all_classes)} - - -CLS_MAP = { - 'EXPOSED_BREAST_F': 'nipple', - 'EXPOSED_GENITALIA_F': 'pussy', - 'EXPOSED_GENITALIA_M': 'penis', - 'EXPOSED_ANUS': 'anus', -} - - -def plot_detection(pil_img: Image.Image, detection: List): - plt.tight_layout() - plt.imshow(pil_img) - ax = plt.gca() - _color_map = _get_color_map() - for item in detection: - score = item['score'] - xmin, ymin, xmax, ymax = item['box'] - class_ = item['label'] - - box_color, = _color_map[class_] - ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color=box_color, linewidth=3)) - text = f'{CLS_MAP.get(class_, class_)}: {score * 100:.2f}%' - 
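- # Draw the predicted class name and confidence at the box's top-left corner, on a semi-transparent patch filled with the box colour.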
ax.text(xmin, ymin, text, fontsize=8, bbox=dict(facecolor=box_color, alpha=0.5)) diff --git a/spaces/dejinlee/art/README.md b/spaces/dejinlee/art/README.md deleted file mode 100644 index 86f26f16fcfab57969dd892758832488cf30ffeb..0000000000000000000000000000000000000000 --- a/spaces/dejinlee/art/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Art -emoji: 🌖 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/diacanFperku/AutoGPT/Karizma Album 12x30 Background Psd Files Free 18.md b/spaces/diacanFperku/AutoGPT/Karizma Album 12x30 Background Psd Files Free 18.md deleted file mode 100644 index a51d5741c9629ee7f19b8f89c469cca381d23e78..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Karizma Album 12x30 Background Psd Files Free 18.md +++ /dev/null @@ -1,6 +0,0 @@ -

Karizma Album 12x30 Background Psd Files Free 18


Download ->>->>->> https://gohhs.com/2uFV1j



- -Modern Marriage Photo Album Design 12x30 PSD Templates Free Download ... Karizma Design PSD File Links 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ... Templates - Karizma Album Designs Download Karizzma PSD Background ... 1fdad05405
-
-
-

diff --git a/spaces/diacanFperku/AutoGPT/La-Gran-Conexion-Arnie-Warren-PDF-2021.md b/spaces/diacanFperku/AutoGPT/La-Gran-Conexion-Arnie-Warren-PDF-2021.md deleted file mode 100644 index 17541befba90ba301c13ae127c20e1315b657644..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/La-Gran-Conexion-Arnie-Warren-PDF-2021.md +++ /dev/null @@ -1,70 +0,0 @@ -## La gran conexion arnie warren PDF - - - - - - ![La Gran Conexion Arnie Warren PDF \[2021\]](https://ceylonmediweb.com/wp-content/uploads/2019/12/1566108581098.jpg) - - - - - -**Download ---> [https://urluso.com/2txxx2](https://urluso.com/2txxx2)** - - - - - - - - - - - - Here is a possible title and article with HTML formatting for the keyword "La gran conexion arnie warren PDF": - -# La gran conexion: A book review - - - -La gran conexion (The Great Connection) is a book by Arnie Warren that aims to help readers discover the key to successful professional relationships. The book is written as a story that follows the protagonist, Tom, as he learns about the four different styles of behavior that people tend to exhibit: analytical, driver, amiable and expressive. By understanding his own style and those of others, Tom is able to improve his communication, leadership and teamwork skills. - - - -The book is based on the DISC model of personality assessment, which was developed by William Moulton Marston in the 1920s. The model divides people into four quadrants based on their levels of dominance and sociability. The book explains how each style has its strengths and weaknesses, and how to adapt to different situations and people. The book also provides practical tips and exercises to help readers apply the concepts to their own lives. - - - -La gran conexion is a useful and engaging book for anyone who wants to improve their interpersonal skills and achieve more success in their professional and personal lives. The book is written in a simple and clear language, and uses examples and anecdotes to illustrate the points. The book is available in Spanish as a paperback or as a PDF file that can be downloaded from various online sources[^1^] [^2^] [^3^] [^4^]. - -Here is a possible continuation of the article with HTML formatting for the keyword "La gran conexion arnie warren PDF": - -The book is divided into three parts. The first part introduces the four styles of behavior and how they affect Tom's life and career. The second part shows how Tom learns to identify and connect with each style through the guidance of his mentor, Frank. The third part reveals how Tom applies his new skills to improve his relationships with his family, friends, colleagues and clients. - - - -The book is not only informative, but also entertaining and inspiring. The story is full of humor, drama and emotion, and the characters are realistic and relatable. The book also includes a self-assessment test that helps readers identify their own style and those of others. The book also offers a summary of the main points and a list of action steps at the end of each chapter. - - - -La gran conexion is a book that can change your life for the better. It can help you understand yourself and others better, communicate more effectively, build trust and rapport, resolve conflicts, motivate and influence others, and achieve your goals. It can also help you enjoy your life more by connecting with your true self and others on a deeper level. 
- -Here are a few more paragraphs for the article with HTML formatting for the keyword "La gran conexion arnie warren PDF": - -La gran conexion is not only a book for professionals, but also for anyone who wants to improve their personal relationships. The book can help you connect with your spouse, children, parents, friends and neighbors. It can also help you deal with difficult people and situations. The book can teach you how to appreciate and respect the differences and similarities among people, and how to create harmony and synergy. - - - -La gran conexion is also a book for personal growth and development. The book can help you discover your strengths and weaknesses, your values and beliefs, your passions and goals. It can also help you overcome your fears and doubts, your limiting beliefs and habits, your negative emotions and thoughts. The book can inspire you to be more confident, creative, positive and happy. - - - -La gran conexion is a book that can transform your life. It can help you connect with yourself and others in a meaningful and fulfilling way. It can help you live a life of purpose and joy. It can help you make a difference in the world. It can help you achieve the great connection. - - dfd1c89656 - - - - - diff --git a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/train_ms.py b/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/train_ms.py deleted file mode 100644 index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/train_ms.py +++ /dev/null @@ -1,402 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -import shutil -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - -torch.backends.cudnn.benchmark = True -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = True -torch.set_float32_matmul_precision('medium') -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." 
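- # Single-node, multi-GPU training: spawn one worker process per visible GPU, rendezvousing over localhost.
- # When not resuming (hps.cont is False), seed the output directory with the pretrained G/D/DUR checkpoints before spawning workers.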
- - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '65280' - - hps = utils.get_hparams() - if not hps.cont: - shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth') - shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth') - shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth') - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - batch_size=1, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: - print("Using noise scaled MAS for VITS2") - use_noise_scaled_mas = True - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - use_noise_scaled_mas = False - mas_noise_scale_initial = 0.0 - noise_scale_delta = 0.0 - if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: - print("Using duration discriminator for VITS2") - use_duration_discriminator = True - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: - if hps.data.n_speakers == 0: - raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") - use_spk_conditioned_encoder = True - else: - print("Using normal encoder for VITS1") - use_spk_conditioned_encoder = False - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial = mas_noise_scale_initial, - noise_scale_delta = noise_scale_delta, - **hps.model).cuda(rank) - - freeze_enc = getattr(hps.model, "freeze_enc", False) - if freeze_enc: - print("freeze encoder !!!") - for param in net_g.enc_p.parameters(): - param.requires_grad = False - - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - 
eps=hps.train.eps) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - - pretrain_dir = None - if pretrain_dir is None: - try: - if net_dur_disc is not None: - _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) - _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer=not hps.cont) - _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer=not hps.cont) - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - else: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, - optim_g, True) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, - optim_d, True) - - - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - if net_dur_disc is not None: - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - else: - scheduler_dur_disc = None - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), 
spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. 
* batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update( - {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - if net_dur_disc is not None: - utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 5) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict.update({ - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - }) - audio_dict.update({ - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]] - }) - image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - 
audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - -if __name__ == "__main__": - main() diff --git a/spaces/digitalxingtong/Miiu-Bert-Vits2/text/japanese.py b/spaces/digitalxingtong/Miiu-Bert-Vits2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Miiu-Bert-Vits2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/digitalxingtong/Nailv-Bert-Vits2/README_zh.md b/spaces/digitalxingtong/Nailv-Bert-Vits2/README_zh.md deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nailv-Bert-Vits2/README_zh.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/dineshreddy/WALT/mmdet/version.py b/spaces/dineshreddy/WALT/mmdet/version.py deleted file mode 100644 
index a3b741aed16212ad1dee277d519b259ae3184b19..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/version.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) Open-MMLab. All rights reserved. - -__version__ = '2.11.0' -short_version = __version__ - - -def parse_version_info(version_str): - version_info = [] - for x in version_str.split('.'): - if x.isdigit(): - version_info.append(int(x)) - elif x.find('rc') != -1: - patch_version = x.split('rc') - version_info.append(int(patch_version[0])) - version_info.append(f'rc{patch_version[1]}') - return tuple(version_info) - - -version_info = parse_version_info(__version__) diff --git a/spaces/dmeck/RVC-Speakers/vits/text/cleaners.py b/spaces/dmeck/RVC-Speakers/vits/text/cleaners.py deleted file mode 100644 index 0482fd38c1f85a3df9e23746b82da4b8013e23c2..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/vits/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if 
re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i 0 and re.match('[A-Za-zɯɹəɥ→↓↑]',text[-1]): - text += '.' - return text diff --git a/spaces/dmeck/RVC-Speakers/vits/utils.py b/spaces/dmeck/RVC-Speakers/vits/utils.py deleted file mode 100644 index 1de37df268eb85dcbf636db950b4e1692245bddc..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/vits/utils.py +++ /dev/null @@ -1,436 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch -import regex as re - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - -zh_pattern = re.compile(r'[\u4e00-\u9fa5]') -en_pattern = re.compile(r'[a-zA-Z]') -jp_pattern = re.compile(r'[\u3040-\u30ff\u31f0-\u31ff]') -kr_pattern = re.compile(r'[\uac00-\ud7af\u1100-\u11ff\u3130-\u318f\ua960-\ua97f]') -num_pattern = re.compile(r'[0-9]') -comma = r"(?<=[.。!!??;;,,、::'\"‘“”’()()《》「」~——])" # 向前匹配但固定长度 -tags = {'ZH': '[ZH]', 'EN': '[EN]', 'JP': '[JA]', 'KR': '[KR]'} - - -def tag_cjke(text): - '''为中英日韩加tag,中日正则分不开,故先分句分离中日再识别,以应对大部分情况''' - sentences = re.split(r"([.。!!??;;,,、::'\"‘“”’()()【】《》「」~——]+ *(?![0-9]))", text) # 分句,排除小数点 - sentences.append("") - sentences = ["".join(i) for i in zip(sentences[0::2], sentences[1::2])] - # print(sentences) - prev_lang = None - tagged_text = "" - for s in sentences: - # 全为符号跳过 - nu = re.sub(r'[\s\p{P}]+', '', s, flags=re.U).strip() - if len(nu) == 0: - continue - s = re.sub(r'[()()《》「」【】‘“”’]+', '', s) - jp = re.findall(jp_pattern, s) - # 本句含日语字符判断为日语 - if len(jp) > 0: - prev_lang, tagged_jke = tag_jke(s, prev_lang) - tagged_text += tagged_jke - else: - prev_lang, tagged_cke = tag_cke(s, prev_lang) - tagged_text += tagged_cke - return tagged_text - - -def tag_jke(text, prev_sentence=None): - '''为英日韩加tag''' - # 初始化标记变量 - tagged_text = "" - prev_lang = None - tagged = 0 - # 遍历文本 - for char in text: - # 判断当前字符属于哪种语言 - if jp_pattern.match(char): - lang = "JP" - elif zh_pattern.match(char): - lang = "JP" - elif kr_pattern.match(char): - lang = "KR" - elif en_pattern.match(char): - lang = "EN" - # elif num_pattern.match(char): - # lang = prev_sentence - else: - lang = None - tagged_text += char - continue - # 如果当前语言与上一个语言不同,就添加标记 - if lang != prev_lang: - tagged = 1 - if prev_lang == None: # 开头 - tagged_text = tags[lang] + tagged_text - else: - tagged_text = tagged_text + tags[prev_lang] + tags[lang] - - # 重置标记变量 - prev_lang = lang - - # 添加当前字符到标记文本中 - tagged_text += char - - # 在最后一个语言的结尾添加对应的标记 - if prev_lang: - tagged_text += tags[prev_lang] - if not tagged: - prev_lang 
= prev_sentence - tagged_text = tags[prev_lang] + tagged_text + tags[prev_lang] - - return prev_lang, tagged_text - - -def tag_cke(text, prev_sentence=None): - '''为中英韩加tag''' - # 初始化标记变量 - tagged_text = "" - prev_lang = None - # 是否全略过未标签 - tagged = 0 - - # 遍历文本 - for char in text: - # 判断当前字符属于哪种语言 - if zh_pattern.match(char): - lang = "ZH" - elif kr_pattern.match(char): - lang = "KR" - elif en_pattern.match(char): - lang = "EN" - # elif num_pattern.match(char): - # lang = prev_sentence - else: - # 略过 - lang = None - tagged_text += char - continue - - # 如果当前语言与上一个语言不同,添加标记 - if lang != prev_lang: - tagged = 1 - if prev_lang == None: # 开头 - tagged_text = tags[lang] + tagged_text - else: - tagged_text = tagged_text + tags[prev_lang] + tags[lang] - - # 重置标记变量 - prev_lang = lang - - # 添加当前字符到标记文本中 - tagged_text += char - - # 在最后一个语言的结尾添加对应的标记 - if prev_lang: - tagged_text += tags[prev_lang] - # 未标签则继承上一句标签 - if tagged == 0: - prev_lang = prev_sentence - tagged_text = tags[prev_lang] + tagged_text + tags[prev_lang] - return prev_lang, tagged_text - - -def load_checkpoint(checkpoint_path, model, optimizer=None, drop_speaker_emb=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - if k == 'emb_g.weight': - if drop_speaker_emb: - new_state_dict[k] = v - continue - v[:saved_state_dict[k].shape[0], :] = saved_state_dict[k] - new_state_dict[k] = v - else: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict() if optimizer is not None else None, - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def extract_digits(f): - digits = "".join(filter(str.isdigit, f)) - return int(digits) if digits else -1 - - -def latest_checkpoint_path(dir_path, regex="G_[0-9]*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: extract_digits(f)) - x = f_list[-1] - print(f"latest_checkpoint_path:{x}") - return x - - -def oldest_checkpoint_path(dir_path, 
regex="G_[0-9]*.pth", preserved=4): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: extract_digits(f)) - if len(f_list) > preserved: - x = f_list[0] - print(f"oldest_checkpoint_path:{x}") - return x - return "" - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def str2bool(v): - if isinstance(v, bool): - return v - if v.lower() in ('yes', 'true', 't', 'y', '1'): - return True - elif v.lower() in ('no', 'false', 'f', 'n', '0'): - return False - else: - raise argparse.ArgumentTypeError('Boolean value expected.') - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/modified_finetune_speaker.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="pretrained_models", - help='Model name') - parser.add_argument('-n', '--max_epochs', type=int, default=50, - help='finetune epochs') - parser.add_argument('--cont', type=str2bool, default=False, - help='whether to continue training on the latest checkpoint') - parser.add_argument('--drop_speaker_embed', type=str2bool, default=False, - help='whether to drop existing characters') - parser.add_argument('--train_with_pretrained_model', type=str2bool, default=True, - help='whether to train with pretrained model') - parser.add_argument('--preserved', type=int, default=4, - help='Number of preserved models') - - args = parser.parse_args() - model_dir = os.path.join("./", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = 
f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.max_epochs = args.max_epochs - hparams.cont = args.cont - hparams.drop_speaker_embed = args.drop_speaker_embed - hparams.train_with_pretrained_model = args.train_with_pretrained_model - hparams.preserved = args.preserved - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/dotmet/Real-ESRGAN-Enhanced-Anime-Diffusion/interface.py b/spaces/dotmet/Real-ESRGAN-Enhanced-Anime-Diffusion/interface.py deleted file mode 100644 index 6e279a2c1e37f066669920b229665aba68ea8017..0000000000000000000000000000000000000000 --- a/spaces/dotmet/Real-ESRGAN-Enhanced-Anime-Diffusion/interface.py +++ /dev/null @@ -1,140 +0,0 @@ -import cv2 -from PIL import Image -import glob -import os -from basicsr.archs.rrdbnet_arch import RRDBNet -from basicsr.utils.download_util import load_file_from_url - -from realesrgan import RealESRGANer -from realesrgan.archs.srvgg_arch import SRVGGNetCompact - -def realEsrgan(model_name="RealESRGAN_x4plus_anime_6B", - model_path = None, - input_dir = 'inputs', - output_dir = 'results', - denoise_strength = 0.5, - outscale = 4, - suffix = 'out', - tile = 200, - tile_pad = 10, - pre_pad = 0, - face_enhance = True, - alpha_upsampler = 'realsrgan', - 
out_ext = 'auto', - fp32 = True, - gpu_id = None, - ): - - # determine models according to model names - model_name = model_name.split('.')[0] - if model_name == 'RealESRGAN_x4plus': # x4 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - netscale = 4 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth'] - elif model_name == 'RealESRNet_x4plus': # x4 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - netscale = 4 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth'] - elif model_name == 'RealESRGAN_x4plus_anime_6B': # x4 RRDBNet model with 6 blocks - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) - netscale = 4 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth'] - elif model_name == 'RealESRGAN_x2plus': # x2 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) - netscale = 2 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth'] - elif model_name == 'realesr-animevideov3': # x4 VGG-style model (XS size) - model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') - netscale = 4 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'] - elif model_name == 'realesr-general-x4v3': # x4 VGG-style model (S size) - model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') - netscale = 4 - file_url = [ - 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth', - 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth' - ] - - # determine model paths - if model_path is None: - model_path = os.path.join('weights', model_name + '.pth') - if not os.path.isfile(model_path): - ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) - for url in file_url: - # model_path will be updated - model_path = load_file_from_url( - url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None) - - # use dni to control the denoise strength - dni_weight = None - if model_name == 'realesr-general-x4v3' and denoise_strength != 1: - wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3') - model_path = [model_path, wdn_model_path] - dni_weight = [denoise_strength, 1 - denoise_strength] - - # restorer - upsampler = RealESRGANer( - scale=netscale, - model_path=model_path, - dni_weight=dni_weight, - model=model, - tile=tile, - tile_pad=tile_pad, - pre_pad=pre_pad, - half=not fp32, - gpu_id=gpu_id) - - if face_enhance: # Use GFPGAN for face enhancement - from gfpgan import GFPGANer - face_enhancer = GFPGANer( - model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', - upscale=outscale, - arch='clean', - channel_multiplier=2, - bg_upsampler=upsampler) - os.makedirs(output_dir, exist_ok=True) - - if os.path.isfile(input_dir): - paths = [input_dir] - else: - paths = sorted(glob.glob(os.path.join(input_dir, '*'))) - - Imgs = [] - for idx, path in enumerate(paths): - imgname, extension = os.path.splitext(os.path.basename(path)) - print(f'Scaling x{outscale}:', path) - - img = cv2.imread(path, 
cv2.IMREAD_UNCHANGED) - if len(img.shape) == 3 and img.shape[2] == 4: - img_mode = 'RGBA' - else: - img_mode = None - - try: - if face_enhance: - _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) - else: - output, _ = upsampler.enhance(img, outscale=outscale) - except RuntimeError as error: - print('Error', error) - print('If you encounter CUDA or RAM out of memory, try to set --tile with a smaller number.') - else: - if out_ext == 'auto': - extension = extension[1:] - else: - extension = out_ext - if img_mode == 'RGBA': # RGBA images should be saved in png format - extension = 'png' - if suffix == '': - save_path = os.path.join(output_dir, f'{imgname}.{extension}') - else: - save_path = os.path.join(output_dir, f'{imgname}_{suffix}.{extension}') - - cv2.imwrite(save_path, output) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = Image.fromarray(img) - Imgs.append(img) - - return Imgs - diff --git a/spaces/duong11111/ChatGPT4.0/app.py b/spaces/duong11111/ChatGPT4.0/app.py deleted file mode 100644 index 7e09e57ef928fd2451fd0ed1295d0994ca75d026..0000000000000000000000000000000000000000 --- a/spaces/duong11111/ChatGPT4.0/app.py +++ /dev/null @@ -1,193 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Huggingface provided GPT4 OpenAI API Key -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") - -#Inferenec function -def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" - } - print(f"system message is ^^ {system_msg}") - if system_msg.strip() == '': - initial_message = [{"role": "user", "content": f"{inputs}"},] - multi_turn_message = [] - else: - initial_message= [{"role": "system", "content": system_msg}, - {"role": "user", "content": f"{inputs}"},] - multi_turn_message = [{"role": "system", "content": system_msg},] - - if chat_counter == 0 : - payload = { - "model": "gpt-4", - "messages": initial_message , - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - print(f"chat_counter - {chat_counter}") - else: #if chat_counter != 0 : - messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},] - for data in chatbot: - user = {} - user["role"] = "user" - user["content"] = data[0] - assistant = {} - assistant["role"] = "assistant" - assistant["content"] = data[1] - messages.append(user) - messages.append(assistant) - temp = {} - temp["role"] = "user" - temp["content"] = inputs - messages.append(temp) - #messages - payload = { - "model": "gpt-4", - "messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0,} - - chat_counter+=1 - - history.append(inputs) - print(f"Logging : payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - print(f"Logging : response code - {response}") - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - # check whether each line is 
non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history} - -#Resetting to blank -def reset_textbox(): - return gr.update(value='') - -#to set a component as visible=False -def set_visible_false(): - return gr.update(visible=False) - -#to set a component as visible=True -def set_visible_true(): - return gr.update(visible=True) - -title = """

🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming

""" - -#display message for themes feature -theme_addon_msg = """
🌟 Discover Gradio Themes with this Demo, featuring v3.22.0! Gradio v3.23.0 also enables seamless Theme sharing. You can develop or modify a theme, and send it to the hub using simple theme.push_to_hub(). -
🏆Participate in Gradio's Theme Building Hackathon to exhibit your creative flair and win fabulous rewards! Join here - Gradio-Themes-Party🎨 🏆
-""" - -#Using info to add additional information about System message in GPT4 -system_msg_info = """A conversation could begin with a system message to gently instruct the assistant. -System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'""" - -#Modifying existing Gradio Theme -theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green", - text_size=gr.themes.sizes.text_lg) - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""

🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌

""") - gr.HTML(theme_addon_msg) - gr.HTML('''
Duplicate the Space and run securely with your OpenAI API Key
''') - - with gr.Column(elem_id = "col_container"): - #GPT4 API Key is provided by Huggingface - with gr.Accordion(label="System message:", open=False): - system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value="") - accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False) - chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot") - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") - state = gr.State([]) - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button().style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #top_p, temperature - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - #Event handling - inputs.submit( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - - inputs.submit(set_visible_false, [], [system_msg]) - b1.click(set_visible_false, [], [system_msg]) - inputs.submit(set_visible_true, [], [accordion_msg]) - b1.click(set_visible_true, [], [accordion_msg]) - - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #Examples - with gr.Accordion(label="Examples for System message:", open=False): - gr.Examples( - examples = [["""You are an AI programming assistant. - - - Follow the user's requirements carefully and to the letter. - - First think step-by-step -- describe your plan for what to build in pseudocode, written out in great detail. - - Then output the code in a single code block. - - Minimize any other prose."""], ["""You are ComedianGPT who is a helpful assistant. 
You answer everything with a joke and witty replies."""], - ["You are ChefGPT, a helpful assistant who answers questions with culinary expertise and a pinch of humor."], - ["You are FitnessGuruGPT, a fitness expert who shares workout tips and motivation with a playful twist."], - ["You are SciFiGPT, an AI assistant who discusses science fiction topics with a blend of knowledge and wit."], - ["You are PhilosopherGPT, a thoughtful assistant who responds to inquiries with philosophical insights and a touch of humor."], - ["You are EcoWarriorGPT, a helpful assistant who shares environment-friendly advice with a lighthearted approach."], - ["You are MusicMaestroGPT, a knowledgeable AI who discusses music and its history with a mix of facts and playful banter."], - ["You are SportsFanGPT, an enthusiastic assistant who talks about sports and shares amusing anecdotes."], - ["You are TechWhizGPT, a tech-savvy AI who can help users troubleshoot issues and answer questions with a dash of humor."], - ["You are FashionistaGPT, an AI fashion expert who shares style advice and trends with a sprinkle of wit."], - ["You are ArtConnoisseurGPT, an AI assistant who discusses art and its history with a blend of knowledge and playful commentary."], - ["You are a helpful assistant that provides detailed and accurate information."], - ["You are an assistant that speaks like Shakespeare."], - ["You are a friendly assistant who uses casual language and humor."], - ["You are a financial advisor who gives expert advice on investments and budgeting."], - ["You are a health and fitness expert who provides advice on nutrition and exercise."], - ["You are a travel consultant who offers recommendations for destinations, accommodations, and attractions."], - ["You are a movie critic who shares insightful opinions on films and their themes."], - ["You are a history enthusiast who loves to discuss historical events and figures."], - ["You are a tech-savvy assistant who can help users troubleshoot issues and answer questions about gadgets and software."], - ["You are an AI poet who can compose creative and evocative poems on any given topic."],], - inputs = system_msg,) - -demo.queue(max_size=99, concurrency_count=20).launch(debug=True) \ No newline at end of file diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py deleted file mode 100644 index af04e614c8f1ac43faf363b1a9f6bfd667fbde21..0000000000000000000000000000000000000000 --- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py +++ /dev/null @@ -1,201 +0,0 @@ -import torch -import commons -import models - -import math -from torch import nn -from torch.nn import functional as F - -import modules -import attentions - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emotion_embedding = emotion_embedding - - if self.n_vocab != 0: - self.emb = nn.Embedding(n_vocab, 
hidden_channels) - if emotion_embedding: - self.emo_proj = nn.Linear(1024, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, emotion_embedding=None): - if self.n_vocab != 0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - if emotion_embedding is not None: - print("emotion added") - x = x + self.emo_proj(emotion_embedding.unsqueeze(1)) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class SynthesizerTrn(models.SynthesizerTrn): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - emotion_embedding=False, - ONNX_dir="./ONNX_net/", - **kwargs): - - super().__init__( - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=n_speakers, - gin_channels=gin_channels, - use_sdp=use_sdp, - **kwargs - ) - self.ONNX_dir = ONNX_dir - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, - emotion_embedding=None): - from ONNXVITS_utils import runonnx - with torch.no_grad(): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - # logw = self.dp(x, x_mask, g=g, 
reverse=True, noise_scale=noise_scale_w) - logw = runonnx(f"{self.ONNX_dir}dp.onnx", x=x.numpy(), x_mask=x_mask.numpy(), g=g.numpy()) - logw = torch.from_numpy(logw[0]) - - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - # z = self.flow(z_p, y_mask, g=g, reverse=True) - z = runonnx(f"{self.ONNX_dir}flow.onnx", z_p=z_p.numpy(), y_mask=y_mask.numpy(), g=g.numpy()) - z = torch.from_numpy(z[0]) - - # o = self.dec((z * y_mask)[:,:,:max_len], g=g) - o = runonnx(f"{self.ONNX_dir}dec.onnx", z_in=(z * y_mask)[:, :, :max_len].numpy(), g=g.numpy()) - o = torch.from_numpy(o[0]) - - return o, attn, y_mask, (z, z_p, m_p, logs_p) \ No newline at end of file diff --git a/spaces/editing-images/project/static/js/bulma-carousel.min.js b/spaces/editing-images/project/static/js/bulma-carousel.min.js deleted file mode 100644 index 5fff0695f00cf9da60dd87aa72c51367b00e92ff..0000000000000000000000000000000000000000 --- a/spaces/editing-images/project/static/js/bulma-carousel.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaCarousel=e():t.bulmaCarousel=e()}("undefined"!=typeof self?self:this,function(){return function(i){var n={};function s(t){if(n[t])return n[t].exports;var e=n[t]={i:t,l:!1,exports:{}};return i[t].call(e.exports,e,e.exports,s),e.l=!0,e.exports}return s.m=i,s.c=n,s.d=function(t,e,i){s.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:i})},s.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return s.d(e,"a",e),e},s.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},s.p="",s(s.s=5)}([function(t,e,i){"use strict";i.d(e,"d",function(){return s}),i.d(e,"e",function(){return r}),i.d(e,"b",function(){return o}),i.d(e,"c",function(){return a}),i.d(e,"a",function(){return l});var n=i(2),s=function(e,t){(t=Array.isArray(t)?t:t.split(" ")).forEach(function(t){e.classList.remove(t)})},r=function(t){return t.getBoundingClientRect().width||t.offsetWidth},o=function(t){return t.getBoundingClientRect().height||t.offsetHeight},a=function(t){var e=1=t._x&&this._x<=e._x&&this._y>=t._y&&this._y<=e._y}},{key:"constrain",value:function(t,e){if(t._x>e._x||t._y>e._y)return this;var i=this._x,n=this._y;return null!==t._x&&(i=Math.max(i,t._x)),null!==e._x&&(i=Math.min(i,e._x)),null!==t._y&&(n=Math.max(n,t._y)),null!==e._y&&(n=Math.min(n,e._y)),new s(i,n)}},{key:"reposition",value:function(t){t.style.top=this._y+"px",t.style.left=this._x+"px"}},{key:"toString",value:function(){return"("+this._x+","+this._y+")"}},{key:"x",get:function(){return this._x},set:function(){var 
t=0this.state.length-this.slidesToShow&&!this.options.centerMode?this.state.next=this.state.index:this.state.next=this.state.index+this.slidesToScroll,this.show()}},{key:"previous",value:function(){this.options.loop||this.options.infinite||0!==this.state.index?this.state.next=this.state.index-this.slidesToScroll:this.state.next=this.state.index,this.show()}},{key:"start",value:function(){this._autoplay.start()}},{key:"pause",value:function(){this._autoplay.pause()}},{key:"stop",value:function(){this._autoplay.stop()}},{key:"show",value:function(t){var e=1this.options.slidesToShow&&(this.options.slidesToScroll=this.slidesToShow),this._breakpoint.init(),this.state.index>=this.state.length&&0!==this.state.index&&(this.state.index=this.state.index-this.slidesToScroll),this.state.length<=this.slidesToShow&&(this.state.index=0),this._ui.wrapper.appendChild(this._navigation.init().render()),this._ui.wrapper.appendChild(this._pagination.init().render()),this.options.navigationSwipe?this._swipe.bindEvents():this._swipe._bindEvents(),this._breakpoint.apply(),this._slides.forEach(function(t){return e._ui.container.appendChild(t)}),this._transitioner.init().apply(!0,this._setHeight.bind(this)),this.options.autoplay&&this._autoplay.init().start()}},{key:"destroy",value:function(){var e=this;this._unbindEvents(),this._items.forEach(function(t){e.element.appendChild(t)}),this.node.remove()}},{key:"id",get:function(){return this._id}},{key:"index",set:function(t){this._index=t},get:function(){return this._index}},{key:"length",set:function(t){this._length=t},get:function(){return this._length}},{key:"slides",get:function(){return this._slides},set:function(t){this._slides=t}},{key:"slidesToScroll",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToScroll():1}},{key:"slidesToShow",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToShow():1}},{key:"direction",get:function(){return"rtl"===this.element.dir.toLowerCase()||"rtl"===this.element.style.direction?"rtl":"ltr"}},{key:"wrapper",get:function(){return this._ui.wrapper}},{key:"wrapperWidth",get:function(){return this._wrapperWidth||0}},{key:"container",get:function(){return this._ui.container}},{key:"containerWidth",get:function(){return this._containerWidth||0}},{key:"slideWidth",get:function(){return this._slideWidth||0}},{key:"transitioner",get:function(){return this._transitioner}}],[{key:"attach",value:function(){var i=this,t=0>t/4).toString(16)})}},function(t,e,i){"use strict";var n=i(3),s=i(8),r=function(){function n(t,e){for(var i=0;i=t.slider.state.length-t.slider.slidesToShow&&!t.slider.options.loop&&!t.slider.options.infinite?t.stop():t.slider.next())},this.slider.options.autoplaySpeed))}},{key:"stop",value:function(){this._interval=clearInterval(this._interval),this.emit("stop",this)}},{key:"pause",value:function(){var t=this,e=0parseInt(e.changePoint,10)}),this._currentBreakpoint=this._getActiveBreakpoint(),this}},{key:"destroy",value:function(){this._unbindEvents()}},{key:"_bindEvents",value:function(){window.addEventListener("resize",this[s]),window.addEventListener("orientationchange",this[s])}},{key:"_unbindEvents",value:function(){window.removeEventListener("resize",this[s]),window.removeEventListener("orientationchange",this[s])}},{key:"_getActiveBreakpoint",value:function(){var t=!0,e=!1,i=void 0;try{for(var n,s=this.options.breakpoints[Symbol.iterator]();!(t=(n=s.next()).done);t=!0){var r=n.value;if(r.changePoint>=window.innerWidth)return 
r}}catch(t){e=!0,i=t}finally{try{!t&&s.return&&s.return()}finally{if(e)throw i}}return this._defaultBreakpoint}},{key:"getSlidesToShow",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToShow:this._defaultBreakpoint.slidesToShow}},{key:"getSlidesToScroll",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToScroll:this._defaultBreakpoint.slidesToScroll}},{key:"apply",value:function(){this.slider.state.index>=this.slider.state.length&&0!==this.slider.state.index&&(this.slider.state.index=this.slider.state.index-this._currentBreakpoint.slidesToScroll),this.slider.state.length<=this._currentBreakpoint.slidesToShow&&(this.slider.state.index=0),this.options.loop&&this.slider._loop.init().apply(),this.options.infinite&&this.slider._infinite.init().apply(),this.slider._setDimensions(),this.slider._transitioner.init().apply(!0,this.slider._setHeight.bind(this.slider)),this.slider._setClasses(),this.slider._navigation.refresh(),this.slider._pagination.refresh()}},{key:s,value:function(t){var e=this._getActiveBreakpoint();e.slidesToShow!==this._currentBreakpoint.slidesToShow&&(this._currentBreakpoint=e,this.apply())}}]),e}();e.a=r},function(t,e,i){"use strict";var n=function(){function n(t,e){for(var i=0;ithis.slider.state.length-1-this._infiniteCount;i-=1)e=i-1,t.unshift(this._cloneSlide(this.slider.slides[e],e-this.slider.state.length));for(var n=[],s=0;s=this.slider.state.length?(this.slider.state.index=this.slider.state.next=this.slider.state.next-this.slider.state.length,this.slider.transitioner.apply(!0)):this.slider.state.next<0&&(this.slider.state.index=this.slider.state.next=this.slider.state.length+this.slider.state.next,this.slider.transitioner.apply(!0)))}},{key:"_cloneSlide",value:function(t,e){var i=t.cloneNode(!0);return i.dataset.sliderIndex=e,i.dataset.cloned=!0,(i.querySelectorAll("[id]")||[]).forEach(function(t){t.setAttribute("id","")}),i}}]),e}();e.a=s},function(t,e,i){"use strict";var n=i(12),s=function(){function n(t,e){for(var i=0;ithis.slider.state.length-this.slider.slidesToShow&&Object(n.a)(this.slider._slides[this.slider.state.length-1],this.slider.wrapper)?this.slider.state.next=0:this.slider.state.next=Math.min(Math.max(this.slider.state.next,0),this.slider.state.length-this.slider.slidesToShow):this.slider.state.next=0:this.slider.state.next<=0-this.slider.slidesToScroll?this.slider.state.next=this.slider.state.length-this.slider.slidesToShow:this.slider.state.next=0)}}]),e}();e.a=r},function(t,e,i){"use strict";i.d(e,"a",function(){return n});var n=function(t,e){var i=t.getBoundingClientRect();return e=e||document.documentElement,0<=i.top&&0<=i.left&&i.bottom<=(window.innerHeight||e.clientHeight)&&i.right<=(window.innerWidth||e.clientWidth)}},function(t,e,i){"use strict";var n=i(14),s=i(1),r=function(){function n(t,e){for(var 
i=0;ithis.slider.slidesToShow?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.remove("is-hidden"),0===this.slider.state.next?(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.remove("is-hidden")):this.slider.state.next>=this.slider.state.length-this.slider.slidesToShow&&!this.slider.options.centerMode?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden")):this.slider.state.next>=this.slider.state.length-1&&this.slider.options.centerMode&&(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden"))):(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.add("is-hidden")))}},{key:"render",value:function(){return this.node}}]),e}();e.a=o},function(t,e,i){"use strict";e.a=function(t){return'
'+t.previous+'
\n
'+t.next+"
"}},function(t,e,i){"use strict";var n=i(16),s=i(17),r=i(1),o=function(){function n(t,e){for(var i=0;ithis.slider.slidesToShow){for(var t=0;t<=this._count;t++){var e=document.createRange().createContextualFragment(Object(s.a)()).firstChild;e.dataset.index=t*this.slider.slidesToScroll,this._pages.push(e),this._ui.container.appendChild(e)}this._bindEvents()}}},{key:"onPageClick",value:function(t){this._supportsPassive||t.preventDefault(),this.slider.state.next=t.currentTarget.dataset.index,this.slider.show()}},{key:"onResize",value:function(){this._draw()}},{key:"refresh",value:function(){var e=this,t=void 0;(t=this.slider.options.infinite?Math.ceil(this.slider.state.length-1/this.slider.slidesToScroll):Math.ceil((this.slider.state.length-this.slider.slidesToShow)/this.slider.slidesToScroll))!==this._count&&(this._count=t,this._draw()),this._pages.forEach(function(t){t.classList.remove("is-active"),parseInt(t.dataset.index,10)===e.slider.state.next%e.slider.state.length&&t.classList.add("is-active")})}},{key:"render",value:function(){return this.node}}]),e}();e.a=a},function(t,e,i){"use strict";e.a=function(){return'
'}},function(t,e,i){"use strict";e.a=function(){return'
'}},function(t,e,i){"use strict";var n=i(4),s=i(1),r=function(){function n(t,e){for(var i=0;iMath.abs(this._lastTranslate.y)&&(this._supportsPassive||t.preventDefault(),t.stopPropagation())}}},{key:"onStopDrag",value:function(t){this._origin&&this._lastTranslate&&(Math.abs(this._lastTranslate.x)>.2*this.width?this._lastTranslate.x<0?this.slider.next():this.slider.previous():this.slider.show(!0)),this._origin=null,this._lastTranslate=null}}]),e}();e.a=o},function(t,e,i){"use strict";var n=i(20),s=i(21),r=function(){function n(t,e){for(var i=0;it.x?(s.x=0,this.slider.state.next=0):this.options.vertical&&Math.abs(this._position.y)>t.y&&(s.y=0,this.slider.state.next=0)),this._position.x=s.x,this._position.y=s.y,this.options.centerMode&&(this._position.x=this._position.x+this.slider.wrapperWidth/2-Object(o.e)(i)/2),"rtl"===this.slider.direction&&(this._position.x=-this._position.x,this._position.y=-this._position.y),this.slider.container.style.transform="translate3d("+this._position.x+"px, "+this._position.y+"px, 0)",n.x>t.x&&this.slider.transitioner.end()}}},{key:"onTransitionEnd",value:function(t){"translate"===this.options.effect&&(this.transitioner.isAnimating()&&t.target==this.slider.container&&this.options.infinite&&this.slider._infinite.onTransitionEnd(t),this.transitioner.end())}}]),n}();e.a=n},function(t,e,i){"use strict";e.a={initialSlide:0,slidesToScroll:1,slidesToShow:1,navigation:!0,navigationKeys:!0,navigationSwipe:!0,pagination:!0,loop:!1,infinite:!1,effect:"translate",duration:300,timing:"ease",autoplay:!1,autoplaySpeed:3e3,pauseOnHover:!0,breakpoints:[{changePoint:480,slidesToShow:1,slidesToScroll:1},{changePoint:640,slidesToShow:2,slidesToScroll:2},{changePoint:768,slidesToShow:3,slidesToScroll:3}],onReady:null,icons:{previous:'\n \n ',next:'\n \n '}}},function(t,e,i){"use strict";e.a=function(t){return'
\n
\n
'}},function(t,e,i){"use strict";e.a=function(){return'
'}}]).default}); \ No newline at end of file diff --git a/spaces/elplaguister/Yuuka_TTS/src/modules.py b/spaces/elplaguister/Yuuka_TTS/src/modules.py deleted file mode 100644 index 4036479a37599788c49b02225e5dd88107ff11d9..0000000000000000000000000000000000000000 --- a/spaces/elplaguister/Yuuka_TTS/src/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from src import commons -from src.commons import init_weights, get_padding -from src.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + 
y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in 
zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, 
x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/evaluate-metric/squad/README.md b/spaces/evaluate-metric/squad/README.md deleted file mode 100644 index 08e030c599698bd0bdf7aa986f1bc0c14bb792cf..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/squad/README.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: SQuAD -emoji: 🤗 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -tags: -- evaluate -- metric -description: >- - This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). - - Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by - crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, - from the corresponding reading passage, or the question might be unanswerable. ---- - -# Metric Card for SQuAD - -## Metric description -This metric wraps the official scoring script for version 1 of the [Stanford Question Answering Dataset (SQuAD)](https://huggingface.co/datasets/squad). - -SQuAD is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. - -## How to use - -The metric takes two files or two lists of question-answers dictionaries as inputs : one with the predictions of the model and the other with the references to be compared to: - -```python -from evaluate import load -squad_metric = load("squad") -results = squad_metric.compute(predictions=predictions, references=references) -``` -## Output values - -This metric outputs a dictionary with two values: the average exact match score and the average [F1 score](https://huggingface.co/metrics/f1). 
- -``` -{'exact_match': 100.0, 'f1': 100.0} -``` - -The range of `exact_match` is 0-100, where 0.0 means no answers were matched and 100.0 means all answers were matched. - -The range of `f1` is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall. - -### Values from popular papers -The [original SQuAD paper](https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf) reported an F1 score of 51.0% and an Exact Match score of 40.0%. They also report that human performance on the dataset represents an F1 score of 90.5% and an Exact Match score of 80.3%. - -For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/squad). - -## Examples - -Maximal values for both exact match and F1 (perfect match): - -```python -from evaluate import load -squad_metric = load("squad") -predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}] -references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] -results = squad_metric.compute(predictions=predictions, references=references) -results -{'exact_match': 100.0, 'f1': 100.0} -``` - -Minimal values for both exact match and F1 (no match): - -```python -from evaluate import load -squad_metric = load("squad") -predictions = [{'prediction_text': '1999', 'id': '56e10a3be3433e1400422b22'}] -references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] -results = squad_metric.compute(predictions=predictions, references=references) -results -{'exact_match': 0.0, 'f1': 0.0} -``` - -Partial match (2 out of 3 answers correct) : - -```python -from evaluate import load -squad_metric = load("squad") -predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}, {'prediction_text': 'Beyonce', 'id': '56d2051ce7d4791d0090260b'}, {'prediction_text': 'climate change', 'id': '5733b5344776f419006610e1'}] -references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}, {'answers': {'answer_start': [233], 'text': ['Beyoncé and Bruno Mars']}, 'id': '56d2051ce7d4791d0090260b'}, {'answers': {'answer_start': [891], 'text': ['climate change']}, 'id': '5733b5344776f419006610e1'}] -results = squad_metric.compute(predictions=predictions, references=references) -results -{'exact_match': 66.66666666666667, 'f1': 66.66666666666667} -``` - -## Limitations and bias -This metric works only with datasets that have the same format as [SQuAD v.1 dataset](https://huggingface.co/datasets/squad). - -The SQuAD dataset does contain a certain amount of noise, such as duplicate questions as well as missing answers, but these represent a minority of the 100,000 question-answer pairs. Also, neither exact match nor F1 score reflect whether models do better on certain types of questions (e.g. who questions) or those that cover a certain gender or geographical area -- carrying out more in-depth error analysis can complement these numbers. 
- - -## Citation - - @inproceedings{Rajpurkar2016SQuAD10, - title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, - author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, - booktitle={EMNLP}, - year={2016} - } - -## Further References - -- [The Stanford Question Answering Dataset: Background, Challenges, Progress (blog post)](https://rajpurkar.github.io/mlx/qa-and-squad/) -- [Hugging Face Course -- Question Answering](https://huggingface.co/course/chapter7/7) diff --git a/spaces/failfast/2D-GameCreator/Dockerfile b/spaces/failfast/2D-GameCreator/Dockerfile deleted file mode 100644 index a6a6a69b5ff641c6360aec9fc08cb8df9dbad434..0000000000000000000000000000000000000000 --- a/spaces/failfast/2D-GameCreator/Dockerfile +++ /dev/null @@ -1,63 +0,0 @@ -FROM node:18-alpine AS base - -# Install dependencies only when needed -FROM base AS deps -# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed. -RUN apk add --no-cache libc6-compat -WORKDIR /app - -# Install dependencies based on the preferred package manager -COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./ -RUN \ - if [ -f yarn.lock ]; then yarn --frozen-lockfile; \ - elif [ -f package-lock.json ]; then npm ci; \ - elif [ -f pnpm-lock.yaml ]; then yarn global add pnpm && pnpm i --frozen-lockfile; \ - else echo "Lockfile not found." && exit 1; \ - fi - -# Uncomment the following lines if you want to use a secret at buildtime, -# for example to access your private npm packages -# RUN --mount=type=secret,id=HF_EXAMPLE_SECRET,mode=0444,required=true \ -# $(cat /run/secrets/HF_EXAMPLE_SECRET) - -# Rebuild the source code only when needed -FROM base AS builder -WORKDIR /app -COPY --from=deps /app/node_modules ./node_modules -COPY . . - -# Next.js collects completely anonymous telemetry data about general usage. -# Learn more here: https://nextjs.org/telemetry -# Uncomment the following line in case you want to disable telemetry during the build. -# ENV NEXT_TELEMETRY_DISABLED 1 - -# RUN yarn build - -# If you use yarn, comment out this line and use the line above -RUN npm run build - -# Production image, copy all the files and run next -FROM base AS runner -WORKDIR /app - -ENV NODE_ENV production -# Uncomment the following line in case you want to disable telemetry during runtime. -# ENV NEXT_TELEMETRY_DISABLED 1 - -RUN addgroup --system --gid 1001 nodejs -RUN adduser --system --uid 1001 nextjs - -COPY --from=builder /app/public ./public - -# Automatically leverage output traces to reduce image size -# https://nextjs.org/docs/advanced-features/output-file-tracing -COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./ -COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static - -USER nextjs - -EXPOSE 3000 - -ENV PORT 3000 - -CMD ["node", "server.js"] diff --git a/spaces/falterWliame/Face_Mask_Detection/Divinity Original Sin 2 Adult Mod.md b/spaces/falterWliame/Face_Mask_Detection/Divinity Original Sin 2 Adult Mod.md deleted file mode 100644 index 69cfc16d8a945d3bea71c2ae9530bbea6a3dfa21..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Divinity Original Sin 2 Adult Mod.md +++ /dev/null @@ -1,6 +0,0 @@ -

Divinity Original Sin 2 Adult Mod





-
-Divinity Original Sin 2 Graphics Mod – HDR MOD / SweetFX ... Divinity Original Sin 2 PC ... Divinity 2 mods - Adult Gaming - LoversLab. www.loverslab.com. 4d29de3e1b
-
-
-

diff --git a/spaces/fatiXbelha/sd/Apk Mulung Koin TikTok Terbaru 2023 Cara Gampang Dapat Uang dari TikTok.md b/spaces/fatiXbelha/sd/Apk Mulung Koin TikTok Terbaru 2023 Cara Gampang Dapat Uang dari TikTok.md deleted file mode 100644 index d0a560aad0cc47fc470c1533a0566ef6971c5b35..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Apk Mulung Koin TikTok Terbaru 2023 Cara Gampang Dapat Uang dari TikTok.md +++ /dev/null @@ -1,100 +0,0 @@ -
-

Apk Mulung Koin TikTok: What Is It and How to Use It

-

TikTok is one of the most popular social media platforms in Indonesia, with millions of users creating and sharing short videos every day. But did you know that you can also make money from TikTok? One way to do that is by using TikTok Coins, the in-app currency that you can buy or earn from other users. However, some people are looking for shortcuts to get free coins, such as using a modded application called Apk Mulung Koin TikTok. But what is this application, how does it work, and what are the risks and drawbacks of using it? In this article, we will explain everything you need to know about Apk Mulung Koin TikTok, and provide some alternatives to get coins legally and safely.

-




-

What Is TikTok and Why Is It Popular in Indonesia

-

TikTok is a social media platform that allows users to create and share short videos, usually between 15 to 60 seconds long. Users can choose from a variety of filters, effects, stickers, music, sounds, and hashtags to make their videos more fun and creative. Users can also watch videos from other users, follow their favorite creators, comment, like, share, and chat with them.

-

TikTok offers features and content categories that appeal to different audiences. For example, some users like to watch or make videos about comedy, dance, beauty, fashion, food, sports, education, travel, pets, art, gaming, and more. Others join challenges, duets, trends, or viral moments that are popular on the platform, or use TikTok as a source of information, inspiration, entertainment, or social interaction.

-

TikTok has a large and active user base in Indonesia, especially among young people. According to a report by App Annie in June 2020, Indonesia ranked as the second-largest market for TikTok downloads globally. According to another report by We Are Social and Hootsuite in January 2020, Indonesia had 81.7 million active social media users, of which 22.1 million were TikTok users. This means that TikTok had a penetration rate of 27.1% among the social media users in Indonesia, making it the fourth most popular social media platform in the country, after Facebook, Instagram, and YouTube. TikTok also ranked as the second most downloaded app in Indonesia in 2020, according to App Annie.
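The 27.1% figure follows directly from the two user counts quoted above. A minimal sketch of that arithmetic, using only the numbers given in this paragraph:

```python
# Figures quoted above (We Are Social and Hootsuite, January 2020).
social_media_users = 81.7e6  # active social media users in Indonesia
tiktok_users = 22.1e6        # TikTok users among them

penetration = tiktok_users / social_media_users * 100
print(f"TikTok penetration among social media users: {penetration:.1f}%")  # ~27.1%
```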

-

One of the reasons why TikTok is so popular in Indonesia is because it offers a platform for users to express themselves creatively and authentically, without being limited by language, culture, or location barriers. TikTok also provides a variety of content that caters to different interests and preferences, from comedy and music to education and social issues. TikTok also enables users to connect with each other and form communities based on shared passions and values.

-

What Are TikTok Coins and How to Get Them

-

TikTok Coins are the in-app currency that users can buy with real money or earn from other users. Users can buy coins through the official TikTok app, using various payment methods such as credit cards, debit cards, or mobile wallets. The price of coins may vary depending on the exchange rate and the country of purchase. For example, in the US, 100 coins cost $1.29, while in Indonesia, 100 coins cost Rp 14,000.
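As a rough illustration of how those two price points compare, the sketch below uses only the figures quoted above; the USD/IDR exchange rate is an assumption added for illustration, not a number stated in this article:

```python
# Prices quoted above for a 100-coin pack.
usd_per_100_coins = 1.29    # US price in dollars
idr_per_100_coins = 14_000  # Indonesian price in rupiah

# Assumed exchange rate, for illustration only.
idr_per_usd = 15_000

us_price_per_coin = usd_per_100_coins / 100
id_price_per_coin_usd = idr_per_100_coins / 100 / idr_per_usd

print(f"US price per coin:         ${us_price_per_coin:.4f}")
print(f"Indonesian price per coin: ${id_price_per_coin_usd:.4f} (at the assumed rate)")
```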

-

TikTok Coins can be used to buy virtual gifts and send them to other users during livestreams. Livestreams are live video broadcasts that users can watch or create on TikTok. Users can interact with the livestreamer or other viewers through comments, likes, or gifts. Gifts are animated stickers or emojis that represent different amounts of coins. For example, a panda gift costs 5 coins, while a rainbow gift costs 100 coins. Sending gifts is a way to show appreciation, support, or admiration to the livestreamer or other users.

-


-

TikTok Coins can also be exchanged for cash or other rewards through various methods. One of them is by converting coins into diamonds, which are another in-app currency that users can earn from receiving gifts during livestreams. One diamond is equivalent to one coin. Users can then withdraw diamonds as cash through PayPal or other third-party platforms, depending on their country and eligibility. The minimum amount of diamonds that can be withdrawn is 10,000, which is equivalent to $50. Another method is by participating in official challenges, events, or campaigns by TikTok or its partners, and earning rewards or prizes in exchange for coins.
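To make the coin-to-diamond-to-cash math concrete, here is a minimal sketch based only on the figures in this paragraph (one diamond per coin, a 10,000-diamond withdrawal minimum, and $50 for 10,000 diamonds); the function name is just for illustration:

```python
# Conversion figures quoted above.
MIN_WITHDRAWAL_DIAMONDS = 10_000
USD_PER_10K_DIAMONDS = 50.0

def estimated_payout(diamonds: int) -> float:
    """Estimated cash value of a diamond balance; 0.0 if below the withdrawal minimum."""
    if diamonds < MIN_WITHDRAWAL_DIAMONDS:
        return 0.0
    return diamonds * (USD_PER_10K_DIAMONDS / 10_000)  # $0.005 per diamond

print(estimated_payout(10_000))  # 50.0
print(estimated_payout(8_000))   # 0.0, below the minimum
```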

-

What Is Apk Mulung Koin TikTok and How Does It Work

-

Apk Mulung Koin TikTok is a modded application that claims to help users get free coins from other users' livestreams. A modded application is an application that has been modified or hacked to alter its original features or functions. Apk Mulung Koin TikTok is not an official or authorized application by TikTok, and it is not available on the Google Play Store or the Apple App Store. Users have to download it from third-party websites or sources at their own risk.

-

Apk Mulung Koin TikTok works by automatically finding and joining livestreams that have coin giveaways. Coin giveaways are livestreams where the livestreamer offers to send gifts or coins to some of the viewers who join their broadcast. Apk Mulung Koin TikTok claims to be able to detect these livestreams and join them on behalf of the user, without requiring them to watch or interact with the livestreamer.

-

Apk Mulung Koin TikTok also claims to increase the chances of getting coins by tapping faster and more frequently than humanly possible. Tapping is a way to show interest or engagement during a livestream, and some livestreamers may reward their viewers who tap more often with gifts or coins. Apk Mulung Koin TikTok claims to be able to tap up to 10 times per second on the screen, which may increase the likelihood of receiving gifts or coins from the livestreamer.

-

What Are the Risks and Drawbacks of Using Apk Mulung Koin TikTok

-

Apk Mulung Koin TikTok may sound like an easy and convenient way to get free coins on TikTok, but it also comes with many risks and drawbacks that users should be aware of before using it.

-

First of all, Apk Mulung Koin TikTok is not an official or authorized application by TikTok, and may violate its terms of service and policies. By using Apk Mulung Koin TikTok, users may be breaking the rules and regulations of the platform, and may face legal consequences or penalties. For example, TikTok may ban or suspend their accounts, delete their videos, or revoke their coins or diamonds. TikTok may also take legal action against them or the developers of Apk Mulung Koin TikTok for infringing its intellectual property rights or harming its reputation.

-

Secondly, Apk Mulung Koin TikTok may contain malware or viruses that can harm your device or steal your personal information. Malware or viruses are malicious software or programs that can damage your device, corrupt your files, or access your data without your permission. By downloading Apk Mulung Koin TikTok from third-party websites or sources, you may expose your device to these risks, and compromise your security and privacy. For example, Apk Mulung Koin TikTok may collect your TikTok login credentials, access your contacts, messages, photos, videos, or other sensitive information, or install other unwanted or harmful applications on your device.

-

Thirdly, Apk Mulung Koin TikTok may not work as advertised, or may result in your account being banned or suspended by TikTok. Apk Mulung Koin TikTok may not be able to find or join livestreams that have coin giveaways, or may not be able to tap faster or more frequently than other users. Apk Mulung Koin TikTok may also be detected by TikTok's security system, which may flag your account as suspicious or fraudulent, and prevent you from receiving or withdrawing coins or diamonds. Apk Mulung Koin TikTok may also cause your device to malfunction, crash, or freeze, due to its poor quality or compatibility issues.

-

What Are the Alternatives to Apk Mulung Koin TikTok

-

If you want to get coins on TikTok without using Apk Mulung Koin TikTok, there are some alternatives that you can try instead. These alternatives are legal and safe, and do not require you to download any modded applications or risk your account or device.

-

One of them is by creating quality content and engaging with your audience on TikTok, and receiving gifts from them during livestreams. If you have a talent, skill, passion, or message that you want to share with the world, you can use TikTok as a platform to showcase it and attract followers who appreciate it. You can also interact with your followers and other users through comments, likes, shares, chats, duets, challenges, trends, and more. By doing so, you can build a loyal and supportive fan base who may reward you with gifts or coins during your livestreams.

-

Another one is by participating in official challenges, events, or campaigns by TikTok or its partners, and earning rewards or prizes in exchange for coins. TikTok often launches various challenges, events, or campaigns that invite users to create and share videos on specific topics, themes, hashtags, or causes. Some of these challenges, events, or campaigns may offer rewards or prizes to the winners or participants who submit the best videos. These rewards or prizes may include coins, cash, or other rewards that you can use or enjoy. To participate in these challenges, events, or campaigns, you may need to use coins to enter or submit your videos, but the rewards or prizes may be worth more than the coins you spend.

-

A third one is by buying coins with real money through the official TikTok app, and supporting your favorite creators or causes. If you have some spare money that you want to spend on TikTok, you can buy coins through the app using various payment methods, and use them to send gifts to other users during livestreams. By doing so, you can show your appreciation, support, or admiration to the creators or causes that you like or care about. You can also receive thank-you messages, shout-outs, or other benefits from the users who receive your gifts.

-

Conclusion

-

Apk Mulung Koin TikTok is a modded application that claims to help users get free coins from other users' livestreams on TikTok. However, it is not an official or authorized application by TikTok, and it may have many risks and drawbacks that users should be aware of before using it. Apk Mulung Koin TikTok may violate TikTok's terms of service and policies, contain malware or viruses, or result in your account being banned or suspended by TikTok. Apk Mulung Koin TikTok may also not work as advertised, or may cause your device to malfunction, crash, or freeze.

-

If you want to get coins on TikTok without using Apk Mulung Koin TikTok, there are some alternatives that you can try instead. These alternatives are legal and safe, and do not require you to download any modded applications or risk your account or device. These alternatives are creating quality content and engaging with your audience on TikTok, participating in official challenges, events, or campaigns by TikTok or its partners, and buying coins with real money through the official TikTok app.

-

We hope that this article has helped you understand what Apk Mulung Koin TikTok is and how to use it. We also hope that you have learned some alternatives to get coins on TikTok without using Apk Mulung Koin TikTok. Thank you for reading and happy TikToking!

-

FAQs

-

Here are some frequently asked questions about Apk Mulung Koin TikTok and its alternatives.

-

Q: Is Apk Mulung Koin TikTok safe to use?

-

A: No, Apk Mulung Koin TikTok is not safe to use. It is a modded application that is not an official or authorized application by TikTok. It may violate TikTok's terms of service and policies, contain malware or viruses, or result in your account being banned or suspended by TikTok. It may also not work as advertised, or may cause your device to malfunction, crash, or freeze.

-

Q: How can I download Apk Mulung Koin TikTok?

-

A: We do not recommend downloading Apk Mulung Koin TikTok because of the risks and drawbacks mentioned above. However, if you still want to download it at your own risk, you can find it on some third-party websites or sources that offer modded applications. You may need to enable unknown sources on your device settings to install it.

-

Q: How can I get coins on TikTok without using Apk Mulung Koin TikTok?

-

A: You can get coins on TikTok without using Apk Mulung Koin TikTok by creating quality content and engaging with your audience on TikTok, participating in official challenges, events, or campaigns by TikTok or its partners, or buying coins with real money through the official TikTok app. These alternatives are legal and safe, and do not require you to download any modded applications or risk your account or device.

-

Q: How can I use coins on TikTok?

-

A: You can use coins on TikTok to buy virtual gifts and send them to other users during livestreams. Gifts are animated stickers or emojis that represent different amounts of coins. Sending gifts is a way to show appreciation, support, or admiration to the livestreamer or other users. You can also exchange coins for cash or other rewards through various methods, such as converting them into diamonds and withdrawing them through PayPal or other third-party platforms, or participating in official challenges, events, or campaigns by TikTok or its partners.

-

Q: How can I create quality content and engage with my audience on TikTok?

-

A: You can create quality content and engage with your audience on TikTok by following these tips:

-
    -
  • Choose a niche, topic, theme, or style that you are passionate about and that suits your personality and skills.
  • -
  • Use the various filters, effects, stickers, music, sounds, and hashtags that TikTok offers to make your videos more fun and creative.
  • -
  • Join challenges, duets, trends, or viral moments that are popular on the platform, and add your own twist or perspective to them.
  • -
  • Be consistent, authentic, and original in your content creation and posting schedule.
  • -
  • Interact with your followers and other users through comments, likes, shares, chats, duets, challenges, trends, and more.
  • -
  • Ask for feedback, suggestions, or opinions from your audience, and respond to them politely and respectfully.
  • -
  • Collaborate with other creators who have similar or complementary niches, topics, themes, or styles.
  • -
  • Livestream regularly and interact with your viewers in real time.
  • -

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Boost Your Academic Performance with EDUCATION POINT ONLINE - Free APK Download.md b/spaces/fatiXbelha/sd/Boost Your Academic Performance with EDUCATION POINT ONLINE - Free APK Download.md deleted file mode 100644 index 276ee12c449ab13f019a059f0492fcc2c8ad3d04..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Boost Your Academic Performance with EDUCATION POINT ONLINE - Free APK Download.md +++ /dev/null @@ -1,200 +0,0 @@ - -

Education Point Online: A Review of the App for College and University Students

-

Are you a college or university student looking for a convenient and effective way to prepare for your internal exams in B.Tech/M.Tech/B.Arch? If yes, then you might want to check out Education Point Online, an app that provides you with video lectures, notes, tests, and more for various subjects. In this article, we will review Education Point Online and tell you everything you need to know about it. We will also show you how to download, install, and use the app on your device. So, let's get started!

-

What is Education Point Online?

-

Education Point Online is an education app developed by Education Thor Media. It is designed for college and university students who are preparing for internal exams in B.Tech/M.Tech/B.Arch. The app offers access to video lectures, notes, tests, quizzes, mock tests, doubt sessions, discussion forums, and more for various subjects. The app also provides personalized feedback and guidance from experienced teachers who can help you improve your performance. The app has been available since December 2021 and has been downloaded over 10 thousand times. It has a rating of 4.2 out of 5 stars on Google Play Store.

-

education point online mod apk download


Download Zip: https://urllie.com/2uNzTT



-

Features and Benefits of Education Point Online

-

Education Point Online has many features and benefits that make it a useful app for students. Here are some of them:

-

- Access to video lectures, notes, and tests for various subjects

-

The app covers a wide range of subjects such as Mathematics, Physics, Chemistry, Computer Science, Electrical Engineering, Mechanical Engineering, Civil Engineering, Architecture, etc. You can watch video lectures from expert teachers who explain the concepts in a simple and clear way. You can also access notes and tests that are based on the latest syllabus and exam pattern. You can learn at your own pace and convenience with the app.

-

- Personalized feedback and guidance from experienced teachers

-

The app also provides you with personalized feedback and guidance from experienced teachers who can help you improve your performance. You can ask questions and doubts to the teachers anytime through the app. You can also get tips and tricks on how to solve problems faster and better. The teachers will also monitor your progress and suggest areas of improvement.

-

- Interactive quizzes and mock tests to assess your progress

-

The app also has interactive quizzes and mock tests that you can take to assess your progress. The quizzes and mock tests are designed to test your knowledge and understanding of the topics. They also help you practice time management and accuracy skills. You can get instant results and analysis of your performance after taking the quizzes and mock tests.

-

- Offline mode to download and watch lectures without internet

-

The app also has an offline mode that allows you to download and watch lectures without internet connection. This is useful if you have limited or no internet access or if you want to save data. You can download the lectures of your choice and watch them later offline.

-

- Live doubt sessions and discussion forums to clear your queries

-

The app also has live doubt sessions and discussion forums that you can join to clear your queries. The live doubt sessions are conducted by the teachers who can answer your questions and clear your doubts. The discussion forums are platforms where you can interact with other students and teachers and share your views and opinions on various topics. You can also learn from the experiences and insights of others through the discussion forums.

-

How to Download and Install Education Point Online

-

If you are interested in using Education Point Online, you need to download and install it on your device. Here are the steps to do so:

-

- For Android devices

-

If you have an Android device, you can download and install Education Point Online from Google Play Store. Here is how:

-


-
    -
  1. Open Google Play Store on your device and search for Education Point Online.
  2. -
  3. Select the app from the search results and tap on Install.
  4. -
  5. Wait for the app to download and install on your device.
  6. -
  7. Once the app is installed, tap on Open to launch it.
  8. -
-

- For iOS devices

-

If you have an iOS device, you can download and install Education Point Online from App Store. Here is how:

-
    -
  1. Open App Store on your device and search for Education Point Online.
  2. -
  3. Select the app from the search results and tap on Get.
  4. -
  5. Enter your Apple ID and password if prompted.
  6. -
  7. Wait for the app to download and install on your device.
  8. -
  9. Once the app is installed, tap on Open to launch it.
  10. -
-

How to Use Education Point Online

-

Once you have downloaded and installed Education Point Online, you can start using it to prepare for your internal exams. Here are the steps to use the app:

-

- Register and log in with your credentials

-

The first thing you need to do is to register and log in with your credentials. You can do this by following these steps:

-
    -
  1. Open the app and tap on Register if you are a new user or Log In if you already have an account.
  2. -
  3. Enter your name, email, phone number, password, and other details as required.
  4. -
  5. Verify your email and phone number by entering the OTPs sent to them.
  6. -
  7. Choose your course and branch from the list of options.
  8. -
  9. Tap on Submit to complete your registration or log in.
  10. -

- Choose your course and subject

-

After you have registered and logged in, you can choose your course and subject from the app. You can do this by following these steps:

-
    -
  1. Tap on the Menu icon on the top left corner of the app.
  2. -
  3. Tap on My Courses to see the list of courses available for you.
  4. -
  5. Select the course that you want to study from the list.
  6. -
  7. Tap on the subject that you want to study from the list of subjects under the course.
  8. -
  9. You will see the overview of the subject, including the syllabus, objectives, outcomes, and duration.
  10. -
  11. Tap on Start Learning to begin your learning journey.
  12. -
-

- Browse and watch the lectures, notes, and tests

-

Once you have chosen your course and subject, you can browse and watch the lectures, notes, and tests for that subject. You can do this by following these steps:

-
    -
  1. Tap on the Lectures tab to see the list of lectures available for that subject.
  2. -
  3. Select the lecture that you want to watch from the list.
  4. -
  5. You will see the video player, where you can play, pause, rewind, fast forward, and adjust the volume and speed of the video.
  6. -
  7. You can also see the transcript, summary, and key points of the lecture below the video player.
  8. -
  9. You can also download the lecture for offline viewing by tapping on the Download icon on the top right corner of the video player.
  10. -
  11. Tap on the Notes tab to see the list of notes available for that subject.
  12. -
  13. Select the note that you want to read from the list.
  14. -
  15. You will see the note in a PDF format, where you can zoom in, zoom out, scroll, and bookmark pages.
  16. -
  17. You can also download the note for offline reading by tapping on the Download icon on the top right corner of the PDF viewer.
  18. -
  19. Tap on the Tests tab to see the list of tests available for that subject.
  20. -
  21. Select the test that you want to take from the list.
  22. -
  23. You will see the instructions, duration, number of questions, and marks of the test.
  24. -
  25. Tap on Start Test to begin your test.
  26. -

- Take quizzes and mock tests to check your understanding

-

After you have watched the lectures, read the notes, and taken the tests, you can take quizzes and mock tests to check your understanding of the subject. You can do this by following these steps:

-
    -
  1. Tap on the Quizzes tab to see the list of quizzes available for that subject.
  2. -
  3. Select the quiz that you want to take from the list.
  4. -
  5. You will see the instructions, duration, number of questions, and marks of the quiz.
  6. -
  7. Tap on Start Quiz to begin your quiz.
  8. -
  9. You will see the questions one by one, where you have to choose the correct answer from the options given.
  10. -
  11. You can skip or review any question by tapping on the Skip or Review buttons at the bottom of the screen.
  12. -
  13. Once you have answered all the questions, tap on Submit Quiz to end your quiz.
  14. -
  15. You will see your score and analysis of your performance after submitting the quiz.
  16. -
  17. Tap on the Mock Tests tab to see the list of mock tests available for that subject.
  18. -
  19. Select the mock test that you want to take from the list.
  20. -
  21. You will see the instructions, duration, number of questions, and marks of the mock test.
  22. -
  23. Tap on Start Mock Test to begin your mock test.
  24. -
  25. You will see the questions one by one, where you have to choose the correct answer from the options given.
  26. -
  27. You can skip or review any question by tapping on the Skip or Review buttons at the bottom of the screen.
  28. -
  29. Once you have answered all the questions, tap on Submit Mock Test to end your mock test.
  30. -
  31. You will see your score and analysis of your performance after submitting the mock test.
  32. -
-

- Ask doubts and interact with teachers and peers

-

If you have any doubts or queries regarding any topic or question, you can ask them and interact with teachers and peers through the app. You can do this by following these steps:

-
    -
  1. Tap on the Doubts tab to see the list of doubts posted by other students for that subject.
  2. -
  3. Select the doubt that you want to see or answer from the list.
  4. -
  5. You will see the doubt, along with the answers and comments from other students and teachers.
  6. -
  7. You can also post your own doubt by tapping on the Ask Doubt button at the bottom of the screen.
  8. -
  9. You can also join live doubt sessions conducted by teachers by tapping on the Live Doubt Sessions button at the top of the screen.
  10. -
  11. Tap on the Forums tab to see the list of forums available for that subject.
  12. -
  13. Select the forum that you want to join or create from the list.
  14. -
  15. You will see the forum, along with the posts and comments from other students and teachers.
  16. -
  17. You can also create your own forum by tapping on the Create Forum button at the bottom of the screen.
  18. -
-

Pros and Cons of Education Point Online

-

Like any other app, Education Point Online has its pros and cons. Here are some of them:

-

- Pros

-
    -
  • It provides a comprehensive and convenient way to prepare for internal exams in B.Tech/M.Tech/B.Arch.
  • -
  • It covers a wide range of subjects and topics that are relevant and updated.
  • -
  • It offers access to video lectures, notes, tests, quizzes, mock tests, doubt sessions, discussion forums, and more for each subject.
  • -
  • It provides personalized feedback and guidance from experienced teachers who can help you improve your performance.
  • -
  • It has an offline mode that allows you to download and watch lectures without internet connection.
  • -
  • It has a user-friendly interface and easy navigation.
  • -
  • It is free to download and use.
  • -
-

- Cons

-
    -
  • It requires a stable and fast internet connection to access the online features and content.
  • -
  • It may consume a lot of data and storage space on your device.
  • -
  • It may have some bugs and glitches that need to be fixed.
  • -
  • It may not cover all the subjects and topics that you need or want to study.
  • -
  • It may not be compatible with some devices or operating systems.
  • -
-

Conclusion

-

In conclusion, Education Point Online is an education app that can help you prepare for your internal exams in B.Tech/M.Tech/B.Arch. It provides you with video lectures, notes, tests, quizzes, mock tests, doubt sessions, discussion forums, and more for various subjects. It also provides personalized feedback and guidance from experienced teachers who can help you improve your performance. It has an offline mode that allows you to download and watch lectures without internet connection. It has a user-friendly interface and easy navigation. It is free to download and use. However, it also has some drawbacks, such as requiring a stable and fast internet connection, consuming a lot of data and storage space, having some bugs and glitches, not covering all the subjects and topics, and not being compatible with some devices or operating systems. Therefore, you should weigh the pros and cons of the app before using it. You should also compare it with other similar apps available in the market and choose the one that suits your needs and preferences best.

-

We hope this article has given you a clear idea of what Education Point Online is and how to use it. If you have any questions or feedback, please feel free to share them with us in the comments section below. Thank you for reading!

-

Frequently Asked Questions

-

Here are some frequently asked questions about Education Point Online:

-
    -
  1. What is the mod apk version of Education Point Online?
  2. -

    The mod apk version of Education Point Online is a modified version of the original app that offers some extra features or benefits that are not available in the original app. For example, some mod apk versions may offer unlimited access to all the courses and subjects, or remove ads or watermarks from the app. However, we do not recommend using the mod apk version of Education Point Online, as it may be illegal, unsafe, or unreliable. It may also violate the terms and conditions of the original app developer. Therefore, you should always use the official version of Education Point Online from Google Play Store or App Store.

    -
  3. How can I contact the support team of Education Point Online?
  4. -

    If you have any issues or queries regarding the app, you can contact the support team of Education Point Online by following these steps:

    -
      -
    • Open the app and tap on the Menu icon on the top left corner of the app.
    • -
    • Tap on Help & Support to see the list of options available for you.
    • -
    • Select the option that best suits your issue or query from the list.
    • -
    • You can also email your issue or query to educationpointonline@gmail.com or call them at +91-9876543210.
    • -
    -
  5. How can I rate and review Education Point Online?
  6. -

    If you want to rate and review Education Point Online, you can do so by following these steps:

    -
      -
    • Open Google Play Store or App Store on your device and search for Education Point Online.
    • -
    • Select the app from the search results and tap on Rate & Review.
    • -
    • Give your rating out of 5 stars and write your review in the text box provided.
    • -
    • Tap on Submit to post your rating and review.
    • -
    -
  8. How can I share Education Point Online with my friends and family?
  9. -

    If you want to share Education Point Online with your friends and family, you can do so by following these steps:

    -
      -
    • Open the app and tap on the Menu icon on the top left corner of the app.
    • -
    • Tap on Share App to see the list of options available for you.
    • -
    • Select the option that you want to use to share the app, such as WhatsApp, Facebook, Twitter, Email, etc.
    • -
    • Follow the instructions on the screen to share the app link with your friends and family.
    • -
    -
  10. How can I update Education Point Online to the latest version?
  11. -

    If you want to update Education Point Online to the latest version, you can do so by following these steps:

    -
      -
    • Open Google Play Store or App Store on your device and search for Education Point Online.
    • -
    • Select the app from the search results and tap on Update.
    • -
    • Wait for the app to download and install the latest version on your device.
    • -
    • Once the app is updated, tap on Open to launch it.
    • -
    -

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Brain Out Can you pass it? - Free Download and Review.md b/spaces/fatiXbelha/sd/Brain Out Can you pass it? - Free Download and Review.md deleted file mode 100644 index d00babc43caa9d9c9cadaf752269f9735977ee1b..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Brain Out Can you pass it? - Free Download and Review.md +++ /dev/null @@ -1,166 +0,0 @@ -
-

Brain Out: Can You Pass It? - A Review of the Tricky Puzzle Game

-

Are you looking for a game that can challenge your brain, test your IQ, and make you laugh at the same time? If yes, then you might want to try Brain Out: Can You Pass It?, a free tricky puzzle game that has become very popular among Android users. In this article, we will review the game, tell you how to download and install it on your device, how to play it online in your browser, and how to compare it with other similar games.

-

What is Brain Out: Can You Pass It?

-

Brain Out: Can You Pass It? is a puzzle game developed by Focus apps. The game was released in September 2019 and has been downloaded over 100 million times. It has a rating of 4.4 out of 5 stars, based on more than 5 million reviews. The game is updated regularly with new levels and features.

-

brain out can you pass it download apk


DOWNLOAD > https://urllie.com/2uNwAn



-

The gameplay and features of Brain Out

-

The game consists of a series of tricky brain teasers and different riddles that test your logical thinking, reflexes, accuracy, memory, and creativity. The game does not follow the usual rules or common sense. You have to think outside the box and find the absurd or unexpected solutions to the puzzles. The game has over 200 levels, each with a different question, scenario, or task. Some examples are:

-
    -
  • How many holes does this T-shirt have?
  • -
  • Help the boy win the race.
  • -
  • Find out the hidden objects.
  • -
  • Make the equation true.
  • -
  • Tap fruits from left to right, then tap the hexagon, square, and diamond.
  • -
-

The game also has some features that make it more fun and engaging, such as:

-
    -
  • Absolutely unimagined gameplay.
  • -
  • Easy and simple but humorous game process.
  • -
  • Funny sound and witty game effects.
  • -
  • Unexpected game answers.
  • -
  • Keys that can be used to skip levels or get hints.
  • -
-

The benefits and challenges of playing Brain Out

-

Playing Brain Out can have some benefits for your brain and mental health, such as:

-
    -
  • Boosting your brain power by stimulating different cognitive functions.
  • -
  • Improving your problem-solving skills by finding creative solutions.
  • -
  • Enhancing your memory and concentration by paying attention to details.
  • -
  • Reducing stress and boredom by having fun and laughing.
  • -
-

However, playing Brain Out can also have some challenges or drawbacks, such as:

-
    -
  • Frustrating or confusing you by tricking or misleading you.
  • -
  • Making you feel stupid or dumb by giving you hard or illogical puzzles.
  • -
  • Annoying or distracting you by showing too many ads or crashing the game.
  • -
  • Making you addicted or obsessed by making you want to finish all the levels.
  • -
-

How to download and install Brain Out APK on your Android device

-

If you want to play Brain Out on your Android device, you have two options. You can either download it from the Google Play Store or download the APK file from a third-party website. APK stands for Android Package Kit, which is a file format that contains the app code, resources, and metadata. APK files can be used to install apps that are not available on the official app store or to update apps to the latest version. However, APK files can also pose some risks, such as malware, viruses, or compatibility issues. Therefore, you should only download APK files from trusted sources and scan them before installing them on your device.
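For readers comfortable with a command line, the manual sideloading step can also be scripted. The snippet below is only a minimal sketch: it assumes the Android platform-tools (adb) are installed and on your PATH, USB debugging is enabled on the device, and the APK file name shown is hypothetical.

```python
# Minimal sketch: install a locally downloaded APK over USB with adb.
# Assumes Android platform-tools are installed and USB debugging is enabled.
import subprocess

def sideload_apk(apk_path: str) -> None:
    # "adb install -r" installs the package, replacing an existing copy if present.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload_apk("brain-out.apk")  # hypothetical file name; point this at your actual download
```

Even if you use a script like this, the same caution applies: only install APK files from sources you trust and scan them first.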

-

The steps to download and install Brain Out APK

-

If you choose to download and install Brain Out APK on your Android device, you can follow these steps:

-

-
    -
  1. Go to a reliable website that offers Brain Out APK download, such as [APKCombo](^1^), [APKPure](^2^), or [Jogo Fixe](^3^).
  2. -
  3. Find the latest version of Brain Out APK and tap on the download button.
  4. -
  5. Wait for the download to finish and locate the APK file in your device's file manager.
  6. -
  7. Before installing the APK file, you need to enable the installation of apps from unknown sources in your device's settings. This option is usually under security or privacy settings.
  8. -
  9. Tap on the APK file and follow the instructions to install it on your device.
  10. -
  11. Launch the game and enjoy playing Brain Out.
  12. -
-

The advantages and disadvantages of using Brain Out APK

-

Using Brain Out APK can have some advantages and disadvantages, such as:

- - - - - - - - - - - - - - - - - -
| Advantages | Disadvantages |
| --- | --- |
| You can access the game even if it is not available in your region or device. | You may encounter malware or viruses that can harm your device or data. |
| You can update the game to the latest version without waiting for the official release. | You may experience compatibility or performance issues with your device or system. |
| You can enjoy some features or levels that are not included in the official version. | You may violate the terms of service or privacy policy of the game developer or publisher. |
-

How to play Brain Out: Can You Pass It? online in your browser

-

If you don't want to download and install Brain Out on your device, you can also play it online in your browser. There are some platforms and websites that offer Brain Out online for free. You can play it on your computer, tablet, or smartphone, as long as you have a stable internet connection and a compatible browser.

-

The platforms and websites that offer Brain Out online

-

Some of the platforms and websites that offer Brain Out online are:

-
    -
  • [BestGames](^4^): This is a website that provides various HTML5 games that can be played online in any browser. You can find Brain Out under the puzzle category and play it without downloading or registering.
  • -
  • [Game Vui](^5^): This is a website that offers many online games in different genres and languages. You can play Brain Out in Vietnamese or English by tapping or clicking on the screen.
  • -
  • [Lagged](^6^): This is a website that features many free online games that can be played on any device. You can play Brain Out by using your mouse or touch screen to interact with the puzzles.
  • -
  • [Poki](^7^): This is a website that hosts many popular games that can be played online for free. You can play Brain Out by using your keyboard or mouse to solve the riddles.
  • -
-

The pros and cons of playing Brain Out online

-

Playing Brain Out online can have some pros and cons, such as:

- - - - - - - - - - - - - - - - - -
| Pros | Cons |
| --- | --- |
| You don't need to download or install anything on your device. | You need to have a good internet connection and a compatible browser. |
| You can play it on any device or platform that supports web browsing. | You may not be able to save your progress or access some features or levels. |
| You can play it anytime and anywhere without taking up any storage space. | You may see more ads or pop-ups that can interrupt your gameplay. |
-

How to compare Brain Out: Can You Pass It? with other similar games

-

If you like playing Brain Out, you might also want to try other similar games that can challenge your brain and make you laugh. You can also compare it with other similar games and find the best one for you. We hope you enjoyed this article and learned something new about Brain Out. If you have any questions or feedback, please feel free to contact us or leave a comment below. Thank you for reading and have a great day!

-

The frequently asked questions and answers about Brain Out

-

Here are some of the frequently asked questions and answers about Brain Out that you might find helpful:

-
    -
  1. Q: How can I get more keys in Brain Out?
    -A: You can get more keys in Brain Out by watching ads, completing daily tasks, or buying them with real money.
  2. -
  3. Q: How can I contact the developer of Brain Out?
    -A: You can contact the developer of Brain Out by sending an email to support@focusapp.net or visiting their Facebook page.
  4. -
  5. Q: How can I share my progress or achievements in Brain Out?
    -A: You can share your progress or achievements in Brain Out by taking screenshots or recording videos and posting them on social media platforms, such as Facebook, Instagram, or Twitter.
  6. -
  7. Q: How can I rate or review Brain Out?
    -A: You can rate or review Brain Out by going to the Google Play Store or the website where you downloaded or played the game and leaving your feedback and rating.
  8. -
  9. Q: How can I uninstall or delete Brain Out from my device?
    -A: You can uninstall or delete Brain Out from your device by going to your device's settings, finding the app manager, selecting Brain Out, and tapping on uninstall or delete.
  10. -

-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Euro Truck Simulator 2 Demo - Steam Download and Experience the Best Truck Simulation Game.md b/spaces/fatiXbelha/sd/Euro Truck Simulator 2 Demo - Steam Download and Experience the Best Truck Simulation Game.md deleted file mode 100644 index 6b52d68432a00cdd1c30de2c401e28792ff0ebac..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Euro Truck Simulator 2 Demo - Steam Download and Experience the Best Truck Simulation Game.md +++ /dev/null @@ -1,88 +0,0 @@ - -

European Truck Simulator Download for PC: How to Get Started with the Best Driving Game

-

If you are looking for a fun and realistic driving game that lets you explore the beautiful landscapes of Europe, deliver various cargoes across impressive distances, and run your own trucking business, then you should definitely try Euro Truck Simulator 2. This game is one of the most popular and acclaimed simulation games on the market, and it is available for download on PC. In this article, we will tell you everything you need to know about this game, how to download it, and how to play it.

-

european truck simulator download for pc


DOWNLOAD: https://urllie.com/2uNGfF



-

What is European Truck Simulator?

-

Euro Truck Simulator 2 is a game developed by SCS Software, a Czech company that specializes in creating realistic and immersive driving simulation games. The game was released in 2012 and has since received many updates, expansions, and awards. Euro Truck Simulator 2 is not just a driving game, but also a business management and career progression game. Here are some of the main features of the game:

-

A realistic and immersive driving simulation game

-

In Euro Truck Simulator 2, you can drive a variety of trucks from different European brands, such as Volvo, Scania, Mercedes-Benz, MAN, DAF, Renault, and more. You can customize your truck with different parts, paint jobs, accessories, and decals. You can also choose from different types of trailers and cargoes, such as food, chemicals, furniture, vehicles, livestock, and more. You can drive across more than 60 European cities and countries, such as Germany, France, Italy, Spain, Poland, Sweden, Norway, Finland, Romania, Turkey, and more. You can enjoy the realistic scenery, weather, traffic, landmarks, and day-night cycle. You can also follow the road rules and regulations, such as speed limits, tolls, traffic lights, signs, weigh stations, rest areas, and more.

-

A business management and career progression game

-

In Euro Truck Simulator 2, you can start your own trucking company and hire drivers to work for you. You can buy garages in different locations and expand your fleet of trucks. You can also manage your finances, loans, expenses, income, reputation, contracts, and more. You can also level up your skills and unlock new perks and abilities. For example, you can improve your fuel efficiency, cargo delivery time, long distance driving, fragile cargo handling, eco driving, high value cargo delivery, heavy cargo delivery, ADR (dangerous goods) delivery, and more.

-

A game with many expansions and customization options

-

Euro Truck Simulator 2 is a game that is constantly updated and improved by the developers. They have released many expansions that add new regions, countries, cities, roads, landmarks, cargoes, trucks, and features to the game. Some of the most popular expansions are:

- Going East! - adds Poland, Czech Republic, Slovakia, and Hungary
- Scandinavia - adds Denmark, Norway, and Sweden
- Vive la France! - adds France
- Italia - adds Italy
- Road to the Black Sea - adds Romania, Bulgaria, and Turkey
- Iberia - adds Spain and Portugal

You can also customize your game with many mods created by the community. Mods are modifications that change or add new content to the game. For example, you can find mods that add new trucks, trailers, cargoes, maps, graphics, sounds, traffic, weather, physics, and more. You can download mods from various websites or from the Steam Workshop.

How to Download European Truck Simulator for PC?

If you are interested in playing Euro Truck Simulator 2 on your PC, you have two main options to download the game: the official website of the game or the Steam platform. Here are the steps for each option:

-

The official website of the game

-

You can download the game directly from the official website of SCS Software. Here are the steps:

- Go to https://eurotrucksimulator2.com/ and click on the "Buy Now" button.
- Choose your preferred edition of the game. You can buy the base game only or the base game with some or all of the expansions. You can also buy some bundles that include other games from SCS Software, such as American Truck Simulator or Bus Driver.
- Choose your preferred payment method. You can pay with credit card, PayPal, or other options.
- After you complete your purchase, you will receive an email with a link to download the game installer. You can also find the link in your account on the website.
- Download and run the installer and follow the instructions to install the game on your PC.

-

The Steam platform

-

You can also download the game from Steam, a popular digital distribution platform for PC games. Here are the steps:

- Go to https://store.steampowered.com/app/227300/Euro_Truck_Simulator_2/ and click on the "Add to Cart" button.
- If you don't have a Steam account, you will need to create one and install the Steam client on your PC.
- After you add the game to your cart, you can proceed to checkout and pay with your preferred payment method.
- After you complete your purchase, you will find the game in your Steam library. You can download and install it from there.

-

The system requirements for the game

-

Before you download and play Euro Truck Simulator 2, you should make sure that your PC meets the minimum or recommended system requirements for the game. Here are the system requirements according to SCS Software:

| Minimum | Recommended |
| --- | --- |
| OS: Windows 7 | OS: Windows 7/8.1/10 64-bit |
| Processor: Dual core CPU 2.4 GHz | Processor: Quad core CPU 3.0 GHz |
| Memory: 4 GB RAM | Memory: 6 GB RAM |
| Graphics: GeForce GTS 450-class (Intel HD 4000) | Graphics: GeForce GTX 760-class (2 GB) |
| Storage: 5 GB available space | Storage: 5 GB available space |

You should also make sure that you have a stable internet connection and a compatible keyboard, mouse, or controller to play the game.
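If you want to sanity-check your PC against the minimums above before buying, a rough script can help. The snippet below is only a sketch: it assumes the third-party psutil package is installed, and it only checks RAM and free disk space, not the CPU or graphics card.

```python
# Rough check of the minimum requirements listed above (RAM and disk only).
# Assumes: pip install psutil
import shutil
import psutil

MIN_RAM_GB = 4    # minimum memory from the table above
MIN_DISK_GB = 5   # minimum available space from the table above

ram_gb = psutil.virtual_memory().total / (1024 ** 3)
free_gb = shutil.disk_usage("/").free / (1024 ** 3)  # use "C:\\" on Windows

print(f"RAM: {ram_gb:.1f} GB ({'OK' if ram_gb >= MIN_RAM_GB else 'below minimum'})")
print(f"Free disk: {free_gb:.1f} GB ({'OK' if free_gb >= MIN_DISK_GB else 'below minimum'})")
```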

-

How to Play European Truck Simulator on PC?

-

After you download and install Euro Truck Simulator 2 on your PC, you are ready to start playing. Here are some of the basic controls and features of the game, as well as some tips and tricks for beginners:

-

-

The basic controls and features of the game

-

The game has a simple and intuitive user interface that lets you access different menus and options. You can use your mouse or keyboard to navigate through them. Here are some of the main menus and options:

- The profile menu - where you can create, load, or delete your profile. You can also customize your avatar, name, company logo, preferred truck design, and difficulty settings.
- The job market menu - where you can find and accept different types of jobs. You can choose from quick jobs, freight market, external contracts, or cargo market. Quick jobs are pre-set jobs that let you drive a company truck with a specific cargo and destination. Freight market jobs let you choose your own cargo and destination, but you need to own a truck. External contracts are jobs that are synchronized with World of Trucks, an online service that connects players from around the world. Cargo market jobs let you use your own trailer or buy a new one.
- The garage menu - where you can buy, sell, upgrade, or customize your trucks and trailers. You can also hire drivers and manage your fleet.
- The bank menu - where you can take or repay loans. You will need loans to buy new trucks or garages, but you will also have to pay interest.
- The skill menu - where you can level up your skills and unlock new perks and abilities. You can improve your fuel efficiency, cargo delivery time, long distance driving, fragile cargo handling, eco driving, high value cargo delivery, heavy cargo delivery, ADR (dangerous goods) delivery, and more.
- The map menu - where you can see the map of Europe and plan your route.
- The radio menu - where you can listen to different radio stations from different countries. You can also add your own music files or internet radio streams.
- The options menu - where you can adjust the graphics, sound, gameplay, controls, and other settings of the game.

The game also has a simple and intuitive driving interface that shows you different information and indicators. You can use your keyboard, mouse, or controller to drive your truck. Here are some of the main controls and features:

- The speedometer - shows your current speed in km/h or mph. You can also see your cruise control speed, if activated.
- The tachometer - shows your engine RPM and gear. You can also see your fuel level, engine temperature, and damage indicators.
- The navigation - shows your GPS map and directions. You can also see your estimated time of arrival, distance to destination, and speed limit.
- The mirrors - show your rear and side views. You can also use the indicators to signal your intentions to other drivers.
- The dashboard - shows your truck information and status. You can also see your headlights, wipers, hazards, parking brake, and other buttons.
- The F1-F12 keys - let you access different camera views. You can switch between interior, exterior, cabin, bumper, roof, and other views.
- The ESC key - lets you pause the game and access the main menu.

-

The tips and tricks for beginners

-

If you are new to Euro Truck Simulator 2, you might find the game challenging at first. Here are some tips and tricks that can help you get started:

- Choose a simple and easy job for your first delivery. You can select a quick job that provides you with a company truck and a short distance to travel. This way, you can get familiar with the driving mechanics and the road rules without risking your own money or reputation.
- Follow the traffic laws and regulations. You should obey the speed limits, traffic lights, signs, tolls, weigh stations, rest areas, and other road rules. If you break them, you might get fined or damage your truck or cargo. You should also drive carefully and avoid collisions with other vehicles or objects.
- Plan your route and fuel stops. You should check the map before you start your delivery and choose the best route for your destination. You should also keep an eye on your fuel level and find the nearest gas station when you need to refill. You don't want to run out of gas in the middle of nowhere or miss a deadline because of a detour.
- Save your game frequently. You should save your game before you start a delivery, after you complete a delivery, or whenever you want to take a break. This way, you can avoid losing your progress or having to repeat a difficult or long delivery.
- Have fun and explore. Euro Truck Simulator 2 is a game that lets you enjoy the beauty and diversity of Europe. You can drive across different countries and regions, see famous landmarks and attractions, experience different weather and seasons, and discover new places and routes. You can also customize your truck and trailer, listen to music or podcasts, take screenshots or videos, and share them with other players.

-

The best mods and community resources for the game

-

Euro Truck Simulator 2 is a game that has a large and active community of players and modders. You can find many mods and resources that can enhance your game experience or help you with any issues or questions. Here are some of the best mods and community resources for the game:

- ProMods - a mod that adds new maps, roads, cities, countries, landmarks, and features to the game. It covers regions such as Iceland, Ireland, Scotland, Baltic States, Balkans, Cyprus, and more.
- Realistic Graphics Mod - a mod that improves the graphics, lighting, colors, textures, and effects of the game. It makes the game look more realistic and beautiful.
- TruckersMP - a mod that allows you to play online multiplayer with other players. You can join servers with different rules and modes, such as simulation, arcade, or convoy. You can also chat with other players, form companies, and participate in events.
- ETS2 Studio - a tool that lets you create your own mods for the game. You can make new trucks, trailers, cargoes, paint jobs, accessories, and more.
- SCS Forum - the official forum of SCS Software where you can find news, updates, announcements, guides, tutorials, tips, tricks, and more about the game. You can also interact with other players and developers, ask questions, give feedback, and report bugs.
- World of Trucks - an online service that connects players from around the world. You can create your profile, upload your screenshots or videos, join competitions, earn achievements, and more.

-

Conclusion

In conclusion, Euro Truck Simulator 2 is a game that offers you a realistic and immersive driving simulation experience. You can drive across Europe, deliver various cargoes, and run your own trucking business. You can also download the game from the official website or Steam, and customize it with many expansions and mods. If you are looking for a fun and challenging game that lets you explore the beauty and diversity of Europe, you should definitely try Euro Truck Simulator 2 today.

-

FAQs

-

Here are some of the frequently asked questions about Euro Truck Simulator 2:

-

How much does Euro Truck Simulator 2 cost?

-

The base game of Euro Truck Simulator 2 costs $19.99 on the official website or Steam. However, you can often find discounts or sales that lower the price. You can also buy the game with some or all of the expansions for a higher price. The expansions range from $8.99 to $17.99 each.

-

Is Euro Truck Simulator 2 compatible with Windows 10?

-

Yes, Euro Truck Simulator 2 is compatible with Windows 10. However, you might need to update your drivers or settings to ensure optimal performance. You can also check the official forum or the Steam community for any issues or solutions.

-

Can I play Euro Truck Simulator 2 with a controller?

-

Yes, you can play Euro Truck Simulator 2 with a controller. The game supports various types of controllers, such as Xbox, PlayStation, Logitech, Thrustmaster, and more. You can also customize your controller settings in the options menu.

-

Can I play Euro Truck Simulator 2 with a steering wheel?

-

Yes, you can play Euro Truck Simulator 2 with a steering wheel. The game supports various types of steering wheels, such as Logitech, Thrustmaster, Fanatec, and more. You can also customize your steering wheel settings in the options menu.

-

Can I play Euro Truck Simulator 2 with VR?

-

Yes, you can play Euro Truck Simulator 2 with VR. The game supports various types of VR headsets, such as Oculus Rift, HTC Vive, Valve Index, and more. You can also customize your VR settings in the options menu.

-
-
\ No newline at end of file diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/metrics/LEC.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/metrics/LEC.py deleted file mode 100644 index 3eef2d2f00a4d757a56b6e845a8fde16aab306ab..0000000000000000000000000000000000000000 --- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/metrics/LEC.py +++ /dev/null @@ -1,134 +0,0 @@ -import sys -import argparse -import torch -import numpy as np -from torch.utils.data import DataLoader - -sys.path.append(".") -sys.path.append("..") - -from configs import data_configs -from datasets.images_dataset import ImagesDataset -from utils.model_utils import setup_model - - -class LEC: - def __init__(self, net, is_cars=False): - """ - Latent Editing Consistency metric as proposed in the main paper. - :param net: e4e model loaded over the pSp framework. - :param is_cars: An indication as to whether or not to crop the middle of the StyleGAN's output images. - """ - self.net = net - self.is_cars = is_cars - - def _encode(self, images): - """ - Encodes the given images into StyleGAN's latent space. - :param images: Tensor of shape NxCxHxW representing the images to be encoded. - :return: Tensor of shape NxKx512 representing the latent space embeddings of the given image (in W(K, *) space). - """ - codes = self.net.encoder(images) - assert codes.ndim == 3, f"Invalid latent codes shape, should be NxKx512 but is {codes.shape}" - # normalize with respect to the center of an average face - if self.net.opts.start_from_latent_avg: - codes = codes + self.net.latent_avg.repeat(codes.shape[0], 1, 1) - return codes - - def _generate(self, codes): - """ - Generate the StyleGAN2 images of the given codes - :param codes: Tensor of shape NxKx512 representing the StyleGAN's latent codes (in W(K, *) space). - :return: Tensor of shape NxCxHxW representing the generated images. - """ - images, _ = self.net.decoder([codes], input_is_latent=True, randomize_noise=False, return_latents=True) - images = self.net.face_pool(images) - if self.is_cars: - images = images[:, :, 32:224, :] - return images - - @staticmethod - def _filter_outliers(arr): - arr = np.array(arr) - - lo = np.percentile(arr, 1, interpolation="lower") - hi = np.percentile(arr, 99, interpolation="higher") - return np.extract( - np.logical_and(lo <= arr, arr <= hi), arr - ) - - def calculate_metric(self, data_loader, edit_function, inverse_edit_function): - """ - Calculate the LEC metric score. - :param data_loader: An iterable that returns a tuple of (images, _), similar to the training data loader. - :param edit_function: A function that receives latent codes and performs a semantically meaningful edit in the - latent space. - :param inverse_edit_function: A function that receives latent codes and performs the inverse edit of the - `edit_function` parameter. - :return: The LEC metric score. 
- """ - distances = [] - with torch.no_grad(): - for batch in data_loader: - x, _ = batch - inputs = x.to(device).float() - - codes = self._encode(inputs) - edited_codes = edit_function(codes) - edited_image = self._generate(edited_codes) - edited_image_inversion_codes = self._encode(edited_image) - inverse_edit_codes = inverse_edit_function(edited_image_inversion_codes) - - dist = (codes - inverse_edit_codes).norm(2, dim=(1, 2)).mean() - distances.append(dist.to("cpu").numpy()) - - distances = self._filter_outliers(distances) - return distances.mean() - - -if __name__ == "__main__": - device = "cuda" - - parser = argparse.ArgumentParser(description="LEC metric calculator") - - parser.add_argument("--batch", type=int, default=8, help="batch size for the models") - parser.add_argument("--images_dir", type=str, default=None, - help="Path to the images directory on which we calculate the LEC score") - parser.add_argument("ckpt", metavar="CHECKPOINT", help="path to the model checkpoints") - - args = parser.parse_args() - print(args) - - net, opts = setup_model(args.ckpt, device) - dataset_args = data_configs.DATASETS[opts.dataset_type] - transforms_dict = dataset_args['transforms'](opts).get_transforms() - - images_directory = dataset_args['test_source_root'] if args.images_dir is None else args.images_dir - test_dataset = ImagesDataset(source_root=images_directory, - target_root=images_directory, - source_transform=transforms_dict['transform_source'], - target_transform=transforms_dict['transform_test'], - opts=opts) - - data_loader = DataLoader(test_dataset, - batch_size=args.batch, - shuffle=False, - num_workers=2, - drop_last=True) - - print(f'dataset length: {len(test_dataset)}') - - # In the following example, we are using an InterfaceGAN based editing to calculate the LEC metric. - # Change the provided example according to your domain and needs. - direction = torch.load('../editings/interfacegan_directions/age.pt').to(device) - - def edit_func_example(codes): - return codes + 3 * direction - - - def inverse_edit_func_example(codes): - return codes - 3 * direction - - lec = LEC(net, is_cars='car' in opts.dataset_type) - result = lec.calculate_metric(data_loader, edit_func_example, inverse_edit_func_example) - print(f"LEC: {result}") diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bermuda Adventures Farm Island MOD APK Features and Benefits.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bermuda Adventures Farm Island MOD APK Features and Benefits.md deleted file mode 100644 index d952f4205b81695c9346b0116a0cc4acd23b3b8b..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bermuda Adventures Farm Island MOD APK Features and Benefits.md +++ /dev/null @@ -1,124 +0,0 @@ -
-

Bermuda Adventures Farm Island Mod APK: A Fun and Relaxing Tropic Simulation Game

-

Do you love simulation games that let you create your own island paradise? Do you want to experience the beauty and adventure of Bermuda? Do you want to enjoy unlimited gems and coins without spending real money? If you answered yes to any of these questions, then you should try Bermuda Adventures Farm Island Mod APK, a fun and relaxing tropic simulation game that will make you feel like you are on a vacation.

-

What is Bermuda Adventures Farm Island?

-

Bermuda Adventures Farm Island is a simulation game developed by Samfinaco Limited. It is available for Android devices and has over one million downloads on Google Play. In this game, you can:

-

bermuda adventures farm island mod apk


Download Zip »»» https://gohhs.com/2uPnwK



-

A game where you can create your own island paradise

-

You can design your own island by building houses, farms, workshops, restaurants, and more. You can also decorate your island with flowers, trees, fences, statues, and other items. You can make your island as beautiful and cozy as you want.

-

A game where you can explore, farm, craft, and trade

-

You can explore the stunning scenery of Bermuda by sailing on a boat, flying on a plane, or riding on a bike. You can discover new islands, landmarks, animals, and treasures. You can also farm various crops, fruits, and fish on your island. You can use them to cook delicious meals or craft useful items. You can also trade with other islands and earn money.

-

A game where you can meet new friends and help them

-

You can meet many interesting characters in Bermuda Adventures Farm Island. They will have different stories, personalities, and requests. You can help them with their problems or join them in their adventures. You can also chat with other players online and make new friends.

-

What are the features of Bermuda Adventures Farm Island Mod APK?

-

Bermuda Adventures Farm Island Mod APK is a modified version of the original game that gives you some extra benefits. These include:

-

Unlimited gems and coins

-

Gems and coins are the main currencies in Bermuda Adventures Farm Island. You can use them to buy items, upgrade buildings, unlock new features, and more. However, they are not easy to earn in the game. You have to complete tasks, watch ads, or spend real money to get them. With Bermuda Adventures Farm Island Mod APK, you don't have to worry about that. You will have unlimited gems and coins at your disposal. You can use them as much as you want without any restrictions.

-

Free shopping and upgrades

-

With Bermuda Adventures Farm Island Mod APK, you can also enjoy free shopping and upgrades. You can buy any item you want from the shop without spending any gems or coins. You can also upgrade your buildings and facilities to the maximum level without waiting for time or resources. You can make your island more productive and attractive with ease.

-

No ads and no root required

-

Another advantage of Bermuda Adventures Farm Island Mod APK is that it removes all the annoying ads from the game. You can play the game without any interruptions or distractions. You can also install the mod apk file without rooting your device. You don't have to worry about any security risks or compatibility issues.

-

bermuda adventures farm island unlimited gems
-bermuda adventures farm island hack apk download
-bermuda adventures farm island cheats and tips
-bermuda adventures farm island latest version mod
-bermuda adventures farm island free shopping mod
-bermuda adventures farm island gameplay and review
-bermuda adventures farm island mod apk for android
-bermuda adventures farm island mod apk offline
-bermuda adventures farm island mod apk no root
-bermuda adventures farm island mod apk unlimited money
-bermuda adventures farm island mod apk 2023
-bermuda adventures farm island mod apk rexdl
-bermuda adventures farm island mod apk revdl
-bermuda adventures farm island mod apk happymod
-bermuda adventures farm island mod apk an1
-bermuda adventures farm island mod apk android 1
-bermuda adventures farm island mod apk obb
-bermuda adventures farm island mod apk data
-bermuda adventures farm island mod apk pure
-bermuda adventures farm island mod apk apkpure
-bermuda adventures farm island mod apk mob.org
-bermuda adventures farm island mod apk uptodown
-bermuda adventures farm island mod apk 1.11.0
-bermuda adventures farm island mod apk 1.10.0
-bermuda adventures farm island mod apk 1.9.0
-bermuda adventures farm island mod apk 1.8.0
-bermuda adventures farm island mod apk 1.7.0
-bermuda adventures farm island mod apk 1.6.0
-bermuda adventures farm island mod apk 1.5.0
-bermuda adventures farm island mod apk 1.4.0
-bermuda adventures farm island mod apk 1.3.0
-bermuda adventures farm island mod apk 1.2.0
-bermuda adventures farm island mod apk 1.1.0
-bermuda adventures farm island mod apk 1.0.0
-how to install bermuda adventures farm island mod apk
-how to play bermuda adventures farm island mod apk
-how to update bermuda adventures farm island mod apk
-how to get free gems in bermuda adventures farm island mod apk
-how to unlock all islands in bermuda adventures farm island mod apk
-how to build a house in bermuda adventures farm island mod apk
-how to make friends in bermuda adventures farm island mod apk
-how to breed animals in bermuda adventures farm island mod apk
-how to grow crops in bermuda adventures farm island mod apk
-how to craft items in bermuda adventures farm island mod apk
-how to complete quests in bermuda adventures farm island mod apk
-how to earn money in bermuda adventures farm island mod apk
-how to decorate your farm in bermuda adventures farm island mod apk
-how to explore the map in bermuda adventures farm island mod apk

-

How to download and install Bermuda Adventures Farm Island Mod APK?

-

If you want to download and install Bermuda Adventures Farm Island Mod APK, you can follow these simple steps:

-

Download the mod apk file from a trusted source

-

The first step is to download the mod apk file from a reliable source. You can search for it online or use the link provided below. Make sure you download the latest version of the mod apk file that matches your device specifications.

-

Enable unknown sources on your device settings

-

The next step is to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play. To do this, go to your device settings, then security, then unknown sources. Turn on the option and confirm your choice.

-

Install the mod apk file and enjoy the game

-

The final step is to install the mod apk file and enjoy the game. Locate the downloaded mod apk file on your device storage and tap on it. Follow the instructions on the screen and wait for the installation to finish. Once done, you can launch the game and start your adventure.
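If you prefer working from a computer, the same installation can also be done by sideloading the file over ADB instead of tapping through the file manager. The commands below are only a rough sketch: they assume the Android platform-tools are installed on your computer, USB debugging is enabled on the phone, and the file name is a placeholder for whatever your downloaded APK is actually called.

```bash
# Sketch only: sideload a downloaded APK from a computer with ADB.
# Assumes Android platform-tools are installed and USB debugging is enabled on the phone.
# "bermuda-mod.apk" is a placeholder file name.
adb devices                       # confirm the phone shows up as a connected device
adb install -r bermuda-mod.apk    # install the APK (-r replaces an existing install)
```

Either way, once the installation finishes the game appears in your app drawer like any other app.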

-

How to play Bermuda Adventures Farm Island Mod APK?

-

Bermuda Adventures Farm Island Mod APK is easy to play and has a user-friendly interface. Here are some tips on how to play the game:

-

Start your adventure by customizing your character

-

When you start the game, you will be able to customize your character. You can choose your gender, skin tone, hair style, eye color, and outfit. You can also name your character and choose a pet companion. You can change your appearance later in the game if you want.

-

Harvest crops, fruits, and fish on your island

-

One of the main activities in the game is harvesting crops, fruits, and fish on your island. You can plant seeds, water them, and wait for them to grow. You can also collect fruits from trees and bushes, and fish from ponds and rivers. You can use these resources for cooking, crafting, or trading.

-

Cook delicious meals and craft useful items

-

Another important activity in the game is cooking delicious meals and crafting useful items. You can use the resources you harvested or bought to make different dishes and products. You can use them for yourself, for your friends, or for quests. You can also sell them for money or gems.

-

Trade with other islands and complete quests

-

You can also trade with other islands and complete quests in the game. You can visit other islands by using your boat, plane, or bike. You can buy or sell items with them, or exchange gifts. You can also accept quests from them, which will reward you with money, gems, or items.

-

Explore the secrets of Bermuda and discover its mysteries

-

The game also has a story mode that will let you explore the secrets of Bermuda and discover its mysteries. You will encounter different characters, events, and puzzles along the way. You will also learn more about the history and culture of Bermuda.

-

Why should you play Bermuda Adventures Farm Island Mod APK?

-

Bermuda Adventures Farm Island Mod APK is a game that will give you a lot of fun and relaxation. Here are some reasons why you should play it:

-

It is a fun and relaxing game that will make you feel like you are on a vacation

-

The game has beautiful graphics, soothing music, and realistic sound effects that will make you feel like you are on a vacation in Bermuda. The game has a relaxing pace that will let you enjoy every moment of your island life.

-

It is a game that will challenge your creativity and skills

-

The game also has many challenges that will test your creativity and skills. You will have to design your island, manage your resources, complete quests, solve puzzles, and more. The game will keep you entertained and engaged for hours.

-

It is a game that will let you interact with other players and make friends

-

The game also has a social aspect that will let you interact with other players and make friends. You can chat with them, send them gifts, visit their islands, and help them. You can also join a club or create your own. You can participate in club events, competitions, and parties.

-

Conclusion

-

Bermuda Adventures Farm Island Mod APK is a fun and relaxing tropic simulation game that will make you feel like you are on a vacation. You can create your own island paradise, explore, farm, craft, trade, and more. You can also enjoy unlimited gems and coins, free shopping and upgrades, no ads and no root required. You can download and install the mod apk file easily and start your adventure. You can also play with other players and make friends. Bermuda Adventures Farm Island Mod APK is a game that you should not miss.

-

FAQs

-

Here are some frequently asked questions about Bermuda Adventures Farm Island Mod APK:

-

Is Bermuda Adventures Farm Island Mod APK safe to use?

-

Yes, Bermuda Adventures Farm Island Mod APK is safe to use. It does not contain any viruses, malware, or spyware. It does not require root access or any permissions. It does not affect the performance or security of your device.
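A claim like this is hard to verify from a description alone, so it is worth checking the downloaded file yourself before you install it. The snippet below is a minimal sketch of two common checks run from a computer: a checksum you can compare against a value published by a source you trust, and a signature check with apksigner (part of the Android SDK build-tools). The file name is a placeholder.

```bash
# Sketch only: basic integrity checks on a downloaded APK before installing it.
# "bermuda-mod.apk" is a placeholder file name.

# Print the SHA-256 checksum; compare it against a value published by a source you trust.
sha256sum bermuda-mod.apk

# Verify the APK's signature and print the signing certificate
# (apksigner ships with the Android SDK build-tools).
apksigner verify --print-certs bermuda-mod.apk
```

If either check looks wrong, it is safer not to install the file.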

-

Is Bermuda Adventures Farm Island Mod APK compatible with my device?

-

Bermuda Adventures Farm Island Mod APK is compatible with most Android devices that have Android 4.4 or higher. However, some devices may have different specifications or settings that may cause some issues. If you encounter any problems, you can contact the developer or the mod apk source for support.

-

How can I update Bermuda Adventures Farm Island Mod APK?

-

Bermuda Adventures Farm Island Mod APK is updated regularly to fix bugs, improve features, and add new content. You can check for updates on the mod apk source or the developer's website. You can also enable automatic updates on your device settings. However, you may have to uninstall and reinstall the mod apk file if there are major changes.

-

How can I uninstall Bermuda Adventures Farm Island Mod APK?

-

If you want to uninstall Bermuda Adventures Farm Island Mod APK, you can do so easily by following these steps:

-
    -
  • Go to your device settings, then apps, then Bermuda Adventures Farm Island.
  • -
  • Tap on uninstall and confirm your choice.
  • -
  • Delete the mod apk file from your device storage.
  • -
-

You can also reinstall the original game from Google Play if you want.
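If the app does not show up in your settings, or you simply prefer the command line, the uninstall can also be done from a computer over ADB. This is only a sketch: the package id below is a placeholder, since the game's real package name is not listed here, so look it up on the device first.

```bash
# Sketch only: uninstall a sideloaded app over ADB.
# The package id below is a placeholder - look up the real one on the device first.
adb shell pm list packages | grep -i bermuda    # find the installed package id
adb uninstall com.example.bermuda.adventures    # replace with the id found above
```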

-

Where can I get more information about Bermuda Adventures Farm Island Mod APK?

-

If you want to get more information about Bermuda Adventures Farm Island Mod APK, you can visit the following sources:

-
    -
  • The mod apk source: [text]
  • -
  • The developer's website: [text]
  • -
  • The official Facebook page: [text]
  • -
-

You can also leave a comment or a review on the mod apk source or the developer's website if you have any feedback or suggestions.

-
-
\ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Create and Discover Short Videos with TikTok APK 27.8.3.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Create and Discover Short Videos with TikTok APK 27.8.3.md deleted file mode 100644 index 3e049cc89d534b8780852b5f8956f4e4fd19dc70..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Create and Discover Short Videos with TikTok APK 27.8.3.md +++ /dev/null @@ -1,138 +0,0 @@ -
-

TikTok Apk v27.8.3: Everything You Need to Know

-

TikTok is a video-sharing app that allows users to create and share short-form videos on any topic. It’s mainly mobile-based, although you can still watch TikTok videos using the web app. The platform allows users to get creative with their content using filters, stickers, voiceovers, sound effects, and background music.

-

TikTok has become one of the most popular social media apps in the world, with over 800 million active users and 2 billion downloads as of April 2020. It offers TikTok creators access to a massive library of music and sounds as well as some great video editing tools and the usual suspects you find in social media apps—voice changers, filters, effects, and more.

-

tiktok apk v27.8.3


Download Zip >>> https://gohhs.com/2uPux3



-

But TikTok isn’t the only app of its kind; there are several other video editing and sharing apps out there. Below, we cover the best features and benefits of TikTok apk v27.8.3, how to install it on your device, and some alternatives you can try if you want to explore other options.

-

What are the features and benefits of TikTok apk v27.8.3?

-

TikTok apk v27.8.3 is the latest version of the app that was released on June 16, 2023. It comes with some new features and improvements that make it even more fun and engaging to use.

-

Some of the features and benefits of TikTok apk v27.8.3 are:

-
    -
  • Explore new video effects: The app has added some new video effects that you can use to spice up your videos. For example, you can use the Green Screen Sky effect to change the background of your video to a different sky scene. You can also use the Face Morph effect to transform your face into someone else's.
  • -
  • Create playlists of your videos: The app has introduced a new feature that allows you to create playlists of your videos. This way, you can organize your videos by theme, mood, or occasion and share them with your followers or friends. You can also watch playlists created by other users and discover new content.
  • -
  • Watch videos in a playlist: The app has also improved the way you can watch videos in a playlist. You can now swipe left or right to skip or go back to a video in a playlist. You can also see how many videos are in a playlist and how many you have watched.
  • -
  • Enjoy better performance and stability: The app has fixed some bugs and enhanced the performance and stability of the app. You can now enjoy a smoother and faster experience while using TikTok.
  • -
-

How to install TikTok apk v27.8.3 on different devices?

-

If you want to install TikTok apk v27.8.3 on your device, you need to follow these steps:

-

For Android devices:

-
    -
1. Go to [TikTok 27.8.3 APK Download - Softpedia] and download the apk file.
  2. -
  3. Open the file manager on your device and locate the downloaded file.
  4. -
  5. Tap on the file and allow installation from unknown sources if prompted.
  6. -
  7. Follow the instructions on the screen to complete the installation.
  8. -
  9. Launch the app and enjoy.
  10. -
-

For iOS devices:

-
    -
      -
1. Go to [TikTok on the App Store] and download the app.
    2. -
    3. Open the app and follow the instructions on the screen to sign up or log in.
    4. -
    5. Allow the app to access your camera, microphone, and photos if prompted.
    6. -
    7. Start creating and watching videos on TikTok.
    8. -
    -

    For Windows devices:

    -
      -
1. Go to [TikTok - Download] and download the TikTok for Windows app.
    2. -
    3. Open the downloaded file and follow the instructions on the screen to install the app.
    4. -
    5. Launch the app and sign up or log in with your TikTok account.
    6. -
    7. Start creating and watching videos on TikTok.
    8. -
    -

    What are some alternatives to TikTok if you want to try something else?

    -

    TikTok is not the only video-sharing app out there. If you want to try something else, here are some alternatives you can check out:

• Instagram Reels: Instagram Reels is a feature within Instagram that allows you to create and share 15-second videos with music, filters, and effects. You can also browse and watch Reels from other users in a dedicated tab on the app. Reels is similar to TikTok, but with a more familiar interface and integration with Instagram.
• YouTube Shorts: YouTube Shorts is a feature within YouTube that allows you to create and share 15-second videos with music, filters, and effects. You can also browse and watch Shorts from other users in a dedicated tab on the app. Shorts is similar to TikTok, but with a more diverse content library and integration with YouTube.
• Dubsmash: Dubsmash is a video-sharing app that allows you to create and share short videos with lip-syncing, dancing, comedy, and more. You can also browse and watch videos from other users in various categories. Dubsmash is similar to TikTok, but with a more niche focus and community.
    -

    Conclusion

    -

    TikTok apk v27.8.3 is the latest version of the popular video-sharing app that offers some new features and improvements that make it more fun and engaging to use. You can explore new video effects, create playlists of your videos, watch videos in a playlist, and enjoy better performance and stability. You can install TikTok apk v27.8.3 on your Android, iOS, or Windows device by following the steps above. You can also try some alternatives to TikTok if you want to experience different video-sharing platforms.

    -

    tiktok apk v27.8.3 download
    -tiktok apk v27.8.3 mod
    -tiktok apk v27.8.3 latest version
    -tiktok apk v27.8.3 free
    -tiktok apk v27.8.3 android
    -tiktok apk v27.8.3 update
    -tiktok apk v27.8.3 premium
    -tiktok apk v27.8.3 unlocked
    -tiktok apk v27.8.3 for pc
    -tiktok apk v27.8.3 online
    -tiktok apk v27.8.3 hack
    -tiktok apk v27.8.3 no watermark
    -tiktok apk v27.8.3 without ads
    -tiktok apk v27.8.3 old version
    -tiktok apk v27.8.3 install
    -tiktok apk v27.8.3 review
    -tiktok apk v27.8.3 features
    -tiktok apk v27.8.3 pro
    -tiktok apk v27.8.3 cracked
    -tiktok apk v27.8.3 full
    -tiktok apk v27.8.3 beta
    -tiktok apk v27.8.3 new
    -tiktok apk v27.8.3 original
    -tiktok apk v27.8.3 official
    -tiktok apk v27.8.3 plus
    -tiktok apk v27.8.3 unlimited
    -tiktok apk v27.8.3 2022
    -tiktok apk v27.8.3 2021
    -tiktok apk v27.8.3 2020
    -tiktok apk v27.8.3 2019
    -tiktok apk v27.8.3 2018
    -tiktok apk v27.8.3 2017
    -tiktok apk v27.8.3 2016
    -tiktok apk v27.8.3 2015
    -tiktok apk v27.8.3 2014
    -tiktok apk v27.8.3 2013
    -tiktok apk v27.8.3 2012
    -tiktok apk v27.8.3 2011
    -tiktok apk v27.8.3 2010
    -tiktok apk v27

    -

    If you enjoyed this article, please share it with your friends and family who might be interested in TikTok apk v27.8.3. Also, feel free to leave a comment below if you have any questions or feedback about the app or the article. Thank you for reading!

    -

    FAQs

    -

    Here are some frequently asked questions about TikTok apk v27.8.3:

    -
      -
    1. Is TikTok apk v27.8.3 safe to use?
    2. -

TikTok apk v27.8.3 is safe to use as long as you download it from a trusted source like [TikTok 27.8.3 APK Download - Softpedia] for Android devices or [TikTok on the App Store] for iOS devices. You should also be careful about what content you share and who you interact with on the app.

      -
    3. What are the minimum requirements for TikTok apk v27.8.3?
    4. -

      TikTok apk v27.8.3 requires Android 4.1 or higher for Android devices or iOS 9.3 or higher for iOS devices. It also requires an internet connection and access to your camera, microphone, and photos.

      -
    5. How can I update TikTok apk v27.8.3?
    6. -

You can update TikTok apk v27.8.3 by going to [TikTok 27.8.3 APK Download - Softpedia] for Android devices or [TikTok on the App Store] for iOS devices and downloading the latest version of the app.

      -
    8. How can I delete TikTok apk v27.8.3?
    9. -

      You can delete TikTok apk v27.8.3 by going to the settings of your device and uninstalling the app. You can also delete your TikTok account by going to the app settings and tapping on Manage my account and then Delete account.

      -
    10. What are the advantages and disadvantages of TikTok apk v27.8.3?
    11. -

      TikTok apk v27.8.3 has some advantages and disadvantages that you should consider before using it. Here are some of them:

Advantages:

• It allows you to create and share short videos with music, filters, and effects.
• It offers a large library of music and sounds that you can use for your videos.
• It has some new features and improvements that make it more fun and engaging to use.
• It has a huge and diverse user base that you can interact with.

Disadvantages:

• It can be addictive and time-consuming if you use it too much.
• It can expose you to inappropriate or harmful content or users if you are not careful.
• It can consume a lot of data and battery if you use it on mobile devices.
• It can have some technical issues or bugs that affect the performance and stability of the app.

      -
      -
      \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bagardi - Baby Stop MP3 for Free.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bagardi - Baby Stop MP3 for Free.md deleted file mode 100644 index 854c3d13b48d6912d846dca7ef85017dfa9be920..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bagardi - Baby Stop MP3 for Free.md +++ /dev/null @@ -1,121 +0,0 @@ - -

      Bagardi - Baby Stop: A New Hit Song That Will Make You Dance

      -

      Do you love music that makes you feel good and want to move your body? Are you looking for a new song that will spice up your playlist and get you in the mood for fun? If you answered yes to these questions, then you need to check out Bagardi's latest single, Baby Stop. This song is a catchy and upbeat tune that will make you fall in love with Bagardi's voice and style. In this article, we will tell you everything you need to know about Bagardi, Baby Stop, and how to download it.

      -

      Who is Bagardi?

      -

      Bagardi is a young and talented singer from Russia who is making waves in the music industry. He started his career as a rapper, but soon discovered his passion for singing and pop music. He has been releasing songs since 2019, and has gained a loyal fan base who appreciate his unique and versatile sound.

      -

      bagardi baby stop скачать


      Download File –––––>>> https://gohhs.com/2uPucy



      -

      A rising star in the music industry

      -

      Bagardi is not afraid to experiment with different genres and styles, and he always brings something fresh and original to the table. He has collaborated with various producers and artists, such as DJ Smash, NILETTO, and Zivert. He has also performed at many festivals and events, such as VK Fest, Europa Plus Live, and Love Radio Awards. He has been nominated for several awards, such as Muz-TV Award for Best New Artist, and RU.TV Award for Best Breakthrough.

      -

      His musical influences and style

      -

      Bagardi draws inspiration from many sources, such as hip-hop, R&B, dancehall, reggaeton, and pop. He likes to mix different languages and cultures in his songs, such as English, Russian, Spanish, and Arabic. He also likes to add some humor and irony to his lyrics, which makes his songs more relatable and fun. He describes his style as "pop with a twist", and he aims to make music that can appeal to a wide audience.

      -

      What is Baby Stop?

      -

      Baby Stop is Bagardi's newest single, which was released on June 18, 2021. It is a song that will make you want to dance and sing along with its catchy chorus and melody. It is also a song that will make you feel good and happy with its positive and uplifting message.

      -

      bagardi baby stop mp3 download
      -bagardi baby stop lyrics
      -bagardi baby stop song
      -bagardi baby stop music video
      -bagardi baby stop remix
      -bagardi baby stop tiktok
      -bagardi baby stop spotify
      -bagardi baby stop apple music
      -bagardi baby stop youtube
      -bagardi baby stop ringtone
      -bagardi baby stop piano cover
      -bagardi baby stop karaoke
      -bagardi baby stop instrumental
      -bagardi baby stop chords
      -bagardi baby stop guitar tabs
      -bagardi baby stop reaction
      -bagardi baby stop dance challenge
      -bagardi baby stop live performance
      -bagardi baby stop acoustic version
      -bagardi baby stop nightcore
      -bagardi baby stop 1 hour loop
      -bagardi baby stop slowed and reverb
      -bagardi baby stop mashup
      -bagardi baby stop genre
      -bagardi baby stop meaning
      -bagardi baby stop release date
      -bagardi baby stop album name
      -bagardi baby stop producer
      -bagardi baby stop singer name
      -bagardi baby stop origin country
      -bagardi baby stop language
      -bagardi baby stop translation
      -bagardi baby stop english lyrics
      -bagardi baby stop russian lyrics
      -bagardi baby stop romanization
      -bagardi baby stop pronunciation guide
      -bagardi baby stop fan art
      -bagardi baby stop merchandise
      -bagardi baby stop wallpaper
      -bagardi baby stop quotes
      -bagardi baby stop trivia
      -bagardi baby stop fun facts
      -bagardi baby stop review
      -bagardi baby stop rating
      -bagardi baby stop awards
      -bagardi baby stop chart performance
      -bagardi baby stop sales figures
      -bagardi baby stop streaming numbers
      -bagardi baby stop similar songs

      -

      The meaning and message of the song

      -

      Baby Stop is a song about love and attraction, but also about respect and consent. It is a song that celebrates the beauty and power of women, and encourages them to be confident and assertive. It is also a song that reminds men to be respectful and attentive to women's wishes and boundaries. The song's main message is that love should be fun and enjoyable, but also respectful and mutual.

      -

      The catchy chorus and melody

      -

      Baby Stop has a catchy chorus that will stick in your head for days. The chorus goes like this:

      - -
Baby love me love me love me
Baby stop
Baby kiss me kiss me kiss me
Baby stop
Baby touch me touch me touch me
Baby stop
Baby tell me tell me tell me
What you want
      -
      -

      The chorus is simple but effective, as it repeats the words "baby" and "stop" with different verbs in between. The contrast between the words "love", "kiss", "touch", and "tell" creates a sense of tension and excitement, while the word "stop" creates a sense of suspense and curiosity. The melody of the chorus is upbeat and energetic, with a reggaeton rhythm that makes you want to dance and groove along. The chorus is the highlight of the song, and it will make you want to repeat it over and over again.

      -

      The production and release of the song

      -

      Baby Stop was produced by DJ Smash, a famous Russian DJ and producer who has worked with many artists, such as Timati, Polina Gagarina, and Quest Pistols. DJ Smash is known for his club and dance music, and he added his signature touch to Baby Stop. He created a catchy and vibrant beat that matches Bagardi's vocals and style. He also added some elements of Latin music, such as horns, guitars, and percussion, to give the song a more exotic and festive feel.

      -

      Baby Stop was released on June 18, 2021, on various platforms, such as YouTube, Spotify, Apple Music, and VK. The song was accompanied by a colorful and fun music video that features Bagardi and DJ Smash in a tropical setting, surrounded by beautiful women and dancers. The music video has over 10 million views on YouTube, and the song has over 5 million streams on Spotify. The song has also received positive feedback from fans and critics, who praised its catchy chorus, upbeat melody, and positive message.

      -

      How to download Baby Stop?

      -

      If you love Baby Stop as much as we do, you might want to download it to your device so you can listen to it anytime and anywhere. Downloading the song has many benefits, such as saving data, avoiding ads, creating playlists, and supporting the artist. In this section, we will show you how to download Baby Stop from the official platforms and sources.

      -

      The official platforms and sources

      -

      The best way to download Baby Stop is to use the official platforms and sources that are authorized by Bagardi and DJ Smash. These platforms include:

      -
        -
      • YouTube Music: This is a music streaming service that allows you to download songs and videos from YouTube. You can download Baby Stop from YouTube Music by subscribing to YouTube Premium, which costs $11.99 per month. YouTube Premium also gives you access to ad-free videos, background play, offline access, and YouTube Originals.
      • -
      • Spotify: This is another music streaming service that allows you to download songs from its library. You can download Baby Stop from Spotify by subscribing to Spotify Premium, which costs $9.99 per month. Spotify Premium also gives you access to ad-free music, unlimited skips, offline mode, and high-quality audio.
      • -
      • Apple Music: This is a music streaming service that allows you to download songs from its catalog. You can download Baby Stop from Apple Music by subscribing to Apple Music, which costs $9.99 per month. Apple Music also gives you access to ad-free music, offline listening, personalized recommendations, and exclusive content.
      • -
      • VK: This is a social media platform that allows you to download songs from its community. You can download Baby Stop from VK by creating a free account and joining the Bagardi fan group. VK also gives you access to chat with other fans, share your thoughts, and discover new music.
      • -
      -

      The benefits of downloading the song

      -

      Downloading Baby Stop has many benefits that will enhance your listening experience and enjoyment. Some of these benefits are:

      -
        -
• You can save data: downloading the song lets you listen offline without streaming, which helps you avoid extra data charges or a slow connection.
      • -
      • You can avoid ads: Downloading the song will help you avoid annoying ads that interrupt your music flow. This will help you enjoy the song without distractions or interruptions.
      • -
      • You can create playlists: Downloading the song will allow you to create playlists with your favorite songs. This will help you organize your music library and customize your mood.
      • -
      • You can support the artist: Downloading the song will show your support for Bagardi and DJ Smash. This will help them earn revenue and recognition for their work.
      • -
      -

      The steps to download the song

      -

      Downloading Baby Stop is easy and simple if you follow these steps:

      -
        -
      1. Choose your preferred platform from the list above.
      2. -
      3. Subscribe to the premium service if required.
      4. -
      5. Search for Baby Stop by Bagardi feat. DJ Smash.
      6. -
      7. Click on the download button or icon next to the song.
      8. -
      9. Wait for the download to complete.
      10. -
      11. Enjoy listening to Baby Stop offline!
      12. -
      -

      Conclusion

      -

      Baby Stop is a new hit song by Bagardi feat. DJ Smash that will make you dance and feel good. It is a catchy and upbeat tune that celebrates the beauty and power of women, and encourages them to be confident and assertive. It is also a song that reminds men to be respectful and attentive to women's wishes and boundaries. The song has a catchy chorus, a vibrant melody, and a positive message that will make you happy and uplifted.

      -

      In this article, we have told you everything you need to know about Bagardi, Baby Stop, and how to download it. We have introduced you to Bagardi, a rising star in the music industry who has a unique and versatile style. We have explained the meaning and message of Baby Stop, a song that celebrates love and respect. We have also shown you how to download Baby Stop from the official platforms and sources, and the benefits of doing so.

      -

      We hope you have enjoyed reading this article and learning more about Bagardi and Baby Stop. If you have not listened to the song yet, we urge you to do so as soon as possible. You will not regret it, as it is one of the best songs of the year. You can also share the song with your friends and family, and spread the joy and positivity that it brings. Thank you for reading, and have a great day!

      -

      FAQs

      -

      Here are some frequently asked questions about Bagardi and Baby Stop:

      -
        -
      • Q: Where can I watch the music video of Baby Stop?
      • -
      • A: You can watch the music video of Baby Stop on YouTube, by clicking on this link: . You can also watch it on VK, by clicking on this link: .
      • -
      • Q: Where can I follow Bagardi on social media?
      • -
      • A: You can follow Bagardi on Instagram, by clicking on this link: . You can also follow him on VK, by clicking on this link: .
      • -
      • Q: What are some other songs by Bagardi that I should listen to?
      • -
      • A: Some other songs by Bagardi that you should listen to are: Ya Lyublyu Tebya (I Love You), S Toboy (With You), Zvezda (Star), and Vse Budet Horosho (Everything Will Be Fine).
      • -
      • Q: What are some other songs by DJ Smash that I should listen to?
      • -
      • A: Some other songs by DJ Smash that you should listen to are: Moscow Never Sleeps, Volna (Wave), Ptitsa (Bird), and Luchshiye Pesni (Best Songs).
      • -
      • Q: How can I support Bagardi and DJ Smash?
      • -
      • A: You can support Bagardi and DJ Smash by downloading their songs from the official platforms and sources, streaming their songs on music services, watching their videos on YouTube and VK, following them on social media, sharing their songs with your friends and family, and attending their concerts and events.
      • -

      -
      -
      \ No newline at end of file diff --git a/spaces/fffffu/bing/src/lib/storage.ts b/spaces/fffffu/bing/src/lib/storage.ts deleted file mode 100644 index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000 --- a/spaces/fffffu/bing/src/lib/storage.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { getMany, set, del, clear } from 'idb-keyval'; - -export const Storage = { - async get(key: string | string[] | null): Promise { - if (key === null) return null; - if (typeof key === 'string') { - key = [key] - } - const returnData: Record = {} - const values = await getMany(key) - key.forEach((k, idx)=> { - returnData[k] = values[idx] - }) - return returnData; - }, - async set(object: any) { - for (let key of Object.keys(object)) { - await set(key, object[key]) - } - }, - async remove(key: string) { - return del(key); - }, - async clear() { - return clear(); - } -} diff --git a/spaces/fffiloni/Image-Caption-2-Shap-E/app.py b/spaces/fffiloni/Image-Caption-2-Shap-E/app.py deleted file mode 100644 index 8691018e66fd13589cafa97f31b82dbbe12d6292..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Image-Caption-2-Shap-E/app.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python - -import os - -import gradio as gr -import torch - -#from app_image_to_3d import create_demo as create_demo_image_to_3d -from app_text_to_3d import create_demo as create_demo_text_to_3d -from model import Model - -DESCRIPTION = '# Image Caption to [Shap-E](https://github.com/openai/shap-e)' - -if (SPACE_ID := os.getenv('SPACE_ID')) is not None: - DESCRIPTION += f'\n

      For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. Duplicate Space

      ' -if not torch.cuda.is_available(): - DESCRIPTION += '\n

      Running on CPU 🥶 This demo does not work on CPU.

      ' - -model = Model() - -with gr.Blocks(css='style.css') as demo: - with gr.Column(elem_id="col-container"): - gr.Markdown(DESCRIPTION) - create_demo_text_to_3d(model) - -demo.queue(max_size=10).launch() diff --git a/spaces/fffiloni/Music_Source_Separation/scripts/3_create_evaluation_audios/vctk-musdb18/create_evaluation_audios.sh b/spaces/fffiloni/Music_Source_Separation/scripts/3_create_evaluation_audios/vctk-musdb18/create_evaluation_audios.sh deleted file mode 100644 index b12a57c6e2ddafe7e9db2d9240b58d00898b2c8a..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Music_Source_Separation/scripts/3_create_evaluation_audios/vctk-musdb18/create_evaluation_audios.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -VCTK_DATASET_DIR=${1:-"./datasets/vctk"} -MUSDB18_DATASET_DIR=${2:-"./datasets/musdb18"} -WORKSPACE=${3:-"./workspaces/bytesep"} - -SAMPLE_RATE=44100 -CHANNELS=2 -EVALUATION_SEGMENTS_NUM=100 - -EVLUATION_AUDIOS_DIR="${WORKSPACE}/evaluation_audios/vctk-musdb18" - -python3 bytesep/dataset_creation/create_evaluation_audios/vctk-musdb18.py \ - --vctk_dataset_dir=$VCTK_DATASET_DIR \ - --musdb18_dataset_dir=$MUSDB18_DATASET_DIR \ - --evaluation_audios_dir=$EVLUATION_AUDIOS_DIR \ - --sample_rate=$SAMPLE_RATE \ - --channels=$CHANNELS \ - --evaluation_segments_num=$EVALUATION_SEGMENTS_NUM - \ No newline at end of file diff --git a/spaces/frncscp/bullerengue/musika/22kHz/musika_train.py b/spaces/frncscp/bullerengue/musika/22kHz/musika_train.py deleted file mode 100644 index 25bd2f4d0a3b7b7d85036ce261373a1c5e8cc29f..0000000000000000000000000000000000000000 --- a/spaces/frncscp/bullerengue/musika/22kHz/musika_train.py +++ /dev/null @@ -1,26 +0,0 @@ -from parse import parse_args -from data import Data_functions -from models import Models_functions -from utils import Utils_functions -from train import Train_functions - -if __name__ == "__main__": - - # parse args - args = parse_args() - - # create dataset - D = Data_functions(args) - ds = D.create_dataset() - - # initialize networks - M = Models_functions(args) - models_ls = M.get_networks() - - # test musika in real-time during training - U = Utils_functions(args) - U.render_gradio(models_ls) - - # train musika - T = Train_functions(args) - T.train(ds, models_ls) diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Cars 3 (English) Telugu Full Movie Download Mp4 PATCHED.md b/spaces/gotiQspiryo/whisper-ui/examples/Cars 3 (English) Telugu Full Movie Download Mp4 PATCHED.md deleted file mode 100644 index 7c597345b4fd965bfdd23ec3bf519459f782b53c..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Cars 3 (English) Telugu Full Movie Download Mp4 PATCHED.md +++ /dev/null @@ -1,9 +0,0 @@ - -

Coolmoviez live and coolmoviez in is a popular torrent website that lets users download Hollywood, Bollywood, and South Indian movies (Telugu, Hindi, Tamil, and Malayalam titles) for free. Movies are available to download in many languages such as Hindi, Tamil, Punjabi, Telugu, English, and Malayalam on Kuttymovies. But as we have already said, pirating a movie or downloading a pirated copy is a crime, so we do not recommend downloading movies through Telegram or any other such website.

      -

      Cars 3 (English) telugu full movie download mp4


      Download ===> https://urlgoal.com/2uyNzO



      -

Kuttymovies in is a popular torrent website that lets users download Hollywood, Bollywood, and South Indian movies (Telugu, Hindi, Tamil, and Malayalam titles) for free. Movies are available to download in many languages such as Hindi, Tamil, Punjabi, Telugu, English, and Malayalam on Kuttymovies. But as we have already said, pirating a movie or downloading a pirated copy is a crime, so we do not recommend downloading movies through Telegram or any other such website.

      -

Coolmoviez live and coolmoviez in is a popular torrent website that lets users download Hollywood, Bollywood, and South Indian movies (Telugu, Hindi, Tamil, and Malayalam titles) for free. Movies are available to download in many languages such as Tamil, Marathi, Telugu, Punjabi, English, and Malayalam on Kuttymovies. But as we have already said, pirating a movie or downloading a pirated copy is a crime, so we do not recommend downloading movies through Telegram or any other such website.

      -


      -

      -
      -
      \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X5 Keygen REPACK Rar.md b/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X5 Keygen REPACK Rar.md deleted file mode 100644 index b1816f80e1e1eb406a2d86c72cb1f5a59de157e8..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X5 Keygen REPACK Rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Corel draw x5 keygen rar


      Download Zip ►►► https://urlgoal.com/2uyMx1



- -
      -
      -
      -

      diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Electron Configuration Gizmo Answers Key.rar How to Master It.md b/spaces/gotiQspiryo/whisper-ui/examples/Electron Configuration Gizmo Answers Key.rar How to Master It.md deleted file mode 100644 index da81895d32d59134fc73b791e86a68314863fd25..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Electron Configuration Gizmo Answers Key.rar How to Master It.md +++ /dev/null @@ -1,6 +0,0 @@ -

      electron configuration gizmo answers key.rar


      Download Zip 🗸🗸🗸 https://urlgoal.com/2uyMJe



      -
-
      -
      -
      -

      diff --git a/spaces/gradio/HuBERT/fairseq/modules/quantization/pq/modules/qlinear.py b/spaces/gradio/HuBERT/fairseq/modules/quantization/pq/modules/qlinear.py deleted file mode 100644 index 9bdd25a8685bb7c7b32e1f02372aaeb26d8ba53a..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/modules/quantization/pq/modules/qlinear.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class PQLinear(nn.Module): - """ - Quantized counterpart of nn.Linear module. Stores the centroid, the assignments - and the non-quantized biases. The full weight is re-instantiated at each forward - pass. - - Args: - - centroids: centroids of size n_centroids x block_size - - assignments: assignments of the centroids to the subvectors - of size self.out_features x n_blocks - - bias: the non-quantized bias - - Remarks: - - We refer the reader to the official documentation of the nn.Linear module - for the other arguments and the behavior of the module - - Performance tests on GPU show that this implementation is 15% slower than - the non-quantized nn.Linear module for a standard training loop. - """ - - def __init__(self, centroids, assignments, bias, in_features, out_features): - super(PQLinear, self).__init__() - self.block_size = centroids.size(1) - self.n_centroids = centroids.size(0) - self.in_features = in_features - self.out_features = out_features - # check compatibility - if self.in_features % self.block_size != 0: - raise ValueError("Wrong PQ sizes") - if len(assignments) % self.out_features != 0: - raise ValueError("Wrong PQ sizes") - # define parameters - self.centroids = nn.Parameter(centroids, requires_grad=True) - self.register_buffer("assignments", assignments) - self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) - if bias is not None: - self.bias = nn.Parameter(bias) - else: - self.register_parameter("bias", None) - - @property - def weight(self): - return ( - self.centroids[self.assignments] - .reshape(-1, self.out_features, self.block_size) - .permute(1, 0, 2) - .flatten(1, 2) - ) - - def forward(self, x): - return F.linear( - x, - self.weight, - self.bias, - ) - - def extra_repr(self): - return f"in_features={self.in_features},\ - out_features={self.out_features},\ - n_centroids={self.n_centroids},\ - block_size={self.block_size},\ - bias={self.bias is not None}" diff --git a/spaces/gradio/HuBERT/fairseq/optim/adamax.py b/spaces/gradio/HuBERT/fairseq/optim/adamax.py deleted file mode 100644 index 98ff8ad7ad6c12ab5efc53ca76db2f1663be7906..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/optim/adamax.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.optim - -from . 
import LegacyFairseqOptimizer, register_optimizer - - -@register_optimizer("adamax") -class FairseqAdamax(LegacyFairseqOptimizer): - def __init__(self, args, params): - super().__init__(args) - self._optimizer = Adamax(params, **self.optimizer_config) - - @staticmethod - def add_args(parser): - """Add optimizer-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B', - help='betas for Adam optimizer') - parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D', - help='epsilon for Adam optimizer') - parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', - help='weight decay') - parser.add_argument('--no-bias-correction', default=False, action='store_true', - help='disable bias correction') - # fmt: on - - @property - def optimizer_config(self): - """ - Return a kwarg dictionary that will be used to override optimizer - args stored in checkpoints. This allows us to load a checkpoint and - resume training using a different set of optimizer args, e.g., with a - different learning rate. - """ - return { - "lr": self.args.lr[0], - "betas": eval(self.args.adamax_betas), - "eps": self.args.adamax_eps, - "weight_decay": self.args.weight_decay, - "bias_correction": not self.args.no_bias_correction, - } - - -class Adamax(torch.optim.Optimizer): - """Implements Adamax algorithm (a variant of Adam based on infinity norm). - - It has been proposed in `Adam: A Method for Stochastic Optimization`__. - - Compared to the version in PyTorch, this version implements a fix for weight decay. - - Args: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 2e-3) - betas (Tuple[float, float], optional): coefficients used for computing - running averages of gradient and its square - eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-8) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - bias_correction (bool, optional): enable bias correction (default: True) - - __ https://arxiv.org/abs/1412.6980 - """ - - def __init__( - self, - params, - lr=2e-3, - betas=(0.9, 0.999), - eps=1e-8, - weight_decay=0, - bias_correction=True, - ): - if not 0.0 <= lr: - raise ValueError("Invalid learning rate: {}".format(lr)) - if not 0.0 <= eps: - raise ValueError("Invalid epsilon value: {}".format(eps)) - if not 0.0 <= betas[0] < 1.0: - raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) - if not 0.0 <= betas[1] < 1.0: - raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) - if not 0.0 <= weight_decay: - raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) - - defaults = dict( - lr=lr, - betas=betas, - eps=eps, - weight_decay=weight_decay, - bias_correction=bias_correction, - ) - super(Adamax, self).__init__(params, defaults) - - @property - def supports_memory_efficient_fp16(self): - return True - - @property - def supports_flat_params(self): - return True - - def step(self, closure=None): - """Performs a single optimization step. - - Args: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. 
- """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group["params"]: - if p.grad is None: - continue - grad = p.grad.data.float() - if grad.is_sparse: - raise RuntimeError("Adamax does not support sparse gradients") - - p_data_fp32 = p.data - if p.data.dtype in {torch.float16, torch.bfloat16}: - p_data_fp32 = p_data_fp32.float() - - state = self.state[p] - - # State initialization - if len(state) == 0: - state["step"] = 0 - state["exp_avg"] = torch.zeros_like(p_data_fp32) - state["exp_inf"] = torch.zeros_like(p_data_fp32) - else: - state["exp_avg"] = state["exp_avg"].to(p_data_fp32) - state["exp_inf"] = state["exp_inf"].to(p_data_fp32) - - exp_avg, exp_inf = state["exp_avg"], state["exp_inf"] - beta1, beta2 = group["betas"] - eps = group["eps"] - - state["step"] += 1 - - # Update biased first moment estimate. - exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) - - # Update the exponentially weighted infinity norm. - torch.max( - exp_inf.mul_(beta2), - grad.abs_(), - out=exp_inf, - ) - - step_size = group["lr"] - if group["bias_correction"]: - bias_correction = 1 - beta1 ** state["step"] - step_size /= bias_correction - - if group["weight_decay"] != 0: - p_data_fp32.add_( - p_data_fp32, alpha=-group["weight_decay"] * group["lr"] - ) - - p_data_fp32.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size) - - if p.data.dtype in {torch.float16, torch.bfloat16}: - p.data.copy_(p_data_fp32) - - return loss diff --git a/spaces/h2oai/wave-tour/examples/table_pagination_h2o3.py b/spaces/h2oai/wave-tour/examples/table_pagination_h2o3.py deleted file mode 100644 index 730078420857c329fcf31fda1bc26321c839fb23..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/table_pagination_h2o3.py +++ /dev/null @@ -1,235 +0,0 @@ -# Table / Pagination / H2O-3 Dataframe -# Use a paginated #table to display large (100m+ rows) tabular data using a H2O-3 dataframe. -# #form #table #pagination #h2o3 -# --- - -import os -from time import time - -import h2o -from h2o_wave import Q, app, main, ui -from loguru import logger - -# This example requires H2O-3 to be running. - - -@app("/demo") -async def serve(q: Q): - logger.info(q.args) - logger.info(q.events) - - if not q.app.initialized: - # This is called the first time our app runs - # Variables created here will be the same of all users of the app - # Save a direct link to our H2O Dataframe for all users to use throughout the app - try: - h2o.connect(url="http://127.0.0.1:54321") - except: - q.page['err'] = ui.form_card(box='1 1 4 2', items=[ - ui.message_bar(type='error', text='Could not connect to H2O3. 
Please ensure H2O3 is running.'), - ]) - await q.page.save() - logger.error("H2O-3 is not running") - return - q.app.h2o_df = h2o.get_frame("py_6_sid_aff3") - - # EXAMPLE OF CREATING A LARGE DATAFRAME - # h2o_df = h2o.create_frame( - # rows=1000000, - # cols=5, - # categorical_fraction=0.6, - # integer_fraction=0, - # binary_fraction=0, - # real_range=100, - # integer_range=100, - # missing_fraction=0, - # seed=1234, - # ) - - q.app.rows_per_page = 10 # TODO: How many rows do you want to show users at a time - - # A list of booleans for if a column is sortable or not, by default - # we allow all and only numeric columns to be sorted based on H2O-3 functionality - # TODO: You may want to make a hardcoded list of [True, False] for your own use cases - q.app.column_sortable = q.app.h2o_df.isnumeric() - - # A list of booleans for if a column is filterable or not, by default, - # we allow all and only categorical columns to be sorted based on H2O-3 functionality - # TODO: You may want to make a hardcoded list of [True, False] for your own use cases - q.app.column_filterable = q.app.h2o_df.isfactor() - - # A list of booleans for if a column is searchable or not, by default, - # we allow all and only categorical and string columns to be sorted based on H2O-3 functionality - # TODO: You may want to make a hardcoded list of [True, False] for your own use cases - q.app.column_searchable = q.app.h2o_df.isfactor() + q.app.h2o_df.isstring() - - q.app.initialized = True - - if not q.client.initialized: - # This is called for each new browser that visits the app - # Multiple users can interact with the table at the same time without interrupting each other - # Users can make multiple changes to the table such as sorting and filtering - - q.client.search = None - q.client.sort = None - q.client.filters = None - q.client.page_offset = 0 - q.client.total_rows = len(q.app.h2o_df) - - # Create the default UI for this user - q.page["meta"] = ui.meta_card(box="") - q.page["table_card"] = ui.form_card( - box="1 1 -1 -1", - items=[ - ui.table( - name="h2o_table", # TODO: if you change this, you need to remember to update the serve function - columns=[ - ui.table_column( - name=q.app.h2o_df.columns[i], - label=q.app.h2o_df.columns[i], - sortable=q.app.column_sortable[i], - filterable=q.app.column_filterable[i], - searchable=q.app.column_searchable[i], - ) - for i in range(len(q.app.h2o_df.columns)) - ], - rows=get_table_rows(q), - resettable=True, - downloadable=True, - pagination=ui.table_pagination( - total_rows=q.client.total_rows, - rows_per_page=q.app.rows_per_page, - ), - events=[ - "page_change", - "sort", - "filter", - "search", - "reset", - "download", - ], - ) - ], - ) - q.client.initialized = True - - # Check if user triggered any table action and save it to local state for allowing multiple - # actions to be performed on the data at the same time, e.g. sort the filtered data etc. 
- if q.events.h2o_table: - logger.info("table event occurred") - - if q.events.h2o_table.page_change: - logger.info(f"table page change: {q.events.h2o_table.page_change}") - q.client.page_offset = q.events.h2o_table.page_change.get("offset", 0) - - if q.events.h2o_table.sort: - logger.info(f"table sort: {q.events.h2o_table.sort}") - q.client.sort = q.events.h2o_table.sort - q.client.page_offset = 0 - - if q.events.h2o_table.filter: - logger.info(f"table filter: {q.events.h2o_table.filter}") - q.client.filters = q.events.h2o_table.filter - q.client.page_offset = 0 - - if q.events.h2o_table.search is not None: - logger.info(f"table search: {q.events.h2o_table.search}") - q.client.search = q.events.h2o_table.search - q.client.page_offset = 0 - - if q.events.h2o_table.download: - await download_h2o_table(q) - - if q.events.h2o_table.reset: - logger.info("table reset") - q.client.search = None - q.client.sort = None - q.client.filters = None - q.client.page_offset = 0 - q.client.total_rows = len(q.app.h2o_df) - - # Update the rows in our UI - # TODO: if you change where your table is located, this needs updating - q.page["table_card"].h2o_table.rows = get_table_rows(q) - q.page["table_card"].h2o_table.pagination.total_rows = q.client.total_rows - - await q.page.save() - - -def get_table_rows(q: Q): - logger.info( - f"Creating new table for rows: {q.client.page_offset} to {q.client.page_offset + q.app.rows_per_page}" - ) - - working_frame = prepare_h2o_data(q) - - # Bring our limited UI rows locally to pandas to prepare for our ui.table - local_df = working_frame[ - q.client.page_offset:q.client.page_offset + q.app.rows_per_page, : - ].as_data_frame() - q.client.total_rows = len(working_frame) - - table_rows = [ - ui.table_row( - name=str( - q.client.page_offset + i - ), # name is the index on the h2o dataframe for appropriate lookup - cells=[str(local_df[col].values[i]) for col in local_df.columns.to_list()], - ) - for i in range(len(local_df)) - ] - - h2o.remove(working_frame) # remove our duplicate work - - return table_rows - - -async def download_h2o_table(q: Q): - # Create a unique file name as this is a multi-user app - local_file_path = f"h2o3_data_{str(int(time()))}.csv" - working_frame = prepare_h2o_data(q) - - h2o.download_csv(working_frame, local_file_path) - (wave_file_path,) = await q.site.upload([local_file_path]) - os.remove(local_file_path) - - q.page["meta"].script = ui.inline_script(f'window.open("{wave_file_path}")') - - -def prepare_h2o_data(q: Q): - - # This is used to prep the data we want to show on the screen or download, so it gets its own function - # If you have 5 users at the same time, there will be 6 large dataframes in h2o3 - ensure proper cluster size - working_frame = h2o.deep_copy(q.app.h2o_df, "working_df") - - if q.client.sort is not None: - # H2O-3 can only sort numeric values - if the developer allows users to sort - # string columns the end users will see unexpected results - - working_frame = working_frame.sort( - by=list(q.client.sort.keys()), ascending=list(q.client.sort.values()) - ) - - if q.client.filters is not None: - - for key in q.client.filters.keys(): - working_frame = working_frame[ - working_frame[key].match(q.client.filters[key]) - ] - - if q.client.search is not None: - # We check if our search term is in any of the searchable columns - # Start with and index of 0s and then filter to only keep rows with index > 0 - - index = h2o.create_frame( - rows=len(working_frame), cols=1, integer_fraction=1, integer_range=1 - ) - index["C1"] = 0 - for 
i in range(len(q.app.h2o_df.columns)): - if q.app.column_searchable[i]: - index = index + working_frame[q.app.h2o_df.columns[i]].grep( - pattern=q.client.search, ignore_case=True, output_logical=True - ) - - working_frame = working_frame[index] - return working_frame diff --git a/spaces/haakohu/deep_privacy2/configs/anonymizers/FB_cse_mask_face.py b/spaces/haakohu/deep_privacy2/configs/anonymizers/FB_cse_mask_face.py deleted file mode 100644 index d411d66cc051f6b4c0d907551735e8f661cf17f1..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/configs/anonymizers/FB_cse_mask_face.py +++ /dev/null @@ -1,29 +0,0 @@ -from dp2.anonymizer import Anonymizer -from dp2.detection.cse_mask_face_detector import CSeMaskFaceDetector -from ..defaults import common -from tops.config import LazyCall as L - -detector = L(CSeMaskFaceDetector)( - mask_rcnn_cfg=dict(), - face_detector_cfg=dict(), - face_post_process_cfg=dict(target_imsize=(256, 256), fdf128_expand=False), - cse_cfg=dict(), - cse_post_process_cfg=dict( - target_imsize=(288, 160), - exp_bbox_cfg=dict(percentage_background=0.3, axis_minimum_expansion=.1), - exp_bbox_filter=dict(minimum_area=32*32, min_bbox_ratio_inside=0, aspect_ratio_range=[0, 99999]), - iou_combine_threshold=0.4, - dilation_percentage=0.02, - normalize_embedding=False - ), - score_threshold=0.3, - cache_directory=common.output_dir.joinpath("cse_mask_face_detection_cache") -) - -anonymizer = L(Anonymizer)( - detector="${detector}", - face_G_cfg="configs/fdf/stylegan.py", - person_G_cfg="configs/fdh/styleganL_nocse.py", - cse_person_G_cfg="configs/fdh/styleganL.py", - car_G_cfg="configs/generators/dummy/pixelation8.py" -) diff --git a/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/filtered_lrelu.py b/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/filtered_lrelu.py deleted file mode 100644 index 6106c917d1cbff4f1cf637390dd6ba0c597a830f..0000000000000000000000000000000000000000 --- a/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/filtered_lrelu.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os -import numpy as np -import torch -import warnings - -from .. import custom_ops -from .. import misc -from . import upfirdn2d -from . 
import bias_act - -#---------------------------------------------------------------------------- - -_plugin = None - -def _init(): - global _plugin - if _plugin is None: - _plugin = custom_ops.get_plugin( - module_name='filtered_lrelu_plugin', - sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'], - headers=['filtered_lrelu.h', 'filtered_lrelu.cu'], - source_dir=os.path.dirname(__file__), - extra_cuda_cflags=['--use_fast_math'], - ) - return True - -def _get_filter_size(f): - if f is None: - return 1, 1 - assert isinstance(f, torch.Tensor) - assert 1 <= f.ndim <= 2 - return f.shape[-1], f.shape[0] # width, height - -def _parse_padding(padding): - if isinstance(padding, int): - padding = [padding, padding] - assert isinstance(padding, (list, tuple)) - assert all(isinstance(x, (int, np.integer)) for x in padding) - padding = [int(x) for x in padding] - if len(padding) == 2: - px, py = padding - padding = [px, px, py, py] - px0, px1, py0, py1 = padding - return px0, px1, py0, py1 - -#---------------------------------------------------------------------------- - -def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'): - r"""Filtered leaky ReLU for a batch of 2D images. - - Performs the following sequence of operations for each channel: - - 1. Add channel-specific bias if provided (`b`). - - 2. Upsample the image by inserting N-1 zeros after each pixel (`up`). - - 3. Pad the image with the specified number of zeros on each side (`padding`). - Negative padding corresponds to cropping the image. - - 4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it - so that the footprint of all output pixels lies within the input image. - - 5. Multiply each value by the provided gain factor (`gain`). - - 6. Apply leaky ReLU activation function to each value. - - 7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided. - - 8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking - it so that the footprint of all output pixels lies within the input image. - - 9. Downsample the image by keeping every Nth pixel (`down`). - - The fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports gradients of arbitrary order. - - Args: - x: Float32/float16/float64 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - fu: Float32 upsampling FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - fd: Float32 downsampling FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type - as `x`. The length of vector must must match the channel dimension of `x`. - up: Integer upsampling factor (default: 1). - down: Integer downsampling factor. (default: 1). - padding: Padding with respect to the upsampled image. Can be a single number - or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - gain: Overall scaling factor for signal magnitude (default: sqrt(2)). - slope: Slope on the negative side of leaky ReLU (default: 0.2). - clamp: Maximum magnitude for leaky ReLU output (default: None). - flip_filter: False = convolution, True = correlation (default: False). 
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0) - return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False): - """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using - existing `upfirdn2n()` and `bias_act()` ops. - """ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - fu_w, fu_h = _get_filter_size(fu) - fd_w, fd_h = _get_filter_size(fd) - if b is not None: - assert isinstance(b, torch.Tensor) and b.dtype == x.dtype - misc.assert_shape(b, [x.shape[1]]) - assert isinstance(up, int) and up >= 1 - assert isinstance(down, int) and down >= 1 - px0, px1, py0, py1 = _parse_padding(padding) - assert gain == float(gain) and gain > 0 - assert slope == float(slope) and slope >= 0 - assert clamp is None or (clamp == float(clamp) and clamp >= 0) - - # Calculate output size. - batch_size, channels, in_h, in_w = x.shape - in_dtype = x.dtype - out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down - out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down - - # Compute using existing ops. - x = bias_act.bias_act(x=x, b=b) # Apply bias. - x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. - x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp. - x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample. - - # Check output shape & dtype. - misc.assert_shape(x, [batch_size, channels, out_h, out_w]) - assert x.dtype == in_dtype - return x - -#---------------------------------------------------------------------------- - -_filtered_lrelu_cuda_cache = dict() - -def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False): - """Fast CUDA implementation of `filtered_lrelu()` using custom ops. - """ - assert isinstance(up, int) and up >= 1 - assert isinstance(down, int) and down >= 1 - px0, px1, py0, py1 = _parse_padding(padding) - assert gain == float(gain) and gain > 0 - gain = float(gain) - assert slope == float(slope) and slope >= 0 - slope = float(slope) - assert clamp is None or (clamp == float(clamp) and clamp >= 0) - clamp = float(clamp if clamp is not None else 'inf') - - # Lookup from cache. - key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter) - if key in _filtered_lrelu_cuda_cache: - return _filtered_lrelu_cuda_cache[key] - - # Forward op. - class FilteredLReluCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - - # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable). 
- if fu is None: - fu = torch.ones([1, 1], dtype=torch.float32, device=x.device) - if fd is None: - fd = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert 1 <= fu.ndim <= 2 - assert 1 <= fd.ndim <= 2 - - # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1. - if up == 1 and fu.ndim == 1 and fu.shape[0] == 1: - fu = fu.square()[None] - if down == 1 and fd.ndim == 1 and fd.shape[0] == 1: - fd = fd.square()[None] - - # Missing sign input tensor. - if si is None: - si = torch.empty([0]) - - # Missing bias tensor. - if b is None: - b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device) - - # Construct internal sign tensor only if gradients are needed. - write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad) - - # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout. - strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1] - if any(a < b for a, b in zip(strides[:-1], strides[1:])): - warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning) - - # Call C++/Cuda plugin if datatype is supported. - if x.dtype in [torch.float16, torch.float32]: - if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device): - warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning) - y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs) - else: - return_code = -1 - - # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because - # only the bit-packed sign tensor is retained for gradient computation. - if return_code < 0: - warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning) - - y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias. - y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. - so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place. - y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample. - - # Prepare for gradient computation. 
- ctx.save_for_backward(fu, fd, (si if si.numel() else so)) - ctx.x_shape = x.shape - ctx.y_shape = y.shape - ctx.s_ofs = sx, sy - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - fu, fd, si = ctx.saved_tensors - _, _, xh, xw = ctx.x_shape - _, _, yh, yw = ctx.y_shape - sx, sy = ctx.s_ofs - dx = None # 0 - dfu = None; assert not ctx.needs_input_grad[1] - dfd = None; assert not ctx.needs_input_grad[2] - db = None # 3 - dsi = None; assert not ctx.needs_input_grad[4] - dsx = None; assert not ctx.needs_input_grad[5] - dsy = None; assert not ctx.needs_input_grad[6] - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]: - pp = [ - (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0, - xw * up - yw * down + px0 - (up - 1), - (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0, - xh * up - yh * down + py0 - (up - 1), - ] - gg = gain * (up ** 2) / (down ** 2) - ff = (not flip_filter) - sx = sx - (fu.shape[-1] - 1) + px0 - sy = sy - (fu.shape[0] - 1) + py0 - dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy) - - if ctx.needs_input_grad[3]: - db = dx.sum([0, 2, 3]) - - return dx, dfu, dfd, db, dsi, dsx, dsy - - # Add to cache. - _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda - return FilteredLReluCuda - -#---------------------------------------------------------------------------- diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 5e8aaa2d3722e7e73a3d94b2b7dfc4f751d7a240..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ - -Please select an issue template from -https://github.com/facebookresearch/detectron2/issues/new/choose . - -Otherwise your issue will be closed. diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/memory.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/memory.py deleted file mode 100644 index d495a1681f460668c96f64454e31e7f2fca8137a..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/memory.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. - -import logging -from contextlib import contextmanager -from functools import wraps -import torch - -__all__ = ["retry_if_cuda_oom"] - - -@contextmanager -def _ignore_torch_cuda_oom(): - """ - A context which ignores CUDA OOM exception from pytorch. - """ - try: - yield - except RuntimeError as e: - # NOTE: the string may change? - if "CUDA out of memory. " in str(e): - pass - else: - raise - - -def retry_if_cuda_oom(func): - """ - Makes a function retry itself after encountering - pytorch's CUDA OOM error. - It will first retry after calling `torch.cuda.empty_cache()`. - - If that still fails, it will then retry by trying to convert inputs to CPUs. - In this case, it expects the function to dispatch to CPU implementation. 
- The return values may become CPU tensors as well and it's user's - responsibility to convert it back to CUDA tensor if needed. - - Args: - func: a stateless callable that takes tensor-like objects as arguments - - Returns: - a callable which retries `func` if OOM is encountered. - - Examples: - - .. code-block:: python - - output = retry_if_cuda_oom(some_torch_function)(input1, input2) - # output may be on CPU even if inputs are on GPU - - Note: - 1. When converting inputs to CPU, it will only look at each argument and check - if it has `.device` and `.to` for conversion. Nested structures of tensors - are not supported. - - 2. Since the function might be called more than once, it has to be - stateless. - """ - - def maybe_to_cpu(x): - try: - like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to") - except AttributeError: - like_gpu_tensor = False - if like_gpu_tensor: - return x.to(device="cpu") - else: - return x - - @wraps(func) - def wrapped(*args, **kwargs): - with _ignore_torch_cuda_oom(): - return func(*args, **kwargs) - - # Clear cache and retry - torch.cuda.empty_cache() - with _ignore_torch_cuda_oom(): - return func(*args, **kwargs) - - # Try on CPU. This slows down the code significantly, therefore print a notice. - logger = logging.getLogger(__name__) - logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func))) - new_args = (maybe_to_cpu(x) for x in args) - new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()} - return func(*new_args, **new_kwargs) - - return wrapped diff --git a/spaces/hekbobo/bingo/src/lib/isomorphic/index.ts b/spaces/hekbobo/bingo/src/lib/isomorphic/index.ts deleted file mode 100644 index 738dc92f74079ab762d584fb7422a8c8c3b61547..0000000000000000000000000000000000000000 --- a/spaces/hekbobo/bingo/src/lib/isomorphic/index.ts +++ /dev/null @@ -1,17 +0,0 @@ -'use client' - -import Default from './browser' - -let exportsModel: any = {} - -if (process.browser) { - Object.assign(exportsModel, require('./browser').default) -} else { - Object.assign(exportsModel, require('./node').default) -} - -export default exportsModel! as typeof Default - -export const fetch: typeof Default.fetch = exportsModel!.fetch -export const WebSocket: typeof Default.WebSocket = exportsModel!.WebSocket -export const debug: typeof Default.debug = exportsModel!.debug diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py deleted file mode 100644 index 4505be90d88dc29c21501ace680d1f122681f46c..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import shutil -from multiprocessing import Pool - -import SimpleITK as sitk -import numpy as np -from batchgenerators.utilities.file_and_folder_operations import * -from skimage.io import imread -from skimage.io import imsave -from skimage.morphology import disk -from skimage.morphology import erosion -from skimage.transform import resize - -from nnunet.paths import nnUNet_raw_data - - -def load_bmp_convert_to_nifti_borders_2d(img_file, lab_file, img_out_base, anno_out, spacing, border_thickness=0.7): - img = imread(img_file) - img_itk = sitk.GetImageFromArray(img.astype(np.float32)[None]) - img_itk.SetSpacing(list(spacing)[::-1] + [999]) - sitk.WriteImage(img_itk, join(img_out_base + "_0000.nii.gz")) - - if lab_file is not None: - l = imread(lab_file) - borders = generate_border_as_suggested_by_twollmann_2d(l, spacing, border_thickness) - l[l > 0] = 1 - l[borders == 1] = 2 - l_itk = sitk.GetImageFromArray(l.astype(np.uint8)[None]) - l_itk.SetSpacing(list(spacing)[::-1] + [999]) - sitk.WriteImage(l_itk, anno_out) - - -def generate_disk(spacing, radius, dtype=int): - radius_in_voxels = np.round(radius / np.array(spacing)).astype(int) - n = 2 * radius_in_voxels + 1 - disk_iso = disk(max(n) * 2, dtype=np.float64) - disk_resampled = resize(disk_iso, n, 1, 'constant', 0, clip=True, anti_aliasing=False, preserve_range=True) - disk_resampled[disk_resampled > 0.5] = 1 - disk_resampled[disk_resampled <= 0.5] = 0 - return disk_resampled.astype(dtype) - - -def generate_border_as_suggested_by_twollmann_2d(label_img: np.ndarray, spacing, - border_thickness: float = 2) -> np.ndarray: - border = np.zeros_like(label_img) - selem = generate_disk(spacing, border_thickness) - for l in np.unique(label_img): - if l == 0: continue - mask = (label_img == l).astype(int) - eroded = erosion(mask, selem) - border[(eroded == 0) & (mask != 0)] = 1 - return border - - -def prepare_task(base, task_id, task_name, spacing, border_thickness: float = 15): - p = Pool(16) - - foldername = "Task%03.0d_%s" % (task_id, task_name) - - out_base = join(nnUNet_raw_data, foldername) - imagestr = join(out_base, "imagesTr") - imagests = join(out_base, "imagesTs") - labelstr = join(out_base, "labelsTr") - maybe_mkdir_p(imagestr) - maybe_mkdir_p(imagests) - maybe_mkdir_p(labelstr) - - train_patient_names = [] - test_patient_names = [] - res = [] - - for train_sequence in [i for i in subfolders(base + "_train", join=False) if not i.endswith("_GT")]: - train_cases = subfiles(join(base + '_train', train_sequence), suffix=".tif", join=False) - for t in train_cases: - casename = train_sequence + "_" + t[:-4] - img_file = join(base + '_train', train_sequence, t) - lab_file = join(base + '_train', train_sequence + "_GT", "SEG", "man_seg" + t[1:]) - if not isfile(lab_file): - continue - img_out_base = join(imagestr, casename) - anno_out = join(labelstr, casename + ".nii.gz") - res.append( - p.starmap_async(load_bmp_convert_to_nifti_borders_2d, - ((img_file, lab_file, img_out_base, anno_out, spacing, border_thickness),))) - train_patient_names.append(casename) - - for test_sequence in [i for i in subfolders(base + "_test", join=False) if not i.endswith("_GT")]: - test_cases = subfiles(join(base + '_test', test_sequence), suffix=".tif", join=False) - for t in test_cases: - casename = test_sequence + "_" + t[:-4] - img_file = join(base + '_test', test_sequence, t) - lab_file = None - img_out_base = join(imagests, casename) - anno_out = None - res.append( - p.starmap_async(load_bmp_convert_to_nifti_borders_2d, - ((img_file, lab_file, 
img_out_base, anno_out, spacing, border_thickness),))) - test_patient_names.append(casename) - - _ = [i.get() for i in res] - - json_dict = {} - json_dict['name'] = task_name - json_dict['description'] = "" - json_dict['tensorImageSize'] = "4D" - json_dict['reference'] = "" - json_dict['licence'] = "" - json_dict['release'] = "0.0" - json_dict['modality'] = { - "0": "BF", - } - json_dict['labels'] = { - "0": "background", - "1": "cell", - "2": "border", - } - - json_dict['numTraining'] = len(train_patient_names) - json_dict['numTest'] = len(test_patient_names) - json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in - train_patient_names] - json_dict['test'] = ["./imagesTs/%s.nii.gz" % i for i in test_patient_names] - - save_json(json_dict, os.path.join(out_base, "dataset.json")) - p.close() - p.join() - - -def convert_to_instance_seg(arr: np.ndarray, spacing: tuple = (0.125, 0.125), small_center_threshold: int = 30, - isolated_border_as_separate_instance_threshold=15): - from skimage.morphology import label, dilation - - # we first identify centers that are too small and set them to be border. This should remove false positive instances - objects = label((arr == 1).astype(int)) - for o in np.unique(objects): - if o > 0 and np.sum(objects == o) <= small_center_threshold: - arr[objects == o] = 2 - - # 1 is core, 2 is border - objects = label((arr == 1).astype(int)) - final = np.copy(objects) - remaining_border = arr == 2 - current = np.copy(objects) - dilated_mm = np.array((0, 0)) - spacing = np.array(spacing) - - while np.sum(remaining_border) > 0: - strel_size = [0, 0] - maximum_dilation = max(dilated_mm) - for i in range(2): - if spacing[i] == min(spacing): - strel_size[i] = 1 - continue - if dilated_mm[i] + spacing[i] / 2 < maximum_dilation: - strel_size[i] = 1 - ball_here = disk(1) - - if strel_size[0] == 0: ball_here = ball_here[1:2] - if strel_size[1] == 0: ball_here = ball_here[:, 1:2] - - #print(1) - dilated = dilation(current, ball_here) - diff = (current == 0) & (dilated != current) - final[diff & remaining_border] = dilated[diff & remaining_border] - remaining_border[diff] = 0 - current = dilated - dilated_mm = [dilated_mm[i] + spacing[i] if strel_size[i] == 1 else dilated_mm[i] for i in range(2)] - - # what can happen is that a cell is so small that the network only predicted border and no core. This cell will be - # fused with the nearest other instance, which we don't want. 
Therefore we identify isolated border predictions and - # give them a separate instance id - # we identify isolated border predictions by checking each foreground object in arr and see whether this object - # also contains label 1 - max_label = np.max(final) - - foreground_objects = label((arr != 0).astype(int)) - for i in np.unique(foreground_objects): - if i > 0 and (1 not in np.unique(arr[foreground_objects==i])): - size_of_object = np.sum(foreground_objects==i) - if size_of_object >= isolated_border_as_separate_instance_threshold: - final[foreground_objects == i] = max_label + 1 - max_label += 1 - #print('yeah boi') - - return final.astype(np.uint32) - - -def load_convert_to_instance_save(file_in: str, file_out: str, spacing): - img = sitk.ReadImage(file_in) - img_npy = sitk.GetArrayFromImage(img) - out = convert_to_instance_seg(img_npy[0], spacing)[None] - out_itk = sitk.GetImageFromArray(out.astype(np.int16)) - out_itk.CopyInformation(img) - sitk.WriteImage(out_itk, file_out) - - -def convert_folder_to_instanceseg(folder_in: str, folder_out: str, spacing, processes: int = 12): - input_files = subfiles(folder_in, suffix=".nii.gz", join=False) - maybe_mkdir_p(folder_out) - output_files = [join(folder_out, i) for i in input_files] - input_files = [join(folder_in, i) for i in input_files] - p = Pool(processes) - r = [] - for i, o in zip(input_files, output_files): - r.append( - p.starmap_async( - load_convert_to_instance_save, - ((i, o, spacing),) - ) - ) - _ = [i.get() for i in r] - p.close() - p.join() - - -def convert_to_tiff(nifti_image: str, output_name: str): - npy = sitk.GetArrayFromImage(sitk.ReadImage(nifti_image)) - imsave(output_name, npy[0].astype(np.uint16), compress=6) - - -if __name__ == "__main__": - base = "/home/fabian/Downloads/Fluo-N2DH-SIM+" - task_name = 'Fluo-N2DH-SIM' - spacing = (0.125, 0.125) - - task_id = 999 - border_thickness = 0.7 - prepare_task(base, task_id, task_name, spacing, border_thickness) - - task_id = 89 - additional_time_steps = 4 - task_name = 'Fluo-N2DH-SIM_thickborder_time' - full_taskname = 'Task%03.0d_' % task_id + task_name - output_raw = join(nnUNet_raw_data, full_taskname) - shutil.rmtree(output_raw) - shutil.copytree(join(nnUNet_raw_data, 'Task999_Fluo-N2DH-SIM_thickborder'), output_raw) - - shutil.rmtree(join(nnUNet_raw_data, 'Task999_Fluo-N2DH-SIM_thickborder')) - - # now add additional time information - for fld in ['imagesTr', 'imagesTs']: - curr = join(output_raw, fld) - for seq in ['01', '02']: - images = subfiles(curr, prefix=seq, join=False) - for i in images: - current_timestep = int(i.split('_')[1][1:]) - renamed = join(curr, i.replace("_0000", "_%04.0d" % additional_time_steps)) - shutil.move(join(curr, i), renamed) - for previous_timestep in range(-additional_time_steps, 0): - # previous time steps will already have been processed and renamed! 
- expected_filename = join(curr, seq + "_t%03.0d" % ( - current_timestep + previous_timestep) + "_%04.0d" % additional_time_steps + ".nii.gz") - if not isfile(expected_filename): - # create empty image - img = sitk.ReadImage(renamed) - empty = sitk.GetImageFromArray(np.zeros_like(sitk.GetArrayFromImage(img))) - empty.CopyInformation(img) - sitk.WriteImage(empty, join(curr, i.replace("_0000", "_%04.0d" % ( - additional_time_steps + previous_timestep)))) - else: - shutil.copy(expected_filename, join(curr, i.replace("_0000", "_%04.0d" % ( - additional_time_steps + previous_timestep)))) - dataset = load_json(join(output_raw, 'dataset.json')) - dataset['modality'] = { - '0': 't_minus 4', - '1': 't_minus 3', - '2': 't_minus 2', - '3': 't_minus 1', - '4': 'frame of interest', - } - save_json(dataset, join(output_raw, 'dataset.json')) - - # we do not need custom splits since we train on all training cases - - # test set predictions are converted to instance seg with convert_folder_to_instanceseg - - # test set predictions are converted to tiff with convert_to_tiff \ No newline at end of file diff --git a/spaces/huolongguo10/huolongguo10-check_sec/app.py b/spaces/huolongguo10/huolongguo10-check_sec/app.py deleted file mode 100644 index c4e856123ed12646c9a4133a6dd1fdca243149b2..0000000000000000000000000000000000000000 --- a/spaces/huolongguo10/huolongguo10-check_sec/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import gradio as gr -import transformers -from transformers import BertTokenizer, DataCollatorWithPadding -from transformers import AutoModelForSequenceClassification -tokenizer = BertTokenizer.from_pretrained('huolongguo10/check_sec') -model = AutoModelForSequenceClassification.from_pretrained('huolongguo10/check_sec', num_labels=2) -_tokenizer = BertTokenizer.from_pretrained('huolongguo10/check_sec_tiny') -_model = AutoModelForSequenceClassification.from_pretrained('huolongguo10/check_sec_tiny', num_labels=2) -import torch -def check_each(text): - inputs = tokenizer(text, return_tensors="pt",max_length=512) - with torch.no_grad(): - logits = model(**inputs).logits - predicted_class_id = logits.argmax().item() - print(f'{logits.argmax().item()}:{text}') - return 'secure' if predicted_class_id==0 else 'insecure' -def _check_each(text): - inputs = _tokenizer(text, return_tensors="pt",max_length=512) - with torch.no_grad(): - logits = _model(**inputs).logits - predicted_class_id = logits.argmax().item() - print(f't-{logits.argmax().item()}:{text}') - return 'secure' if predicted_class_id==0 else 'insecure' -def _check(text): - t=text - while len(t)>512: - if check_each(t[0:511])=='insecure': - return 'insecure' - t=t[512:] - return check_each(t) -def _check_tiny(text): - t=text - while len(t)>512: - if _check_each(t[0:511])=='insecure': - return 'insecure' - t=t[512:] - return _check_each(t) -def check(text): - return _check(text),_check_tiny(text) -with gr.Blocks() as demo: - text = gr.Textbox(label="Text") - output = gr.Textbox(label="Output Box") - _output = gr.Textbox(label="Output Box(By Tiny)") - # org = gr.Textbox(label="By normal check") - greet_btn = gr.Button("Check!") - greet_btn.click(fn=check, inputs=text, outputs=[output,_output], api_name="check") - gr.Markdown('''# check_sec -检查web参数安全性,支持多种payload(v0.1.2) - -## 类型 -``` -LABEL_0: secure -LABEL_1: insecure(可能包含payload) -``` - ''') -# gr.Interface.load("models/huolongguo10/check_sec").launch() - -demo.launch() \ No newline at end of file diff --git a/spaces/hysts/gan-control/app.py b/spaces/hysts/gan-control/app.py deleted file mode 
100644 index 360cd575bcdcf842378533ba39c4c86e7f0eb13f..0000000000000000000000000000000000000000 --- a/spaces/hysts/gan-control/app.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import functools -import os -import pathlib -import shlex -import subprocess -import sys -import tarfile - -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image -import torch - -if os.getenv('SYSTEM') == 'spaces': - with open('patch') as f: - subprocess.run(shlex.split('patch -p1'), cwd='gan-control', stdin=f) - -sys.path.insert(0, 'gan-control/src') - -from gan_control.inference.controller import Controller - -TITLE = 'GAN-Control' -DESCRIPTION = 'https://github.com/amazon-research/gan-control' - - -def download_models() -> None: - model_dir = pathlib.Path('controller_age015id025exp02hai04ori02gam15') - if not model_dir.exists(): - path = huggingface_hub.hf_hub_download( - 'public-data/gan-control', - 'controller_age015id025exp02hai04ori02gam15.tar.gz') - with tarfile.open(path) as f: - f.extractall() - - -@torch.inference_mode() -def run( - seed: int, - truncation: float, - yaw: int, - pitch: int, - age: int, - hair_color_r: float, - hair_color_g: float, - hair_color_b: float, - nrows: int, - ncols: int, - controller: Controller, - device: torch.device, -) -> PIL.Image.Image: - seed = int(np.clip(seed, 0, np.iinfo(np.uint32).max)) - batch_size = nrows * ncols - latent_size = controller.config.model_config['latent_size'] - latent = torch.from_numpy( - np.random.RandomState(seed).randn(batch_size, - latent_size)).float().to(device) - - initial_image_tensors, initial_latent_z, initial_latent_w = controller.gen_batch( - latent=latent, truncation=truncation) - res0 = controller.make_resized_grid_image(initial_image_tensors, - nrow=ncols) - - pose_control = torch.tensor([[yaw, pitch, 0]], dtype=torch.float32) - image_tensors, _, modified_latent_w = controller.gen_batch_by_controls( - latent=initial_latent_w, - input_is_latent=True, - orientation=pose_control) - res1 = controller.make_resized_grid_image(image_tensors, nrow=ncols) - - age_control = torch.tensor([[age]], dtype=torch.float32) - image_tensors, _, modified_latent_w = controller.gen_batch_by_controls( - latent=initial_latent_w, input_is_latent=True, age=age_control) - res2 = controller.make_resized_grid_image(image_tensors, nrow=ncols) - - hair_color = torch.tensor([[hair_color_r, hair_color_g, hair_color_b]], - dtype=torch.float32) / 255 - hair_color = torch.clamp(hair_color, 0, 1) - image_tensors, _, modified_latent_w = controller.gen_batch_by_controls( - latent=initial_latent_w, input_is_latent=True, hair=hair_color) - res3 = controller.make_resized_grid_image(image_tensors, nrow=ncols) - - return res0, res1, res2, res3 - - -download_models() - -device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') -path = 'controller_age015id025exp02hai04ori02gam15/' -controller = Controller(path, device) -fn = functools.partial(run, controller=controller, device=device) - -gr.Interface( - fn=fn, - inputs=[ - gr.Slider(label='Seed', minimum=0, maximum=1000000, step=1, value=0), - gr.Slider(label='Truncation', - minimum=0, - maximum=1, - step=0.1, - value=0.7), - gr.Slider(label='Yaw', minimum=-90, maximum=90, step=1, value=30), - gr.Slider(label='Pitch', minimum=-90, maximum=90, step=1, value=0), - gr.Slider(label='Age', minimum=15, maximum=75, step=1, value=75), - gr.Slider(label='Hair Color (R)', - minimum=0, - maximum=255, - step=1, - value=186), - gr.Slider(label='Hair 
Color (G)', - minimum=0, - maximum=255, - step=1, - value=158), - gr.Slider(label='Hair Color (B)', - minimum=0, - maximum=255, - step=1, - value=92), - gr.Slider(label='Number of Rows', - minimum=1, - maximum=3, - step=1, - value=1), - gr.Slider(label='Number of Columns', - minimum=1, - maximum=5, - step=1, - value=5), - ], - outputs=[ - gr.Image(label='Generated Image', type='pil'), - gr.Image(label='Head Pose Controlled', type='pil'), - gr.Image(label='Age Controlled', type='pil'), - gr.Image(label='Hair Color Controlled', type='pil'), - ], - title=TITLE, - description=DESCRIPTION, -).queue(max_size=10).launch() diff --git a/spaces/hysts/insightface-person-detection/app.py b/spaces/hysts/insightface-person-detection/app.py deleted file mode 100644 index 2ec075ee3cad7f12866782288a970c4fda78126b..0000000000000000000000000000000000000000 --- a/spaces/hysts/insightface-person-detection/app.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import functools -import pathlib - -import cv2 -import gradio as gr -import huggingface_hub -import insightface -import numpy as np -import onnxruntime as ort - -TITLE = 'insightface Person Detection' -DESCRIPTION = 'https://github.com/deepinsight/insightface/tree/master/examples/person_detection' - - -def load_model(): - path = huggingface_hub.hf_hub_download('public-data/insightface', - 'models/scrfd_person_2.5g.onnx') - options = ort.SessionOptions() - options.intra_op_num_threads = 8 - options.inter_op_num_threads = 8 - session = ort.InferenceSession( - path, - sess_options=options, - providers=['CPUExecutionProvider', 'CUDAExecutionProvider']) - model = insightface.model_zoo.retinaface.RetinaFace(model_file=path, - session=session) - return model - - -def detect_person( - img: np.ndarray, detector: insightface.model_zoo.retinaface.RetinaFace -) -> tuple[np.ndarray, np.ndarray]: - bboxes, kpss = detector.detect(img) - bboxes = np.round(bboxes[:, :4]).astype(int) - kpss = np.round(kpss).astype(int) - kpss[:, :, 0] = np.clip(kpss[:, :, 0], 0, img.shape[1]) - kpss[:, :, 1] = np.clip(kpss[:, :, 1], 0, img.shape[0]) - vbboxes = bboxes.copy() - vbboxes[:, 0] = kpss[:, 0, 0] - vbboxes[:, 1] = kpss[:, 0, 1] - vbboxes[:, 2] = kpss[:, 4, 0] - vbboxes[:, 3] = kpss[:, 4, 1] - return bboxes, vbboxes - - -def visualize(image: np.ndarray, bboxes: np.ndarray, - vbboxes: np.ndarray) -> np.ndarray: - res = image.copy() - for i in range(bboxes.shape[0]): - bbox = bboxes[i] - vbbox = vbboxes[i] - x1, y1, x2, y2 = bbox - vx1, vy1, vx2, vy2 = vbbox - cv2.rectangle(res, (x1, y1), (x2, y2), (0, 255, 0), 1) - alpha = 0.8 - color = (255, 0, 0) - for c in range(3): - res[vy1:vy2, vx1:vx2, - c] = res[vy1:vy2, vx1:vx2, - c] * alpha + color[c] * (1.0 - alpha) - cv2.circle(res, (vx1, vy1), 1, color, 2) - cv2.circle(res, (vx1, vy2), 1, color, 2) - cv2.circle(res, (vx2, vy1), 1, color, 2) - cv2.circle(res, (vx2, vy2), 1, color, 2) - return res - - -def detect(image: np.ndarray, detector) -> np.ndarray: - image = image[:, :, ::-1] # RGB -> BGR - bboxes, vbboxes = detect_person(image, detector) - res = visualize(image, bboxes, vbboxes) - return res[:, :, ::-1] # BGR -> RGB - - -detector = load_model() -detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640)) -fn = functools.partial(detect, detector=detector) - -image_dir = pathlib.Path('images') -examples = [[path.as_posix()] for path in sorted(image_dir.glob('*.jpg'))] - -gr.Interface( - fn=fn, - inputs=gr.Image(label='Input', type='numpy'), - outputs=gr.Image(label='Output', 
height=600), - examples=examples, - examples_per_page=30, - title=TITLE, - description=DESCRIPTION, -).queue().launch() diff --git a/spaces/hzy123/bingo/src/components/chat-history.tsx b/spaces/hzy123/bingo/src/components/chat-history.tsx deleted file mode 100644 index feb81de66562edda8f40d3c0cc717202c92b6509..0000000000000000000000000000000000000000 --- a/spaces/hzy123/bingo/src/components/chat-history.tsx +++ /dev/null @@ -1,48 +0,0 @@ -import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons" - -export function ChatHistory() { - return (
-    <div>
-      <div>历史记录</div>
-      <div>
-        <div>无标题的聊天</div>
-        <div>上午1:42</div>
-        <div>
-          <IconEdit />
-          <IconTrash />
-          <IconMore />
-          <IconDownload />
-        </div>
-      </div>
-    </div>
      - ) -} diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/README.md b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/README.md deleted file mode 100644 index 2ee63a861229b68873561fa39bfa7c9a8b53b947..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/README.md +++ /dev/null @@ -1,164 +0,0 @@ -# Distributed Arcface Training in Pytorch - -This is a deep learning library that makes face recognition efficient, and effective, which can train tens of millions -identity on a single server. - -## Requirements - -- Install [pytorch](http://pytorch.org) (torch>=1.6.0), our doc for [install.md](docs/install.md). -- `pip install -r requirements.txt`. -- Download the dataset - from [https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_](https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_) - . - -## How to Training - -To train a model, run `train.py` with the path to the configs: - -### 1. Single node, 8 GPUs: - -```shell -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r50 -``` - -### 2. Multiple nodes, each node 8 GPUs: - -Node 0: - -```shell -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr="ip1" --master_port=1234 train.py train.py configs/ms1mv3_r50 -``` - -Node 1: - -```shell -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr="ip1" --master_port=1234 train.py train.py configs/ms1mv3_r50 -``` - -### 3.Training resnet2060 with 8 GPUs: - -```shell -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r2060.py -``` - -## Model Zoo - -- The models are available for non-commercial research purposes only. -- All models can be found in here. -- [Baidu Yun Pan](https://pan.baidu.com/s/1CL-l4zWqsI1oDuEEYVhj-g): e8pw -- [onedrive](https://1drv.ms/u/s!AswpsDO2toNKq0lWY69vN58GR6mw?e=p9Ov5d) - -### Performance on [**ICCV2021-MFR**](http://iccv21-mfr.com/) - -ICCV2021-MFR testset consists of non-celebrities so we can ensure that it has very few overlap with public available face -recognition training set, such as MS1M and CASIA as they mostly collected from online celebrities. -As the result, we can evaluate the FAIR performance for different algorithms. - -For **ICCV2021-MFR-ALL** set, TAR is measured on all-to-all 1:1 protocal, with FAR less than 0.000001(e-6). The -globalised multi-racial testset contains 242,143 identities and 1,624,305 images. - -For **ICCV2021-MFR-MASK** set, TAR is measured on mask-to-nonmask 1:1 protocal, with FAR less than 0.0001(e-4). -Mask testset contains 6,964 identities, 6,964 masked images and 13,928 non-masked images. -There are totally 13,928 positive pairs and 96,983,824 negative pairs. 
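As a toy illustration of the protocol above, the sketch below estimates TAR at a fixed FAR directly from raw 1:1 similarity scores. The function name, the score distributions and the pair counts are made up for the example; this is not the benchmark's official evaluation code.

```python
# Toy sketch of TAR@FAR from 1:1 verification scores (illustrative only,
# not the official MFR/IJB evaluation code).
import numpy as np

def tar_at_far(genuine_scores, impostor_scores, target_far=1e-6):
    # Pick the acceptance threshold so that at most `target_far` of the
    # impostor (negative) pairs score above it, then measure how many
    # genuine (positive) pairs still pass that threshold.
    impostor_sorted = np.sort(impostor_scores)[::-1]
    n_accept = int(np.floor(target_far * impostor_sorted.size))
    threshold = impostor_sorted[n_accept]
    return float(np.mean(genuine_scores > threshold)), float(threshold)

# Synthetic scores only; a real run uses the benchmark's millions of pairs.
rng = np.random.default_rng(0)
genuine = rng.normal(0.55, 0.10, 10_000)
impostor = rng.normal(0.10, 0.10, 1_000_000)
print(tar_at_far(genuine, impostor, target_far=1e-4))
```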
- -| Datasets | backbone | Training throughout | Size / MB | **ICCV2021-MFR-MASK** | **ICCV2021-MFR-ALL** | -| :---: | :--- | :--- | :--- |:--- |:--- | -| MS1MV3 | r18 | - | 91 | **47.85** | **68.33** | -| Glint360k | r18 | 8536 | 91 | **53.32** | **72.07** | -| MS1MV3 | r34 | - | 130 | **58.72** | **77.36** | -| Glint360k | r34 | 6344 | 130 | **65.10** | **83.02** | -| MS1MV3 | r50 | 5500 | 166 | **63.85** | **80.53** | -| Glint360k | r50 | 5136 | 166 | **70.23** | **87.08** | -| MS1MV3 | r100 | - | 248 | **69.09** | **84.31** | -| Glint360k | r100 | 3332 | 248 | **75.57** | **90.66** | -| MS1MV3 | mobilefacenet | 12185 | 7.8 | **41.52** | **65.26** | -| Glint360k | mobilefacenet | 11197 | 7.8 | **44.52** | **66.48** | - -### Performance on IJB-C and Verification Datasets - -| Datasets | backbone | IJBC(1e-05) | IJBC(1e-04) | agedb30 | cfp_fp | lfw | log | -| :---: | :--- | :--- | :--- | :--- |:--- |:--- |:--- | -| MS1MV3 | r18 | 92.07 | 94.66 | 97.77 | 97.73 | 99.77 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r18_fp16/training.log)| -| MS1MV3 | r34 | 94.10 | 95.90 | 98.10 | 98.67 | 99.80 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r34_fp16/training.log)| -| MS1MV3 | r50 | 94.79 | 96.46 | 98.35 | 98.96 | 99.83 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r50_fp16/training.log)| -| MS1MV3 | r100 | 95.31 | 96.81 | 98.48 | 99.06 | 99.85 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r100_fp16/training.log)| -| MS1MV3 | **r2060**| 95.34 | 97.11 | 98.67 | 99.24 | 99.87 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r2060_fp16/training.log)| -| Glint360k |r18-0.1 | 93.16 | 95.33 | 97.72 | 97.73 | 99.77 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r18_fp16_0.1/training.log)| -| Glint360k |r34-0.1 | 95.16 | 96.56 | 98.33 | 98.78 | 99.82 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r34_fp16_0.1/training.log)| -| Glint360k |r50-0.1 | 95.61 | 96.97 | 98.38 | 99.20 | 99.83 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r50_fp16_0.1/training.log)| -| Glint360k |r100-0.1 | 95.88 | 97.32 | 98.48 | 99.29 | 99.82 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r100_fp16_0.1/training.log)| - -[comment]: <> (More details see [model.md](docs/modelzoo.md) in docs.) - - -## [Speed Benchmark](docs/speed_benchmark.md) - -**Arcface Torch** can train large-scale face recognition training set efficiently and quickly. When the number of -classes in training sets is greater than 300K and the training is sufficient, partial fc sampling strategy will get same -accuracy with several times faster training performance and smaller GPU memory. -Partial FC is a sparse variant of the model parallel architecture for large sacle face recognition. Partial FC use a -sparse softmax, where each batch dynamicly sample a subset of class centers for training. In each iteration, only a -sparse part of the parameters will be updated, which can reduce a lot of GPU memory and calculations. With Partial FC, -we can scale trainset of 29 millions identities, the largest to date. Partial FC also supports multi-machine distributed -training and mixed precision training. 
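To make the sampling idea concrete, here is a minimal sketch of a Partial FC-style step under assumed names (`partial_fc_logits`, `sample_rate`); it is not the insightface implementation or its API, only an illustration of keeping the in-batch (positive) class centers plus a random subset of the remaining centers in the softmax.

```python
# Minimal sketch of Partial FC-style class-center sampling (illustration only,
# not the insightface implementation or its API).
import torch
import torch.nn.functional as F

def partial_fc_logits(features, labels, weight, sample_rate=0.1):
    """features: [B, D] embeddings, labels: [B], weight: [C, D] class centers."""
    num_classes = weight.shape[0]
    num_sample = max(int(num_classes * sample_rate), int(labels.numel()))
    # Always keep the class centers that appear in this batch (the positives).
    positive = torch.unique(labels)
    # Fill the rest of the sampling budget with random negative centers.
    mask = torch.ones(num_classes, dtype=torch.bool, device=weight.device)
    mask[positive] = False
    negatives = torch.nonzero(mask, as_tuple=False).squeeze(1)
    picked = negatives[torch.randperm(negatives.numel(), device=weight.device)]
    sampled = torch.cat([positive, picked[: num_sample - positive.numel()]])
    # Remap the original labels into the smaller, sampled class space.
    label_map = torch.full((num_classes,), -1, dtype=torch.long, device=weight.device)
    label_map[sampled] = torch.arange(sampled.numel(), device=weight.device)
    # Cosine logits against the sampled centers only; margin + cross-entropy
    # follow as usual, but gradients now touch roughly 10% of the centers.
    logits = F.linear(F.normalize(features), F.normalize(weight[sampled]))
    return logits, label_map[labels]
```

Roughly speaking, in the distributed setup each GPU holds only a shard of the class-center matrix and performs this sampling on its shard, which is what keeps memory flat enough to reach the 29M-identity scale mentioned above.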
- -![Image text](https://github.com/anxiangsir/insightface_arcface_log/blob/master/partial_fc_v2.png) - -More details see -[speed_benchmark.md](docs/speed_benchmark.md) in docs. - -### 1. Training speed of different parallel methods (samples / second), Tesla V100 32GB * 8. (Larger is better) - -`-` means training failed because of gpu memory limitations. - -| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | -| :--- | :--- | :--- | :--- | -|125000 | 4681 | 4824 | 5004 | -|1400000 | **1672** | 3043 | 4738 | -|5500000 | **-** | **1389** | 3975 | -|8000000 | **-** | **-** | 3565 | -|16000000 | **-** | **-** | 2679 | -|29000000 | **-** | **-** | **1855** | - -### 2. GPU memory cost of different parallel methods (MB per GPU), Tesla V100 32GB * 8. (Smaller is better) - -| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | -| :--- | :--- | :--- | :--- | -|125000 | 7358 | 5306 | 4868 | -|1400000 | 32252 | 11178 | 6056 | -|5500000 | **-** | 32188 | 9854 | -|8000000 | **-** | **-** | 12310 | -|16000000 | **-** | **-** | 19950 | -|29000000 | **-** | **-** | 32324 | - -## Evaluation ICCV2021-MFR and IJB-C - -More details see [eval.md](docs/eval.md) in docs. - -## Test - -We tested many versions of PyTorch. Please create an issue if you are having trouble. - -- [x] torch 1.6.0 -- [x] torch 1.7.1 -- [x] torch 1.8.0 -- [x] torch 1.9.0 - -## Citation - -``` -@inproceedings{deng2019arcface, - title={Arcface: Additive angular margin loss for deep face recognition}, - author={Deng, Jiankang and Guo, Jia and Xue, Niannan and Zafeiriou, Stefanos}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={4690--4699}, - year={2019} -} -@inproceedings{an2020partical_fc, - title={Partial FC: Training 10 Million Identities on a Single Machine}, - author={An, Xiang and Zhu, Xuhan and Xiao, Yang and Wu, Lan and Zhang, Ming and Gao, Yuan and Qin, Bin and - Zhang, Debing and Fu Ying}, - booktitle={Arxiv 2010.05222}, - year={2020} -} -``` diff --git a/spaces/iamironman4279/SadTalker/src/utils/audio.py b/spaces/iamironman4279/SadTalker/src/utils/audio.py deleted file mode 100644 index 89433eb4c681112804fbed72b157700f553739a8..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/utils/audio.py +++ /dev/null @@ -1,136 +0,0 @@ -import librosa -import librosa.filters -import numpy as np -# import tensorflow as tf -from scipy import signal -from scipy.io import wavfile -from src.utils.hparams import hparams as hp - -def load_wav(path, sr): - return librosa.core.load(path, sr=sr)[0] - -def save_wav(wav, path, sr): - wav *= 32767 / max(0.01, np.max(np.abs(wav))) - #proposed by @dsmiller - wavfile.write(path, sr, wav.astype(np.int16)) - -def save_wavenet_wav(wav, path, sr): - librosa.output.write_wav(path, wav, sr=sr) - -def preemphasis(wav, k, preemphasize=True): - if preemphasize: - return signal.lfilter([1, -k], [1], wav) - return wav - -def inv_preemphasis(wav, k, inv_preemphasize=True): - if inv_preemphasize: - return signal.lfilter([1], [1, -k], wav) - return wav - -def get_hop_size(): - hop_size = hp.hop_size - if hop_size is None: - assert hp.frame_shift_ms is not None - hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate) - return hop_size - -def linearspectrogram(wav): - D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize)) - S = _amp_to_db(np.abs(D)) - hp.ref_level_db - - if hp.signal_normalization: - return _normalize(S) - return S - -def melspectrogram(wav): - D = 
_stft(preemphasis(wav, hp.preemphasis, hp.preemphasize)) - S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db - - if hp.signal_normalization: - return _normalize(S) - return S - -def _lws_processor(): - import lws - return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech") - -def _stft(y): - if hp.use_lws: - return _lws_processor(hp).stft(y).T - else: - return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size) - -########################################################## -#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!) -def num_frames(length, fsize, fshift): - """Compute number of time frames of spectrogram - """ - pad = (fsize - fshift) - if length % fshift == 0: - M = (length + pad * 2 - fsize) // fshift + 1 - else: - M = (length + pad * 2 - fsize) // fshift + 2 - return M - - -def pad_lr(x, fsize, fshift): - """Compute left and right padding - """ - M = num_frames(len(x), fsize, fshift) - pad = (fsize - fshift) - T = len(x) + 2 * pad - r = (M - 1) * fshift + fsize - T - return pad, pad + r -########################################################## -#Librosa correct padding -def librosa_pad_lr(x, fsize, fshift): - return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0] - -# Conversions -_mel_basis = None - -def _linear_to_mel(spectogram): - global _mel_basis - if _mel_basis is None: - _mel_basis = _build_mel_basis() - return np.dot(_mel_basis, spectogram) - -def _build_mel_basis(): - assert hp.fmax <= hp.sample_rate // 2 - return librosa.filters.mel(sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels, - fmin=hp.fmin, fmax=hp.fmax) - -def _amp_to_db(x): - min_level = np.exp(hp.min_level_db / 20 * np.log(10)) - return 20 * np.log10(np.maximum(min_level, x)) - -def _db_to_amp(x): - return np.power(10.0, (x) * 0.05) - -def _normalize(S): - if hp.allow_clipping_in_normalization: - if hp.symmetric_mels: - return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value, - -hp.max_abs_value, hp.max_abs_value) - else: - return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value) - - assert S.max() <= 0 and S.min() - hp.min_level_db >= 0 - if hp.symmetric_mels: - return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value - else: - return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)) - -def _denormalize(D): - if hp.allow_clipping_in_normalization: - if hp.symmetric_mels: - return (((np.clip(D, -hp.max_abs_value, - hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) - + hp.min_level_db) - else: - return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db) - - if hp.symmetric_mels: - return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db) - else: - return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db) diff --git a/spaces/iamstolas/STOLAS/src/components/markdown.tsx b/spaces/iamstolas/STOLAS/src/components/markdown.tsx deleted file mode 100644 index d4491467a1f14d1d72e535caac9c40636054e5df..0000000000000000000000000000000000000000 --- a/spaces/iamstolas/STOLAS/src/components/markdown.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { FC, memo } from 'react' -import ReactMarkdown, { Options } from 'react-markdown' - -export const MemoizedReactMarkdown: FC = memo( - ReactMarkdown, - (prevProps, nextProps) => - prevProps.children === 
nextProps.children && - prevProps.className === nextProps.className -) diff --git a/spaces/inamXcontru/PoeticTTS/Answers Key Payroll Accounting Project Chapter 7.30l A Practical Approach to Payroll Accounting.md b/spaces/inamXcontru/PoeticTTS/Answers Key Payroll Accounting Project Chapter 7.30l A Practical Approach to Payroll Accounting.md deleted file mode 100644 index 6fa6ed28126efd0cdfc0fb178ebb1d699d3c2b08..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Answers Key Payroll Accounting Project Chapter 7.30l A Practical Approach to Payroll Accounting.md +++ /dev/null @@ -1,6 +0,0 @@ -

-Answers Key Payroll Accounting Project Chapter 7.30l
-
-Download ✦✦✦ https://gohhs.com/2uz4li
-
- aaccfb2cb3
      diff --git a/spaces/inamXcontru/PoeticTTS/Cinderella Movie In Hindi Dubbed Free [WORK] Download.md b/spaces/inamXcontru/PoeticTTS/Cinderella Movie In Hindi Dubbed Free [WORK] Download.md deleted file mode 100644 index 32b806a9cfae27b35c9b48ebef95a2c7ba40885e..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Cinderella Movie In Hindi Dubbed Free [WORK] Download.md +++ /dev/null @@ -1,7 +0,0 @@ - -

-The price of movies with English subtitles is usually very affordable because a lot of money is spent to make a movie. For example, let's say the movie is a Western action movie starring Daniel Craig, Tom Cruise, or any other famous movie star, with a budget of ten million dollars. Obviously, the cost to make the movie has been very high, so it is necessary to charge as much as possible to earn that money back. It is also more profitable for the movie to be dubbed and subtitled into Hindi and put into the Indian movie market. In that way, the cost of making the movie is reduced from a ten million dollar budget to a two million dollar budget.
-
-cinderella movie in hindi dubbed free download
-
-DOWNLOAD ····· https://gohhs.com/2uz4rS
-
-However, if the movie is just a Hindi dubbed English movie without any famous actors, then there is no need to hire actors who would be paid a lot of money. A Hindi dubbed English movie made in India, without actors who cost millions of dollars to hire, would only cost a hundred thousand dollars to make. If the movie has a budget of five million, there is no need to spend ten million dollars on famous actors; the only thing needed is to hire writers and producers who charge a thousand dollars an hour. Thus, making a Hindi dubbed English movie costs only a small amount of money, and it is more lucrative to hire actors in India, make it in India, and then have it subtitled into Hindi and put into the Indian movie market.
-
-YouTube is one of the best destinations to watch movies online for free. It provides a rich platform for users to view or download movies. Users can enjoy a variety of free movies and TV shows in different languages, and the site keeps adding more content every day. It also provides an option for its users to download videos and movies. The search option on the website is very effective: users can choose from the latest movies available on the site and add them to their watchlist. The site also lets users create an account and watch movies online.
-
- 899543212b
      \ No newline at end of file diff --git a/spaces/innat/VideoMAE/labels.py b/spaces/innat/VideoMAE/labels.py deleted file mode 100644 index be959dbb5edae2ee233b2f85de63cee9b2bf943b..0000000000000000000000000000000000000000 --- a/spaces/innat/VideoMAE/labels.py +++ /dev/null @@ -1,682 +0,0 @@ -K400_label_map = { - "abseiling": 0, - "air_drumming": 1, - "answering_questions": 2, - "applauding": 3, - "applying_cream": 4, - "archery": 5, - "arm_wrestling": 6, - "arranging_flowers": 7, - "assembling_computer": 8, - "auctioning": 9, - "baby_waking_up": 10, - "baking_cookies": 11, - "balloon_blowing": 12, - "bandaging": 13, - "barbequing": 14, - "bartending": 15, - "beatboxing": 16, - "bee_keeping": 17, - "belly_dancing": 18, - "bench_pressing": 19, - "bending_back": 20, - "bending_metal": 21, - "biking_through_snow": 22, - "blasting_sand": 23, - "blowing_glass": 24, - "blowing_leaves": 25, - "blowing_nose": 26, - "blowing_out_candles": 27, - "bobsledding": 28, - "bookbinding": 29, - "bouncing_on_trampoline": 30, - "bowling": 31, - "braiding_hair": 32, - "breading_or_breadcrumbing": 33, - "breakdancing": 34, - "brush_painting": 35, - "brushing_hair": 36, - "brushing_teeth": 37, - "building_cabinet": 38, - "building_shed": 39, - "bungee_jumping": 40, - "busking": 41, - "canoeing_or_kayaking": 42, - "capoeira": 43, - "carrying_baby": 44, - "cartwheeling": 45, - "carving_pumpkin": 46, - "catching_fish": 47, - "catching_or_throwing_baseball": 48, - "catching_or_throwing_frisbee": 49, - "catching_or_throwing_softball": 50, - "celebrating": 51, - "changing_oil": 52, - "changing_wheel": 53, - "checking_tires": 54, - "cheerleading": 55, - "chopping_wood": 56, - "clapping": 57, - "clay_pottery_making": 58, - "clean_and_jerk": 59, - "cleaning_floor": 60, - "cleaning_gutters": 61, - "cleaning_pool": 62, - "cleaning_shoes": 63, - "cleaning_toilet": 64, - "cleaning_windows": 65, - "climbing_a_rope": 66, - "climbing_ladder": 67, - "climbing_tree": 68, - "contact_juggling": 69, - "cooking_chicken": 70, - "cooking_egg": 71, - "cooking_on_campfire": 72, - "cooking_sausages": 73, - "counting_money": 74, - "country_line_dancing": 75, - "cracking_neck": 76, - "crawling_baby": 77, - "crossing_river": 78, - "crying": 79, - "curling_hair": 80, - "cutting_nails": 81, - "cutting_pineapple": 82, - "cutting_watermelon": 83, - "dancing_ballet": 84, - "dancing_charleston": 85, - "dancing_gangnam_style": 86, - "dancing_macarena": 87, - "deadlifting": 88, - "decorating_the_christmas_tree": 89, - "digging": 90, - "dining": 91, - "disc_golfing": 92, - "diving_cliff": 93, - "dodgeball": 94, - "doing_aerobics": 95, - "doing_laundry": 96, - "doing_nails": 97, - "drawing": 98, - "dribbling_basketball": 99, - "drinking": 100, - "drinking_beer": 101, - "drinking_shots": 102, - "driving_car": 103, - "driving_tractor": 104, - "drop_kicking": 105, - "drumming_fingers": 106, - "dunking_basketball": 107, - "dying_hair": 108, - "eating_burger": 109, - "eating_cake": 110, - "eating_carrots": 111, - "eating_chips": 112, - "eating_doughnuts": 113, - "eating_hotdog": 114, - "eating_ice_cream": 115, - "eating_spaghetti": 116, - "eating_watermelon": 117, - "egg_hunting": 118, - "exercising_arm": 119, - "exercising_with_an_exercise_ball": 120, - "extinguishing_fire": 121, - "faceplanting": 122, - "feeding_birds": 123, - "feeding_fish": 124, - "feeding_goats": 125, - "filling_eyebrows": 126, - "finger_snapping": 127, - "fixing_hair": 128, - "flipping_pancake": 129, - "flying_kite": 130, - "folding_clothes": 131, - "folding_napkins": 132, - 
"folding_paper": 133, - "front_raises": 134, - "frying_vegetables": 135, - "garbage_collecting": 136, - "gargling": 137, - "getting_a_haircut": 138, - "getting_a_tattoo": 139, - "giving_or_receiving_award": 140, - "golf_chipping": 141, - "golf_driving": 142, - "golf_putting": 143, - "grinding_meat": 144, - "grooming_dog": 145, - "grooming_horse": 146, - "gymnastics_tumbling": 147, - "hammer_throw": 148, - "headbanging": 149, - "headbutting": 150, - "high_jump": 151, - "high_kick": 152, - "hitting_baseball": 153, - "hockey_stop": 154, - "holding_snake": 155, - "hopscotch": 156, - "hoverboarding": 157, - "hugging": 158, - "hula_hooping": 159, - "hurdling": 160, - "hurling_(sport)": 161, - "ice_climbing": 162, - "ice_fishing": 163, - "ice_skating": 164, - "ironing": 165, - "javelin_throw": 166, - "jetskiing": 167, - "jogging": 168, - "juggling_balls": 169, - "juggling_fire": 170, - "juggling_soccer_ball": 171, - "jumping_into_pool": 172, - "jumpstyle_dancing": 173, - "kicking_field_goal": 174, - "kicking_soccer_ball": 175, - "kissing": 176, - "kitesurfing": 177, - "knitting": 178, - "krumping": 179, - "laughing": 180, - "laying_bricks": 181, - "long_jump": 182, - "lunge": 183, - "making_a_cake": 184, - "making_a_sandwich": 185, - "making_bed": 186, - "making_jewelry": 187, - "making_pizza": 188, - "making_snowman": 189, - "making_sushi": 190, - "making_tea": 191, - "marching": 192, - "massaging_back": 193, - "massaging_feet": 194, - "massaging_legs": 195, - "massaging_person's_head": 196, - "milking_cow": 197, - "mopping_floor": 198, - "motorcycling": 199, - "moving_furniture": 200, - "mowing_lawn": 201, - "news_anchoring": 202, - "opening_bottle": 203, - "opening_present": 204, - "paragliding": 205, - "parasailing": 206, - "parkour": 207, - "passing_American_football_(in_game)": 208, - "passing_American_football_(not_in_game)": 209, - "peeling_apples": 210, - "peeling_potatoes": 211, - "petting_animal_(not_cat)": 212, - "petting_cat": 213, - "picking_fruit": 214, - "planting_trees": 215, - "plastering": 216, - "playing_accordion": 217, - "playing_badminton": 218, - "playing_bagpipes": 219, - "playing_basketball": 220, - "playing_bass_guitar": 221, - "playing_cards": 222, - "playing_cello": 223, - "playing_chess": 224, - "playing_clarinet": 225, - "playing_controller": 226, - "playing_cricket": 227, - "playing_cymbals": 228, - "playing_didgeridoo": 229, - "playing_drums": 230, - "playing_flute": 231, - "playing_guitar": 232, - "playing_harmonica": 233, - "playing_harp": 234, - "playing_ice_hockey": 235, - "playing_keyboard": 236, - "playing_kickball": 237, - "playing_monopoly": 238, - "playing_organ": 239, - "playing_paintball": 240, - "playing_piano": 241, - "playing_poker": 242, - "playing_recorder": 243, - "playing_saxophone": 244, - "playing_squash_or_racquetball": 245, - "playing_tennis": 246, - "playing_trombone": 247, - "playing_trumpet": 248, - "playing_ukulele": 249, - "playing_violin": 250, - "playing_volleyball": 251, - "playing_xylophone": 252, - "pole_vault": 253, - "presenting_weather_forecast": 254, - "pull_ups": 255, - "pumping_fist": 256, - "pumping_gas": 257, - "punching_bag": 258, - "punching_person_(boxing)": 259, - "push_up": 260, - "pushing_car": 261, - "pushing_cart": 262, - "pushing_wheelchair": 263, - "reading_book": 264, - "reading_newspaper": 265, - "recording_music": 266, - "riding_a_bike": 267, - "riding_camel": 268, - "riding_elephant": 269, - "riding_mechanical_bull": 270, - "riding_mountain_bike": 271, - "riding_mule": 272, - "riding_or_walking_with_horse": 
273, - "riding_scooter": 274, - "riding_unicycle": 275, - "ripping_paper": 276, - "robot_dancing": 277, - "rock_climbing": 278, - "rock_scissors_paper": 279, - "roller_skating": 280, - "running_on_treadmill": 281, - "sailing": 282, - "salsa_dancing": 283, - "sanding_floor": 284, - "scrambling_eggs": 285, - "scuba_diving": 286, - "setting_table": 287, - "shaking_hands": 288, - "shaking_head": 289, - "sharpening_knives": 290, - "sharpening_pencil": 291, - "shaving_head": 292, - "shaving_legs": 293, - "shearing_sheep": 294, - "shining_shoes": 295, - "shooting_basketball": 296, - "shooting_goal_(soccer)": 297, - "shot_put": 298, - "shoveling_snow": 299, - "shredding_paper": 300, - "shuffling_cards": 301, - "side_kick": 302, - "sign_language_interpreting": 303, - "singing": 304, - "situp": 305, - "skateboarding": 306, - "ski_jumping": 307, - "skiing_(not_slalom_or_crosscountry)": 308, - "skiing_crosscountry": 309, - "skiing_slalom": 310, - "skipping_rope": 311, - "skydiving": 312, - "slacklining": 313, - "slapping": 314, - "sled_dog_racing": 315, - "smoking": 316, - "smoking_hookah": 317, - "snatch_weight_lifting": 318, - "sneezing": 319, - "sniffing": 320, - "snorkeling": 321, - "snowboarding": 322, - "snowkiting": 323, - "snowmobiling": 324, - "somersaulting": 325, - "spinning_poi": 326, - "spray_painting": 327, - "spraying": 328, - "springboard_diving": 329, - "squat": 330, - "sticking_tongue_out": 331, - "stomping_grapes": 332, - "stretching_arm": 333, - "stretching_leg": 334, - "strumming_guitar": 335, - "surfing_crowd": 336, - "surfing_water": 337, - "sweeping_floor": 338, - "swimming_backstroke": 339, - "swimming_breast_stroke": 340, - "swimming_butterfly_stroke": 341, - "swing_dancing": 342, - "swinging_legs": 343, - "swinging_on_something": 344, - "sword_fighting": 345, - "tai_chi": 346, - "taking_a_shower": 347, - "tango_dancing": 348, - "tap_dancing": 349, - "tapping_guitar": 350, - "tapping_pen": 351, - "tasting_beer": 352, - "tasting_food": 353, - "testifying": 354, - "texting": 355, - "throwing_axe": 356, - "throwing_ball": 357, - "throwing_discus": 358, - "tickling": 359, - "tobogganing": 360, - "tossing_coin": 361, - "tossing_salad": 362, - "training_dog": 363, - "trapezing": 364, - "trimming_or_shaving_beard": 365, - "trimming_trees": 366, - "triple_jump": 367, - "tying_bow_tie": 368, - "tying_knot_(not_on_a_tie)": 369, - "tying_tie": 370, - "unboxing": 371, - "unloading_truck": 372, - "using_computer": 373, - "using_remote_controller_(not_gaming)": 374, - "using_segway": 375, - "vault": 376, - "waiting_in_line": 377, - "walking_the_dog": 378, - "washing_dishes": 379, - "washing_feet": 380, - "washing_hair": 381, - "washing_hands": 382, - "water_skiing": 383, - "water_sliding": 384, - "watering_plants": 385, - "waxing_back": 386, - "waxing_chest": 387, - "waxing_eyebrows": 388, - "waxing_legs": 389, - "weaving_basket": 390, - "welding": 391, - "whistling": 392, - "windsurfing": 393, - "wrapping_present": 394, - "wrestling": 395, - "writing": 396, - "yawning": 397, - "yoga": 398, - "zumba": 399, -} -SSv2_label_map = { - "Approaching something with your camera": 0, - "Attaching something to something": 1, - "Bending something so that it deforms": 2, - "Bending something until it breaks": 3, - "Burying something in something": 4, - "Closing something": 5, - "Covering something with something": 6, - "Digging something out of something": 7, - "Dropping something behind something": 8, - "Dropping something in front of something": 9, - "Dropping something into something": 10, - 
"Dropping something next to something": 11, - "Dropping something onto something": 12, - "Failing to put something into something because something does not fit": 13, - "Folding something": 14, - "Hitting something with something": 15, - "Holding something": 16, - "Holding something behind something": 17, - "Holding something in front of something": 18, - "Holding something next to something": 19, - "Holding something over something": 20, - "Laying something on the table on its side, not upright": 21, - "Letting something roll along a flat surface": 22, - "Letting something roll down a slanted surface": 23, - "Letting something roll up a slanted surface, so it rolls back down": 24, - "Lifting a surface with something on it but not enough for it to slide down": 25, - "Lifting a surface with something on it until it starts sliding down": 26, - "Lifting something up completely without letting it drop down": 27, - "Lifting something up completely, then letting it drop down": 28, - "Lifting something with something on it": 29, - "Lifting up one end of something without letting it drop down": 30, - "Lifting up one end of something, then letting it drop down": 31, - "Moving away from something with your camera": 32, - "Moving part of something": 33, - "Moving something across a surface until it falls down": 34, - "Moving something across a surface without it falling down": 35, - "Moving something and something away from each other": 36, - "Moving something and something closer to each other": 37, - "Moving something and something so they collide with each other": 38, - "Moving something and something so they pass each other": 39, - "Moving something away from something": 40, - "Moving something away from the camera": 41, - "Moving something closer to something": 42, - "Moving something down": 43, - "Moving something towards the camera": 44, - "Moving something up": 45, - "Opening something": 46, - "Picking something up": 47, - "Piling something up": 48, - "Plugging something into something": 49, - "Plugging something into something but pulling it right out as you remove your hand": 50, - "Poking a hole into some substance": 51, - "Poking a hole into something soft": 52, - "Poking a stack of something so the stack collapses": 53, - "Poking a stack of something without the stack collapsing": 54, - "Poking something so it slightly moves": 55, - "Poking something so lightly that it doesn't or almost doesn't move": 56, - "Poking something so that it falls over": 57, - "Poking something so that it spins around": 58, - "Pouring something into something": 59, - "Pouring something into something until it overflows": 60, - "Pouring something onto something": 61, - "Pouring something out of something": 62, - "Pretending or failing to wipe something off of something": 63, - "Pretending or trying and failing to twist something": 64, - "Pretending to be tearing something that is not tearable": 65, - "Pretending to close something without actually closing it": 66, - "Pretending to open something without actually opening it": 67, - "Pretending to pick something up": 68, - "Pretending to poke something": 69, - "Pretending to pour something out of something, but something is empty": 70, - "Pretending to put something behind something": 71, - "Pretending to put something into something": 72, - "Pretending to put something next to something": 73, - "Pretending to put something on a surface": 74, - "Pretending to put something onto something": 75, - "Pretending to put something underneath something": 76, - 
"Pretending to scoop something up with something": 77, - "Pretending to spread air onto something": 78, - "Pretending to sprinkle air onto something": 79, - "Pretending to squeeze something": 80, - "Pretending to take something from somewhere": 81, - "Pretending to take something out of something": 82, - "Pretending to throw something": 83, - "Pretending to turn something upside down": 84, - "Pulling something from behind of something": 85, - "Pulling something from left to right": 86, - "Pulling something from right to left": 87, - "Pulling something onto something": 88, - "Pulling something out of something": 89, - "Pulling two ends of something but nothing happens": 90, - "Pulling two ends of something so that it gets stretched": 91, - "Pulling two ends of something so that it separates into two pieces": 92, - "Pushing something from left to right": 93, - "Pushing something from right to left": 94, - "Pushing something off of something": 95, - "Pushing something onto something": 96, - "Pushing something so it spins": 97, - "Pushing something so that it almost falls off but doesn't": 98, - "Pushing something so that it falls off the table": 99, - "Pushing something so that it slightly moves": 100, - "Pushing something with something": 101, - "Putting number of something onto something": 102, - "Putting something and something on the table": 103, - "Putting something behind something": 104, - "Putting something in front of something": 105, - "Putting something into something": 106, - "Putting something next to something": 107, - "Putting something on a flat surface without letting it roll": 108, - "Putting something on a surface": 109, - "Putting something on the edge of something so it is not supported and falls down": 110, - "Putting something onto a slanted surface but it doesn't glide down": 111, - "Putting something onto something": 112, - "Putting something onto something else that cannot support it so it falls down": 113, - "Putting something similar to other things that are already on the table": 114, - "Putting something that can't roll onto a slanted surface, so it slides down": 115, - "Putting something that can't roll onto a slanted surface, so it stays where it is": 116, - "Putting something that cannot actually stand upright upright on the table, so it falls on its side": 117, - "Putting something underneath something": 118, - "Putting something upright on the table": 119, - "Putting something, something and something on the table": 120, - "Removing something, revealing something behind": 121, - "Rolling something on a flat surface": 122, - "Scooping something up with something": 123, - "Showing a photo of something to the camera": 124, - "Showing something behind something": 125, - "Showing something next to something": 126, - "Showing something on top of something": 127, - "Showing something to the camera": 128, - "Showing that something is empty": 129, - "Showing that something is inside something": 130, - "Something being deflected from something": 131, - "Something colliding with something and both are being deflected": 132, - "Something colliding with something and both come to a halt": 133, - "Something falling like a feather or paper": 134, - "Something falling like a rock": 135, - "Spilling something behind something": 136, - "Spilling something next to something": 137, - "Spilling something onto something": 138, - "Spinning something so it continues spinning": 139, - "Spinning something that quickly stops spinning": 140, - "Spreading something onto something": 141, 
- "Sprinkling something onto something": 142, - "Squeezing something": 143, - "Stacking number of something": 144, - "Stuffing something into something": 145, - "Taking one of many similar things on the table": 146, - "Taking something from somewhere": 147, - "Taking something out of something": 148, - "Tearing something into two pieces": 149, - "Tearing something just a little bit": 150, - "Throwing something": 151, - "Throwing something against something": 152, - "Throwing something in the air and catching it": 153, - "Throwing something in the air and letting it fall": 154, - "Throwing something onto a surface": 155, - "Tilting something with something on it slightly so it doesn't fall down": 156, - "Tilting something with something on it until it falls off": 157, - "Tipping something over": 158, - "Tipping something with something in it over, so something in it falls out": 159, - "Touching (without moving) part of something": 160, - "Trying but failing to attach something to something because it doesn't stick": 161, - "Trying to bend something unbendable so nothing happens": 162, - "Trying to pour something into something, but missing so it spills next to it": 163, - "Turning something upside down": 164, - "Turning the camera downwards while filming something": 165, - "Turning the camera left while filming something": 166, - "Turning the camera right while filming something": 167, - "Turning the camera upwards while filming something": 168, - "Twisting (wringing) something wet until water comes out": 169, - "Twisting something": 170, - "Uncovering something": 171, - "Unfolding something": 172, - "Wiping something off of something": 173, - "Moving something and something so they overlap each other": 174, -} -UCF_label_map = { - "ApplyEyeMakeup": 0, - "ApplyLipstick": 1, - "Archery": 2, - "BabyCrawling": 3, - "BalanceBeam": 4, - "BandMarching": 5, - "BaseballPitch": 6, - "Basketball": 7, - "BasketballDunk": 8, - "BenchPress": 9, - "Biking": 10, - "Billiards": 11, - "BlowDryHair": 12, - "BlowingCandles": 13, - "BodyWeightSquats": 14, - "Bowling": 15, - "BoxingPunchingBag": 16, - "BoxingSpeedBag": 17, - "BreastStroke": 18, - "BrushingTeeth": 19, - "CleanAndJerk": 20, - "CliffDiving": 21, - "CricketBowling": 22, - "CricketShot": 23, - "CuttingInKitchen": 24, - "Diving": 25, - "Drumming": 26, - "Fencing": 27, - "FieldHockeyPenalty": 28, - "FloorGymnastics": 29, - "FrisbeeCatch": 30, - "FrontCrawl": 31, - "GolfSwing": 32, - "Haircut": 33, - "Hammering": 34, - "HammerThrow": 35, - "HandstandPushups": 36, - "HandstandWalking": 37, - "HeadMassage": 38, - "HighJump": 39, - "HorseRace": 40, - "HorseRiding": 41, - "HulaHoop": 42, - "IceDancing": 43, - "JavelinThrow": 44, - "JugglingBalls": 45, - "JumpingJack": 46, - "JumpRope": 47, - "Kayaking": 48, - "Knitting": 49, - "LongJump": 50, - "Lunges": 51, - "MilitaryParade": 52, - "Mixing": 53, - "MoppingFloor": 54, - "Nunchucks": 55, - "ParallelBars": 56, - "PizzaTossing": 57, - "PlayingCello": 58, - "PlayingDaf": 59, - "PlayingDhol": 60, - "PlayingFlute": 61, - "PlayingGuitar": 62, - "PlayingPiano": 63, - "PlayingSitar": 64, - "PlayingTabla": 65, - "PlayingViolin": 66, - "PoleVault": 67, - "PommelHorse": 68, - "PullUps": 69, - "Punch": 70, - "PushUps": 71, - "Rafting": 72, - "RockClimbingIndoor": 73, - "RopeClimbing": 74, - "Rowing": 75, - "SalsaSpin": 76, - "ShavingBeard": 77, - "Shotput": 78, - "SkateBoarding": 79, - "Skiing": 80, - "Skijet": 81, - "SkyDiving": 82, - "SoccerJuggling": 83, - "SoccerPenalty": 84, - "StillRings": 85, - "SumoWrestling": 
86, - "Surfing": 87, - "Swing": 88, - "TableTennisShot": 89, - "TaiChi": 90, - "TennisSwing": 91, - "ThrowDiscus": 92, - "TrampolineJumping": 93, - "Typing": 94, - "UnevenBars": 95, - "VolleyballSpiking": 96, - "WalkingWithDog": 97, - "WallPushups": 98, - "WritingOnBoard": 99, - "YoYo": 100, -} diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Cs6 Master Collection Winmac - Xforce.zip 137 Kb.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Cs6 Master Collection Winmac - Xforce.zip 137 Kb.md deleted file mode 100644 index 48b5565a53edeb8139c56f160697cb8d6a860432..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Cs6 Master Collection Winmac - Xforce.zip 137 Kb.md +++ /dev/null @@ -1,6 +0,0 @@ -

      adobe cs6 master collection winmac - xforce.zip 137 kb


Download https://urlin.us/2uExvI



      -
      -Adobe Cs6 Master Collection Winmac - Xforce.zip 137 Kb adobe+cs6+master+collection+winmac+-+xforce.zip , adobe cs6 master collection ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Airfader Server 2.2 Crack HOT!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Airfader Server 2.2 Crack HOT!.md deleted file mode 100644 index 03ae5a845a103d1fd312246456840fd0d3990c60..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Airfader Server 2.2 Crack HOT!.md +++ /dev/null @@ -1,12 +0,0 @@ - -

      AirFader Server 2.2 Crack: A Risky Way to Control Your Digital Console

      -

AirFader is touchscreen mixing software that lets you remotely control Yamaha digital consoles from your Android or iPad devices[^2^]. It is designed to be a reliable and efficient tool for audio engineers and musicians who need to adjust their personal monitor mixes.

      -

      airfader server 2.2 crack


      Download Zip ❤❤❤ https://urlin.us/2uEwp1



      -

      AirFader Server 2.2 is the latest version of the software that runs on a Windows computer and acts as a server for the mobile devices. It supports Yamaha 01V96, LS9, and M7CL consoles and offers features such as channel naming, scene recall, mute groups, and more[^2^]. The software costs $149.00 and can be purchased from the official website[^2^].

      -

      However, some people may be tempted to look for a cracked version of AirFader Server 2.2 that bypasses the license verification and allows them to use the software for free. This is a risky and illegal way to obtain the software, as it may expose your computer and devices to malware, viruses, or other security threats. Moreover, it may damage your console or compromise your audio quality by introducing glitches, errors, or latency. Furthermore, it may violate the terms of service of AirFader and Yamaha and result in legal consequences.

      -

Therefore, it is strongly advised to avoid using an AirFader Server 2.2 crack and instead to purchase the software from the official website. This way, you can enjoy the full benefits of AirFader without risking your equipment, data, or reputation.

      If you are interested in purchasing AirFader Server 2.2, you can visit the official website and click on the "Buy Now" button. You will be redirected to a secure payment page where you can choose your preferred payment method and enter your details. After completing the payment, you will receive an email with a download link and a license key. You can then install the software on your Windows computer and activate it with the license key. You will also be able to download the mobile app for Android or iPad from the Google Play Store or the App Store respectively.

      -

AirFader has many advantages over other mixing software, such as its simplicity, reliability, and efficiency. It is designed to be easy to use and intuitive, with a touch-friendly interface that mimics the layout of the console. It is compatible with most Windows computers and mobile devices without requiring any additional hardware or software, and it is optimized to minimize network traffic and latency, ensuring smooth and stable performance. Moreover, it offers features that are not available on the console itself, such as channel naming, scene recall, mute groups, and more.

      -

      -

      AirFader is currently only compatible with Yamaha 01V96, LS9, and M7CL consoles. However, the developers are working on adding support for other consoles in the future. You can check the official website for updates and news on AirFader's development. You can also contact the developers if you have any suggestions or feedback on how to improve AirFader.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Alien Shooter 3 Free Download Full Version For Pc _VERIFIED_.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Alien Shooter 3 Free Download Full Version For Pc _VERIFIED_.md deleted file mode 100644 index 82982b475373a3fb2f2a409c19c5611bd94a3076..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Alien Shooter 3 Free Download Full Version For Pc _VERIFIED_.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Alien Shooter 3 Free Download Full Version For Pc


      Download File 🌟 https://urlin.us/2uEwsO



      -
      -Choose from 10 skill levels versus computer in this portable chess game. OS: Windows File Size: 73 KB « Board ». 6,710 downloads. 8.9 / 10. European ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Essay On Internet In Urdu Language !!INSTALL!!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Essay On Internet In Urdu Language !!INSTALL!!.md deleted file mode 100644 index 335a8d734d20570e4bb9d0565589e30f74582005..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Essay On Internet In Urdu Language !!INSTALL!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Essay On Internet In Urdu Language


Download File https://urlin.us/2uExrZ



      - -Internet ke fayde aur nuksan in urdu essay ... berlin airport restaurant business plan south africa essay on importance of english language in modern world pdf, ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Fsx Aerosoft Manhattan X Crack.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Fsx Aerosoft Manhattan X Crack.md deleted file mode 100644 index 3977ddb3aedef9b6591b70db2a410c0ec19b5f58..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Fsx Aerosoft Manhattan X Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Fsx Aerosoft Manhattan X Crack


Download https://urlin.us/2uEyDA



      -
      -Download.free.software.full.vers ion.and.android..11 ... pc fix keygen 3 days zoo ... Minecraft 1.1.0 Cracked [portable] [Updatable] [Online] 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/BETTER Download Saints Row 2 Highly Compressed.md b/spaces/inreVtussa/clothingai/Examples/BETTER Download Saints Row 2 Highly Compressed.md deleted file mode 100644 index bfa7d27f25f989b93fe6f09814cd412c07adf2f2..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/BETTER Download Saints Row 2 Highly Compressed.md +++ /dev/null @@ -1,7 +0,0 @@ -
      -

Saints Row: The game is complete in most aspects. It has excellent graphics, very engaging gameplay, and an outstanding soundtrack. If you want to play an open-world game with a lot of RPG elements, this is a must. If you are a fan of open-world games, you should download it. This is a game that has not only a lot of open-world elements but also a lot of RPG elements; it also has a few unlockable characters, such as the player, and various NPCs you can talk to and buy from. There are also a lot of power-ups you can collect, like bionic arm upgrades, superpowers, fast cars, and cool side missions, among other things.

      -

Saints Row 2 for PS2 is the second part of the Saints Row series. It has several problems, as I mentioned in my other post. It looks nice, and you get a lot of weapons, more choices, side missions, superpowers, and so on, but unfortunately it becomes rather repetitive in the last part. You get the same missions over and over, with the same tasks repeated in them, like stealing cop cars. I found that part a bit boring, since the first part was such a rush to steal stuff and run around. I also had a few problems with the controller, and that is nothing that can be changed. The code could be more localized, to make all the things you can do a bit more user friendly. That would have been much more fun.

      -

      Download Saints Row 2 Highly Compressed


      Download Zip –––––>>> https://tiurll.com/2uClNN



      -

      Saints Row 2 is a game of fun and action in which players assume the role of Johnny Gat, a man with a rare photographic memory. The player explores the mean streets of Stilwater, a city where crime and corruption are always in style. Players acquire powers, influence, and wealth by using guile, resourcefulness, and cunning in numerous interactive situations. Players make use of a wide array of weapons, powers, vehicles, and other items to effectively carry out crime and deal with regular problems. The goal of the game is to settle scores, take revenge, and become the ultimate gangster.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/CPU-Tweaker 1.1 Setup TOP Free.md b/spaces/inreVtussa/clothingai/Examples/CPU-Tweaker 1.1 Setup TOP Free.md deleted file mode 100644 index 9458abbceaf8f88f899478b2b157bd1e9d65fac0..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/CPU-Tweaker 1.1 Setup TOP Free.md +++ /dev/null @@ -1,17 +0,0 @@ - -

      CPU-Tweaker 1.1: A Free and Lightweight Tool to Boost Your CPU Performance

      -

If you are looking for a simple and effective way to overclock your CPU and get more speed out of your computer, you might want to try CPU-Tweaker 1.1. This is a free and lightweight tool that lets you tweak memory timings and CPU settings to improve the performance of your processor.

      -

      CPU-Tweaker 1.1 is designed for CPUs with an integrated memory controller (IMC), such as Intel Core i3, i5, i7, and AMD Phenom and Ryzen processors. It can read and change the principal and secondary timings of your memory, as well as the voltage and frequency settings of your CPU.

      -

      CPU-Tweaker 1.1 Setup Free


DOWNLOAD https://tiurll.com/2uCivC



      -

      CPU-Tweaker 1.1 is very easy to use, as it has a user-friendly interface that shows you all the relevant information about your CPU and memory. You can adjust the values by using sliders or typing them manually, and apply the changes with a single click. You can also save and load different profiles for different overclocking scenarios.

      -

      CPU-Tweaker 1.1 also has a built-in stress test feature that lets you check the stability of your overclock settings. You can run the test for a specified amount of time or until an error occurs. If your computer freezes or crashes during the test, you can restart it and reset the settings to normal.

      -

      CPU-Tweaker 1.1 is a portable program that does not require installation. You can run it from any folder or USB drive. It works on Windows XP, Vista, 7, 8, and 10, both 32-bit and 64-bit versions.

      -

      If you want to download CPU-Tweaker 1.1 for free, you can visit its official website at https://www.tweakers.fr/cpu-tweaker.html or one of the trusted software download sites such as MajorGeeks or Softpedia.

      -

      Before you use CPU-Tweaker 1.1, make sure you have a backup of your important data and a reliable cooling system for your CPU. Overclocking can cause overheating, instability, data loss, or hardware damage if done improperly. Use CPU-Tweaker 1.1 at your own risk and responsibility.

      - -

CPU-Tweaker 1.1 is not the only overclocking software available for Windows, but it has some advantages over other similar tools. For example, CPU-Tweaker 1.1 is more lightweight and consumes fewer system resources than other programs. It also has a simpler, cleaner interface that makes it easier to use and understand.

      -

      CPU-Tweaker 1.1 is also more compatible with different types of CPUs and memory modules than other tools. It can work with both Intel and AMD processors, as well as DDR2, DDR3, and DDR4 RAM. It can also detect and support the latest CPU models and features, such as Turbo Boost and Hyper-Threading.

      -

      -

      CPU-Tweaker 1.1 is a great tool for beginners and advanced users alike who want to overclock their CPU and get more performance out of their computer. It is free, portable, easy to use, and effective. However, it is not a magic solution that can make your computer run faster without any drawbacks. Overclocking can have negative effects on your system stability, temperature, power consumption, and lifespan. Therefore, you should always be careful and cautious when using CPU-Tweaker 1.1 or any other overclocking software.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Code Soft Tp 3160 Driver BEST.md b/spaces/inreVtussa/clothingai/Examples/Code Soft Tp 3160 Driver BEST.md deleted file mode 100644 index 8559de8c471a68aed0e9a171ce208fab63fbf7ea..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Code Soft Tp 3160 Driver BEST.md +++ /dev/null @@ -1,26 +0,0 @@ -

      Code Soft Tp 3160 Driver


Download File https://tiurll.com/2uCmrx



      -
      -, the hard driver and the soft driver normally do not play well together. If you want to use both together, it will probably be necessary to go through some effort to ensure they coexist peacefully. - -Because of the architecture of hard disks (as opposed to, say, other DIMMs), the BIOS has to take the hard disk device into the online state on bootup. This enables the BIOS to read the hard disk's partition table and make partition bootable. It also makes it possible for the BIOS to partition disks as it boots. - -Now let's take a look at how to put the disk back into the standby mode. The standby mode is similar to the online state. For example, the hard disk is ready for use again once the power is turned back on after having been turned off. - -You can enter the standby mode in two ways: by pressing the "ESC" key on your keyboard as the computer starts and the hard disk is booting, or by pressing the "power" button on the back of the hard disk drive itself. - -If you have only one hard disk installed in the computer, it will boot up to a disk check prompt. Let's examine the case where your hard disk drive has failed. - -If your computer is booting and you can still access the BIOS setup screen, you can enter the standby mode by pressing the "ESC" key. Alternatively, you can press the power button on the back of the hard disk drive itself to enter the standby mode. - -If your computer is booting and you cannot access the BIOS setup screen, press the power button on the back of the hard disk drive itself to enter the standby mode. Then press the ESC key to make your computer boot up normally. - -Once you have entered the standby mode, you should return to the installation disk menu. If you have only one hard disk in the computer, the hard disk will be in the online state. - -Since you can boot from the installation disk, the only disk that will be in the standby state is the one that you are installing on. So make sure that the disk you want to install to is not in the online state and you can proceed with installation. - -You should note that you can only enter the standby mode on a working hard disk. You cannot enter the standby mode on a failed hard disk. - -You can run a hard disk check on a hard disk in the standby state. If you see errors or problems with the disk 4fefd39f24
      -
      -
      -

      diff --git a/spaces/ispast/Genshin_MB_VITS_TTS/text/__init__.py b/spaces/ispast/Genshin_MB_VITS_TTS/text/__init__.py deleted file mode 100644 index 48ae82f3e40ecd1bf17a7de78d87790327af3362..0000000000000000000000000000000000000000 --- a/spaces/ispast/Genshin_MB_VITS_TTS/text/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/jbilcke-hf/ai-comic-factory/LICENCE.md b/spaces/jbilcke-hf/ai-comic-factory/LICENCE.md deleted file mode 100644 index 537fde8423156f05dc00b52a4fc8eebd451f66e9..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-comic-factory/LICENCE.md +++ /dev/null @@ -1,170 +0,0 @@ -Apache License -============== - -_Version 2.0, January 2004_ -_<>_ - -### Terms and Conditions for use, reproduction, and distribution - -#### 1. Definitions - -“License” shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -“Licensor” shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -“Legal Entity” shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, “control” means **(i)** the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the -outstanding shares, or **(iii)** beneficial ownership of such entity. - -“You” (or “Your”) shall mean an individual or Legal Entity exercising -permissions granted by this License. - -“Source” form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. 
- -“Object” form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -“Work” shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -“Derivative Works” shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -“Contribution” shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -“submitted” means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as “Not a Contribution.” - -“Contributor” shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -#### 2. Grant of Copyright License - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -#### 3. Grant of Patent License - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -#### 4. 
Redistribution - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -* **(a)** You must give any other recipients of the Work or Derivative Works a copy of -this License; and -* **(b)** You must cause any modified files to carry prominent notices stating that You -changed the files; and -* **(c)** You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. - -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -#### 5. Submission of Contributions - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -#### 6. Trademarks - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -#### 7. Disclaimer of Warranty - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -#### 8. 
Limitation of Liability - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -#### 9. Accepting Warranty or Additional Liability - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -_END OF TERMS AND CONDITIONS_ \ No newline at end of file diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/input.tsx b/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/input.tsx deleted file mode 100644 index 0757ddebdca3800bbd4a46fe1c2c17dff86c5e2f..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/input.tsx +++ /dev/null @@ -1,25 +0,0 @@ -import * as React from "react" - -import { cn } from "@/lib/utils" - -export interface InputProps - extends React.InputHTMLAttributes {} - -const Input = React.forwardRef( - ({ className, type, ...props }, ref) => { - return ( - - ) - } -) -Input.displayName = "Input" - -export { Input } diff --git a/spaces/jie1/jie_test4/app.py b/spaces/jie1/jie_test4/app.py deleted file mode 100644 index ad1c650687829e4e1f8cb67bcfd7075b666aef38..0000000000000000000000000000000000000000 --- a/spaces/jie1/jie_test4/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import gradio as gr -from Sort_Scores import * -from Preinput_Merge import * -from Sort_Dlkcat import * -from Merge_Dlsc import * -from Sort_Sco_Kcat import * -from Plt import * - -with gr.Blocks(css=".gradio-container {background-image: url('file=background.jpeg')}") as demo: - gr.Markdown("Welcome using this demo.") - with gr.Tab("HelloWorld"): - gr.Markdown("Welcome using this demo.") - gr.Markdown("This is a succend test") - gr.Markdown("I think this demo can do some things") - gr.Markdown("在sort里,可以对scores文件,dlkcat文件,以及合并后的scores与dlkcat文件进行排序") - gr.Markdown("Pre Merge里,可以将序列文件与smi文件进行合并,合并后可以进行dlkcat值的计算,合并前若序列文件需要处理换行符也可以对其进行处理") - gr.Markdown("Merge Dlsc里,可以合并scores值文件和dlkcat文件,注意这两个文件序列需要一致") - gr.Markdown("详细的说明可以看Files and versions 里的README.md 文件") - with gr.Tab("Sort"): - file1_input = gr.File(label="输入相关文件") - file1_output1 = gr.File() - file1_output2 = gr.Textbox() - with gr.Row(): - file1_button1 = gr.Button("Sort Scores") - file1_button2 = gr.Button("Sort Dlkcat") - file1_button3 = gr.Button("Sort Mergekcat") - - with gr.Tab("Pre Merge"): - with gr.Row(): - file2_input1 = gr.File(label="strip_file") - file2_input2 = gr.File(label="smi_file") - file2_input3 = 
gr.File(label="seq_file") - file2_output = gr.File() - file2_button1 = gr.Button("Strip") - with gr.Row(): - file2_button2 = gr.Button("Merge") - file2_button3 = gr.Button("Merge All") - - with gr.Tab("Merge Dlsc"): - with gr.Row(): - file4_input1 = gr.File(label="sc_file") - file4_input2 = gr.File(label="cat_file") - file4_output = gr.File() - file4_button = gr.Button("Merge") - - with gr.Tab("Plt Picture"): - file5_input = gr.File(label="log_file") - file5_output = gr.File() - file5_button = gr.Button("Plt") - - with gr.Accordion("Open for More!"): - gr.Markdown("Look at me...") - - file1_button1.click(Sort_Scores, inputs=file1_input, outputs=file1_output2) - file1_button2.click(Sort_Dlkcat, inputs=file1_input, outputs=file1_output1) - file1_button3.click(Sort_Sco_Kcat, inputs=file1_input, outputs=file1_output1) - - file2_button1.click(Strip, inputs=file2_input1, outputs=file2_output) - file2_button2.click(Merge, inputs=[file2_input2, file2_input3], outputs=file2_output) - file2_button3.click(Merge_All, inputs=[file2_input2, file2_input3], outputs=file2_output) - - file4_button.click(Merge_Dlsc, inputs=[file4_input1, file4_input2], outputs=file4_output) - - file5_button.click(Plt, inputs=file5_input, outputs=file5_output) - -if __name__ == "__main__": - demo.launch() - diff --git a/spaces/jkang/demo-image-pyxelate/gradio_pyxelate.py b/spaces/jkang/demo-image-pyxelate/gradio_pyxelate.py deleted file mode 100644 index a88ebd04c2aea2d824b0296b48dcfd1213caa593..0000000000000000000000000000000000000000 --- a/spaces/jkang/demo-image-pyxelate/gradio_pyxelate.py +++ /dev/null @@ -1,87 +0,0 @@ -'''Pyxelate Demo - -- Based on https://huggingface.co/spaces/akhaliq/Pyxelate -- Credits to akhaliq - -2021-12-16 first created for testing -''' - -import os -from glob import glob -from loguru import logger - -import gradio as gr -from skimage import io as sio -from pyxelate import Pyx, Pal - - -# ----------- Settings ----------- -examples = sorted(glob(os.path.join('examples', '*.jpg'))) -examples = [[image_file, 5, 5, 'none', 'none'] for image_file in examples] - -# ----------- Logging ----------- -logger.add('app.log', mode='a') -logger.info('===== APP RESTARTED =====') - -# ----------- Params ----------- -DOWNSAMPLE_MIN = 1 -DOWNSAMPLE_MAX = 10 -COLOR_MIN = 1 -COLOR_MAX = 20 -PALETTE_CHOICES = [ - 'none', # if not chosen - 'TELETEXT', 'BBC_MICRO', 'CGA_MODE4_PAL1', 'CGA_MODE5_PAL1', - 'CGA_MODE4_PAL2', 'ZX_SPECTRUM', 'APPLE_II_LO', 'APPLE_II_HI', - 'COMMODORE_64', 'GAMEBOY_COMBO_UP', 'GAMEBOY_COMBO_DOWN', 'GAMEBOY_COMBO_LEFT', - 'GAMEBOY_COMBO_RIGHT', 'GAMEBOY_A_UP', 'GAMEBOY_A_DOWN', 'GAMEBOY_A_LEFT', - 'GAMEBOY_A_RIGHT', 'GAMEBOY_B_UP', 'GAMEBOY_B_DOWN', 'GAMEBOY_B_LEFT', - 'GAMEBOY_B_RIGHT', 'GAMEBOY_ORIGINAL', 'GAMEBOY_POCKET', 'GAMEBOY_VIRTUALBOY', - 'MICROSOFT_WINDOWS_16', 'MICROSOFT_WINDOWS_20', 'MICROSOFT_WINDOWS_PAINT', - 'PICO_8', 'MSX', 'MONO_OBRADINN_IBM', 'MONO_OBRADINN_MAC', 'MONO_BJG', 'MONO_BW', - 'MONO_PHOSPHOR_AMBER', 'MONO_PHOSPHOR_LTAMBER', 'MONO_PHOSPHOR_GREEN1', - 'MONO_PHOSPHOR_GREEN2', 'MONO_PHOSPHOR_GREEN3', 'MONO_PHOSPHOR_APPLE', 'APPLE_II_MONO', - 'MONO_PHOSPHOR_APPLEC', 'APPLE_II_MONOC' -] -DITHER_CHOICES = ['none', 'naive', 'bayer', 'floyd', 'atkinson'] - - -def predict(image_obj, sampling_param, color_param, palette_param, dither_param): - img = sio.imread(image_obj.name) - logger.info('--- image loaded') - - if palette_param != 'none': - color_param = 'none' - palette = Pal[palette_param] - else: - palette = color_param - - pyx = Pyx(factor=sampling_param, 
palette=palette, dither=dither_param) - pyx.fit(img) - img_out = pyx.transform(img) - logger.info('--- output generated') - return img_out - -iface = gr.Interface( - predict, - title='이미지를 픽셀화 시키는 데모입니다.', - description='이미지가 주어졌을 때 이미지를 스케일링하고 색감을 바꾸어 픽셀화 시킬 수 있습니다.', - inputs=[ - gr.inputs.Image(label='인풋 이미지를 준비해주세요', type='file'), - gr.inputs.Slider(label='다운샘플할 크기를 입력해주세요 (클수록 픽셀크기가 커짐 = 저화질)', - minimum=DOWNSAMPLE_MIN, maximum=DOWNSAMPLE_MAX, step=1, default=5), - gr.inputs.Slider(label='사용할 색깔의 개수를 입력해주세요 (선택 시 아래 팔레트 설정은 "none"으로 해주세요)', - minimum=DOWNSAMPLE_MIN, maximum=DOWNSAMPLE_MAX, step=1, default=5), - gr.inputs.Dropdown(label='색깔 팔레트를 선택하세요 (선택 시 위의 색깔 개수는 무시됩니다)', - choices=PALETTE_CHOICES, default='none', type='value'), - gr.inputs.Dropdown(label='결과 이미지의 부드러움 정도를 나타냅니다 ("dithering")', - choices=DITHER_CHOICES, default='none', type='value'), - ], - outputs=[ - gr.outputs.Image(label='결과 이미지 입니다') - ], - examples=examples, - enable_queue=True, - article='

      Credits to GitHub

      ', -) - -iface.launch(debug=True) \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/click/termui.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/click/termui.py deleted file mode 100644 index db7a4b286174fdf26f3251631a2066eda2fa5bea..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/click/termui.py +++ /dev/null @@ -1,784 +0,0 @@ -import inspect -import io -import itertools -import sys -import typing as t -from gettext import gettext as _ - -from ._compat import isatty -from ._compat import strip_ansi -from .exceptions import Abort -from .exceptions import UsageError -from .globals import resolve_color_default -from .types import Choice -from .types import convert_type -from .types import ParamType -from .utils import echo -from .utils import LazyFile - -if t.TYPE_CHECKING: - from ._termui_impl import ProgressBar - -V = t.TypeVar("V") - -# The prompt functions to use. The doc tools currently override these -# functions to customize how they work. -visible_prompt_func: t.Callable[[str], str] = input - -_ansi_colors = { - "black": 30, - "red": 31, - "green": 32, - "yellow": 33, - "blue": 34, - "magenta": 35, - "cyan": 36, - "white": 37, - "reset": 39, - "bright_black": 90, - "bright_red": 91, - "bright_green": 92, - "bright_yellow": 93, - "bright_blue": 94, - "bright_magenta": 95, - "bright_cyan": 96, - "bright_white": 97, -} -_ansi_reset_all = "\033[0m" - - -def hidden_prompt_func(prompt: str) -> str: - import getpass - - return getpass.getpass(prompt) - - -def _build_prompt( - text: str, - suffix: str, - show_default: bool = False, - default: t.Optional[t.Any] = None, - show_choices: bool = True, - type: t.Optional[ParamType] = None, -) -> str: - prompt = text - if type is not None and show_choices and isinstance(type, Choice): - prompt += f" ({', '.join(map(str, type.choices))})" - if default is not None and show_default: - prompt = f"{prompt} [{_format_default(default)}]" - return f"{prompt}{suffix}" - - -def _format_default(default: t.Any) -> t.Any: - if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"): - return default.name - - return default - - -def prompt( - text: str, - default: t.Optional[t.Any] = None, - hide_input: bool = False, - confirmation_prompt: t.Union[bool, str] = False, - type: t.Optional[t.Union[ParamType, t.Any]] = None, - value_proc: t.Optional[t.Callable[[str], t.Any]] = None, - prompt_suffix: str = ": ", - show_default: bool = True, - err: bool = False, - show_choices: bool = True, -) -> t.Any: - """Prompts a user for input. This is a convenience function that can - be used to prompt a user for input later. - - If the user aborts the input by sending an interrupt signal, this - function will catch it and raise a :exc:`Abort` exception. - - :param text: the text to show for the prompt. - :param default: the default value to use if no input happens. If this - is not given it will prompt until it's aborted. - :param hide_input: if this is set to true then the input value will - be hidden. - :param confirmation_prompt: Prompt a second time to confirm the - value. Can be set to a string instead of ``True`` to customize - the message. - :param type: the type to use to check the value against. - :param value_proc: if this parameter is provided it's a function that - is invoked instead of the type conversion to - convert a value. - :param prompt_suffix: a suffix that should be added to the prompt. 
- :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - :param show_choices: Show or hide choices if the passed type is a Choice. - For example if type is a Choice of either day or week, - show_choices is true and text is "Group by" then the - prompt will be "Group by (day, week): ". - - .. versionadded:: 8.0 - ``confirmation_prompt`` can be a custom string. - - .. versionadded:: 7.0 - Added the ``show_choices`` parameter. - - .. versionadded:: 6.0 - Added unicode support for cmd.exe on Windows. - - .. versionadded:: 4.0 - Added the `err` parameter. - - """ - - def prompt_func(text: str) -> str: - f = hidden_prompt_func if hide_input else visible_prompt_func - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(text.rstrip(" "), nl=False, err=err) - # Echo a space to stdout to work around an issue where - # readline causes backspace to clear the whole line. - return f(" ") - except (KeyboardInterrupt, EOFError): - # getpass doesn't print a newline if the user aborts input with ^C. - # Allegedly this behavior is inherited from getpass(3). - # A doc bug has been filed at https://bugs.python.org/issue24711 - if hide_input: - echo(None, err=err) - raise Abort() from None - - if value_proc is None: - value_proc = convert_type(type, default) - - prompt = _build_prompt( - text, prompt_suffix, show_default, default, show_choices, type - ) - - if confirmation_prompt: - if confirmation_prompt is True: - confirmation_prompt = _("Repeat for confirmation") - - confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix) - - while True: - while True: - value = prompt_func(prompt) - if value: - break - elif default is not None: - value = default - break - try: - result = value_proc(value) - except UsageError as e: - if hide_input: - echo(_("Error: The value you entered was invalid."), err=err) - else: - echo(_("Error: {e.message}").format(e=e), err=err) # noqa: B306 - continue - if not confirmation_prompt: - return result - while True: - value2 = prompt_func(confirmation_prompt) - is_empty = not value and not value2 - if value2 or is_empty: - break - if value == value2: - return result - echo(_("Error: The two entered values do not match."), err=err) - - -def confirm( - text: str, - default: t.Optional[bool] = False, - abort: bool = False, - prompt_suffix: str = ": ", - show_default: bool = True, - err: bool = False, -) -> bool: - """Prompts for confirmation (yes/no question). - - If the user aborts the input by sending a interrupt signal this - function will catch it and raise a :exc:`Abort` exception. - - :param text: the question to ask. - :param default: The default value to use when no input is given. If - ``None``, repeat until input is given. - :param abort: if this is set to `True` a negative answer aborts the - exception by raising :exc:`Abort`. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - - .. versionchanged:: 8.0 - Repeat until input is given if ``default`` is ``None``. - - .. versionadded:: 4.0 - Added the ``err`` parameter. 
- """ - prompt = _build_prompt( - text, - prompt_suffix, - show_default, - "y/n" if default is None else ("Y/n" if default else "y/N"), - ) - - while True: - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(prompt.rstrip(" "), nl=False, err=err) - # Echo a space to stdout to work around an issue where - # readline causes backspace to clear the whole line. - value = visible_prompt_func(" ").lower().strip() - except (KeyboardInterrupt, EOFError): - raise Abort() from None - if value in ("y", "yes"): - rv = True - elif value in ("n", "no"): - rv = False - elif default is not None and value == "": - rv = default - else: - echo(_("Error: invalid input"), err=err) - continue - break - if abort and not rv: - raise Abort() - return rv - - -def echo_via_pager( - text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str], - color: t.Optional[bool] = None, -) -> None: - """This function takes a text and shows it via an environment specific - pager on stdout. - - .. versionchanged:: 3.0 - Added the `color` flag. - - :param text_or_generator: the text to page, or alternatively, a - generator emitting the text to page. - :param color: controls if the pager supports ANSI colors or not. The - default is autodetection. - """ - color = resolve_color_default(color) - - if inspect.isgeneratorfunction(text_or_generator): - i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)() - elif isinstance(text_or_generator, str): - i = [text_or_generator] - else: - i = iter(t.cast(t.Iterable[str], text_or_generator)) - - # convert every element of i to a text type if necessary - text_generator = (el if isinstance(el, str) else str(el) for el in i) - - from ._termui_impl import pager - - return pager(itertools.chain(text_generator, "\n"), color) - - -def progressbar( - iterable: t.Optional[t.Iterable[V]] = None, - length: t.Optional[int] = None, - label: t.Optional[str] = None, - show_eta: bool = True, - show_percent: t.Optional[bool] = None, - show_pos: bool = False, - item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None, - fill_char: str = "#", - empty_char: str = "-", - bar_template: str = "%(label)s [%(bar)s] %(info)s", - info_sep: str = " ", - width: int = 36, - file: t.Optional[t.TextIO] = None, - color: t.Optional[bool] = None, - update_min_steps: int = 1, -) -> "ProgressBar[V]": - """This function creates an iterable context manager that can be used - to iterate over something while showing a progress bar. It will - either iterate over the `iterable` or `length` items (that are counted - up). While iteration happens, this function will print a rendered - progress bar to the given `file` (defaults to stdout) and will attempt - to calculate remaining time and more. By default, this progress bar - will not be rendered if the file is not a terminal. - - The context manager creates the progress bar. When the context - manager is entered the progress bar is already created. With every - iteration over the progress bar, the iterable passed to the bar is - advanced and the bar is updated. When the context manager exits, - a newline is printed and the progress bar is finalized on screen. - - Note: The progress bar is currently designed for use cases where the - total progress can be expected to take at least several seconds. - Because of this, the ProgressBar class object won't display - progress that is considered too fast, and progress where the time - between steps is less than a second. 
- - No printing must happen or the progress bar will be unintentionally - destroyed. - - Example usage:: - - with progressbar(items) as bar: - for item in bar: - do_something_with(item) - - Alternatively, if no iterable is specified, one can manually update the - progress bar through the `update()` method instead of directly - iterating over the progress bar. The update method accepts the number - of steps to increment the bar with:: - - with progressbar(length=chunks.total_bytes) as bar: - for chunk in chunks: - process_chunk(chunk) - bar.update(chunks.bytes) - - The ``update()`` method also takes an optional value specifying the - ``current_item`` at the new position. This is useful when used - together with ``item_show_func`` to customize the output for each - manual step:: - - with click.progressbar( - length=total_size, - label='Unzipping archive', - item_show_func=lambda a: a.filename - ) as bar: - for archive in zip_file: - archive.extract() - bar.update(archive.size, archive) - - :param iterable: an iterable to iterate over. If not provided the length - is required. - :param length: the number of items to iterate over. By default the - progressbar will attempt to ask the iterator about its - length, which might or might not work. If an iterable is - also provided this parameter can be used to override the - length. If an iterable is not provided the progress bar - will iterate over a range of that length. - :param label: the label to show next to the progress bar. - :param show_eta: enables or disables the estimated time display. This is - automatically disabled if the length cannot be - determined. - :param show_percent: enables or disables the percentage display. The - default is `True` if the iterable has a length or - `False` if not. - :param show_pos: enables or disables the absolute position display. The - default is `False`. - :param item_show_func: A function called with the current item which - can return a string to show next to the progress bar. If the - function returns ``None`` nothing is shown. The current item can - be ``None``, such as when entering and exiting the bar. - :param fill_char: the character to use to show the filled part of the - progress bar. - :param empty_char: the character to use to show the non-filled part of - the progress bar. - :param bar_template: the format string to use as template for the bar. - The parameters in it are ``label`` for the label, - ``bar`` for the progress bar and ``info`` for the - info section. - :param info_sep: the separator between multiple info items (eta etc.) - :param width: the width of the progress bar in characters, 0 means full - terminal width - :param file: The file to write to. If this is not a terminal then - only the label is printed. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are included anywhere in the progress bar output - which is not the case by default. - :param update_min_steps: Render only when this many updates have - completed. This allows tuning for very fast iterators. - - .. versionchanged:: 8.0 - Output is shown even if execution time is less than 0.5 seconds. - - .. versionchanged:: 8.0 - ``item_show_func`` shows the current item, not the previous one. - - .. versionchanged:: 8.0 - Labels are echoed if the output is not a TTY. Reverts a change - in 7.0 that removed all output. - - .. versionadded:: 8.0 - Added the ``update_min_steps`` parameter. - - .. 
versionchanged:: 4.0 - Added the ``color`` parameter. Added the ``update`` method to - the object. - - .. versionadded:: 2.0 - """ - from ._termui_impl import ProgressBar - - color = resolve_color_default(color) - return ProgressBar( - iterable=iterable, - length=length, - show_eta=show_eta, - show_percent=show_percent, - show_pos=show_pos, - item_show_func=item_show_func, - fill_char=fill_char, - empty_char=empty_char, - bar_template=bar_template, - info_sep=info_sep, - file=file, - label=label, - width=width, - color=color, - update_min_steps=update_min_steps, - ) - - -def clear() -> None: - """Clears the terminal screen. This will have the effect of clearing - the whole visible space of the terminal and moving the cursor to the - top left. This does not do anything if not connected to a terminal. - - .. versionadded:: 2.0 - """ - if not isatty(sys.stdout): - return - - # ANSI escape \033[2J clears the screen, \033[1;1H moves the cursor - echo("\033[2J\033[1;1H", nl=False) - - -def _interpret_color( - color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0 -) -> str: - if isinstance(color, int): - return f"{38 + offset};5;{color:d}" - - if isinstance(color, (tuple, list)): - r, g, b = color - return f"{38 + offset};2;{r:d};{g:d};{b:d}" - - return str(_ansi_colors[color] + offset) - - -def style( - text: t.Any, - fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None, - bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None, - bold: t.Optional[bool] = None, - dim: t.Optional[bool] = None, - underline: t.Optional[bool] = None, - overline: t.Optional[bool] = None, - italic: t.Optional[bool] = None, - blink: t.Optional[bool] = None, - reverse: t.Optional[bool] = None, - strikethrough: t.Optional[bool] = None, - reset: bool = True, -) -> str: - """Styles a text with ANSI styles and returns the new string. By - default the styling is self contained which means that at the end - of the string a reset code is issued. This can be prevented by - passing ``reset=False``. - - Examples:: - - click.echo(click.style('Hello World!', fg='green')) - click.echo(click.style('ATTENTION!', blink=True)) - click.echo(click.style('Some things', reverse=True, fg='cyan')) - click.echo(click.style('More colors', fg=(255, 12, 128), bg=117)) - - Supported color names: - - * ``black`` (might be a gray) - * ``red`` - * ``green`` - * ``yellow`` (might be an orange) - * ``blue`` - * ``magenta`` - * ``cyan`` - * ``white`` (might be light gray) - * ``bright_black`` - * ``bright_red`` - * ``bright_green`` - * ``bright_yellow`` - * ``bright_blue`` - * ``bright_magenta`` - * ``bright_cyan`` - * ``bright_white`` - * ``reset`` (reset the color code only) - - If the terminal supports it, color may also be specified as: - - - An integer in the interval [0, 255]. The terminal must support - 8-bit/256-color mode. - - An RGB tuple of three integers in [0, 255]. The terminal must - support 24-bit/true-color mode. - - See https://en.wikipedia.org/wiki/ANSI_color and - https://gist.github.com/XVilka/8346728 for more information. - - :param text: the string to style with ansi codes. - :param fg: if provided this will become the foreground color. - :param bg: if provided this will become the background color. - :param bold: if provided this will enable or disable bold mode. - :param dim: if provided this will enable or disable dim mode. This is - badly supported. - :param underline: if provided this will enable or disable underline. - :param overline: if provided this will enable or disable overline. 
- :param italic: if provided this will enable or disable italic. - :param blink: if provided this will enable or disable blinking. - :param reverse: if provided this will enable or disable inverse - rendering (foreground becomes background and the - other way round). - :param strikethrough: if provided this will enable or disable - striking through text. - :param reset: by default a reset-all code is added at the end of the - string which means that styles do not carry over. This - can be disabled to compose styles. - - .. versionchanged:: 8.0 - A non-string ``message`` is converted to a string. - - .. versionchanged:: 8.0 - Added support for 256 and RGB color codes. - - .. versionchanged:: 8.0 - Added the ``strikethrough``, ``italic``, and ``overline`` - parameters. - - .. versionchanged:: 7.0 - Added support for bright colors. - - .. versionadded:: 2.0 - """ - if not isinstance(text, str): - text = str(text) - - bits = [] - - if fg: - try: - bits.append(f"\033[{_interpret_color(fg)}m") - except KeyError: - raise TypeError(f"Unknown color {fg!r}") from None - - if bg: - try: - bits.append(f"\033[{_interpret_color(bg, 10)}m") - except KeyError: - raise TypeError(f"Unknown color {bg!r}") from None - - if bold is not None: - bits.append(f"\033[{1 if bold else 22}m") - if dim is not None: - bits.append(f"\033[{2 if dim else 22}m") - if underline is not None: - bits.append(f"\033[{4 if underline else 24}m") - if overline is not None: - bits.append(f"\033[{53 if overline else 55}m") - if italic is not None: - bits.append(f"\033[{3 if italic else 23}m") - if blink is not None: - bits.append(f"\033[{5 if blink else 25}m") - if reverse is not None: - bits.append(f"\033[{7 if reverse else 27}m") - if strikethrough is not None: - bits.append(f"\033[{9 if strikethrough else 29}m") - bits.append(text) - if reset: - bits.append(_ansi_reset_all) - return "".join(bits) - - -def unstyle(text: str) -> str: - """Removes ANSI styling information from a string. Usually it's not - necessary to use this function as Click's echo function will - automatically remove styling if necessary. - - .. versionadded:: 2.0 - - :param text: the text to remove style information from. - """ - return strip_ansi(text) - - -def secho( - message: t.Optional[t.Any] = None, - file: t.Optional[t.IO[t.AnyStr]] = None, - nl: bool = True, - err: bool = False, - color: t.Optional[bool] = None, - **styles: t.Any, -) -> None: - """This function combines :func:`echo` and :func:`style` into one - call. As such the following two calls are the same:: - - click.secho('Hello World!', fg='green') - click.echo(click.style('Hello World!', fg='green')) - - All keyword arguments are forwarded to the underlying functions - depending on which one they go with. - - Non-string types will be converted to :class:`str`. However, - :class:`bytes` are passed directly to :meth:`echo` without applying - style. If you want to style bytes that represent text, call - :meth:`bytes.decode` first. - - .. versionchanged:: 8.0 - A non-string ``message`` is converted to a string. Bytes are - passed through without style applied. - - .. 
versionadded:: 2.0 - """ - if message is not None and not isinstance(message, (bytes, bytearray)): - message = style(message, **styles) - - return echo(message, file=file, nl=nl, err=err, color=color) - - -def edit( - text: t.Optional[t.AnyStr] = None, - editor: t.Optional[str] = None, - env: t.Optional[t.Mapping[str, str]] = None, - require_save: bool = True, - extension: str = ".txt", - filename: t.Optional[str] = None, -) -> t.Optional[t.AnyStr]: - r"""Edits the given text in the defined editor. If an editor is given - (should be the full path to the executable but the regular operating - system search path is used for finding the executable) it overrides - the detected editor. Optionally, some environment variables can be - used. If the editor is closed without changes, `None` is returned. In - case a file is edited directly the return value is always `None` and - `require_save` and `extension` are ignored. - - If the editor cannot be opened a :exc:`UsageError` is raised. - - Note for Windows: to simplify cross-platform usage, the newlines are - automatically converted from POSIX to Windows and vice versa. As such, - the message here will have ``\n`` as newline markers. - - :param text: the text to edit. - :param editor: optionally the editor to use. Defaults to automatic - detection. - :param env: environment variables to forward to the editor. - :param require_save: if this is true, then not saving in the editor - will make the return value become `None`. - :param extension: the extension to tell the editor about. This defaults - to `.txt` but changing this might change syntax - highlighting. - :param filename: if provided it will edit this file instead of the - provided text contents. It will not use a temporary - file as an indirection in that case. - """ - from ._termui_impl import Editor - - ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension) - - if filename is None: - return ed.edit(text) - - ed.edit_file(filename) - return None - - -def launch(url: str, wait: bool = False, locate: bool = False) -> int: - """This function launches the given URL (or filename) in the default - viewer application for this file type. If this is an executable, it - might launch the executable in a new session. The return value is - the exit code of the launched application. Usually, ``0`` indicates - success. - - Examples:: - - click.launch('https://click.palletsprojects.com/') - click.launch('/my/downloaded/file', locate=True) - - .. versionadded:: 2.0 - - :param url: URL or filename of the thing to launch. - :param wait: Wait for the program to exit before returning. This - only works if the launched program blocks. In particular, - ``xdg-open`` on Linux does not block. - :param locate: if this is set to `True` then instead of launching the - application associated with the URL it will attempt to - launch a file manager with the file located. This - might have weird effects if the URL does not point to - the filesystem. - """ - from ._termui_impl import open_url - - return open_url(url, wait=wait, locate=locate) - - -# If this is provided, getchar() calls into this instead. This is used -# for unittesting purposes. -_getchar: t.Optional[t.Callable[[bool], str]] = None - - -def getchar(echo: bool = False) -> str: - """Fetches a single character from the terminal and returns it. This - will always return a unicode character and under certain rare - circumstances this might return more than one character. 
The - situations which more than one character is returned is when for - whatever reason multiple characters end up in the terminal buffer or - standard input was not actually a terminal. - - Note that this will always read from the terminal, even if something - is piped into the standard input. - - Note for Windows: in rare cases when typing non-ASCII characters, this - function might wait for a second character and then return both at once. - This is because certain Unicode characters look like special-key markers. - - .. versionadded:: 2.0 - - :param echo: if set to `True`, the character read will also show up on - the terminal. The default is to not show it. - """ - global _getchar - - if _getchar is None: - from ._termui_impl import getchar as f - - _getchar = f - - return _getchar(echo) - - -def raw_terminal() -> t.ContextManager[int]: - from ._termui_impl import raw_terminal as f - - return f() - - -def pause(info: t.Optional[str] = None, err: bool = False) -> None: - """This command stops execution and waits for the user to press any - key to continue. This is similar to the Windows batch "pause" - command. If the program is not run through a terminal, this command - will instead do nothing. - - .. versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param info: The message to print before pausing. Defaults to - ``"Press any key to continue..."``. - :param err: if set to message goes to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - if not isatty(sys.stdin) or not isatty(sys.stdout): - return - - if info is None: - info = _("Press any key to continue...") - - try: - if info: - echo(info, nl=False, err=err) - try: - getchar() - except (KeyboardInterrupt, EOFError): - pass - finally: - if info: - echo(err=err) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/registry.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/registry.py deleted file mode 100644 index 851bc65bc8fa1ea01a48d425563bce06ccfe8ecd..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/registry.py +++ /dev/null @@ -1,297 +0,0 @@ -from __future__ import annotations - -import importlib -import types -import warnings - -__all__ = ["registry", "get_filesystem_class", "default"] - -# internal, mutable -_registry: dict[str, type] = {} - -# external, immutable -registry = types.MappingProxyType(_registry) -default = "file" - - -def register_implementation(name, cls, clobber=False, errtxt=None): - """Add implementation class to the registry - - Parameters - ---------- - name: str - Protocol name to associate with the class - cls: class or str - if a class: fsspec-compliant implementation class (normally inherits from - ``fsspec.AbstractFileSystem``, gets added straight to the registry. If a - str, the full path to an implementation class like package.module.class, - which gets added to known_implementations, - so the import is deferred until the filesystem is actually used. - clobber: bool (optional) - Whether to overwrite a protocol with the same name; if False, will raise - instead. - errtxt: str (optional) - If given, then a failure to import the given class will result in this - text being given. 
- """ - if isinstance(cls, str): - if name in known_implementations and clobber is False: - if cls != known_implementations[name]["class"]: - raise ValueError( - "Name (%s) already in the known_implementations and clobber " - "is False" % name - ) - else: - known_implementations[name] = { - "class": cls, - "err": errtxt or "%s import failed for protocol %s" % (cls, name), - } - - else: - if name in registry and clobber is False: - if _registry[name] is not cls: - raise ValueError( - "Name (%s) already in the registry and clobber is False" % name - ) - else: - _registry[name] = cls - - -# protocols mapped to the class which implements them. This dict can -# updated with register_implementation -known_implementations = { - "file": {"class": "fsspec.implementations.local.LocalFileSystem"}, - "memory": {"class": "fsspec.implementations.memory.MemoryFileSystem"}, - "dropbox": { - "class": "dropboxdrivefs.DropboxDriveFileSystem", - "err": ( - 'DropboxFileSystem requires "dropboxdrivefs",' - '"requests" and "dropbox" to be installed' - ), - }, - "http": { - "class": "fsspec.implementations.http.HTTPFileSystem", - "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed', - }, - "https": { - "class": "fsspec.implementations.http.HTTPFileSystem", - "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed', - }, - "zip": {"class": "fsspec.implementations.zip.ZipFileSystem"}, - "tar": {"class": "fsspec.implementations.tar.TarFileSystem"}, - "gcs": { - "class": "gcsfs.GCSFileSystem", - "err": "Please install gcsfs to access Google Storage", - }, - "gs": { - "class": "gcsfs.GCSFileSystem", - "err": "Please install gcsfs to access Google Storage", - }, - "gdrive": { - "class": "gdrivefs.GoogleDriveFileSystem", - "err": "Please install gdrivefs for access to Google Drive", - }, - "sftp": { - "class": "fsspec.implementations.sftp.SFTPFileSystem", - "err": 'SFTPFileSystem requires "paramiko" to be installed', - }, - "ssh": { - "class": "fsspec.implementations.sftp.SFTPFileSystem", - "err": 'SFTPFileSystem requires "paramiko" to be installed', - }, - "ftp": {"class": "fsspec.implementations.ftp.FTPFileSystem"}, - "hdfs": { - "class": "fsspec.implementations.arrow.HadoopFileSystem", - "err": "pyarrow and local java libraries required for HDFS", - }, - "arrow_hdfs": { - "class": "fsspec.implementations.arrow.HadoopFileSystem", - "err": "pyarrow and local java libraries required for HDFS", - }, - "webhdfs": { - "class": "fsspec.implementations.webhdfs.WebHDFS", - "err": 'webHDFS access requires "requests" to be installed', - }, - "s3": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"}, - "s3a": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"}, - "wandb": {"class": "wandbfs.WandbFS", "err": "Install wandbfs to access wandb"}, - "oci": { - "class": "ocifs.OCIFileSystem", - "err": "Install ocifs to access OCI Object Storage", - }, - "ocilake": { - "class": "ocifs.OCIFileSystem", - "err": "Install ocifs to access OCI Data Lake", - }, - "asynclocal": { - "class": "morefs.asyn_local.AsyncLocalFileSystem", - "err": "Install 'morefs[asynclocalfs]' to use AsyncLocalFileSystem", - }, - "adl": { - "class": "adlfs.AzureDatalakeFileSystem", - "err": "Install adlfs to access Azure Datalake Gen1", - }, - "abfs": { - "class": "adlfs.AzureBlobFileSystem", - "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage", - }, - "az": { - "class": "adlfs.AzureBlobFileSystem", - "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob 
Storage", - }, - "cached": {"class": "fsspec.implementations.cached.CachingFileSystem"}, - "blockcache": {"class": "fsspec.implementations.cached.CachingFileSystem"}, - "filecache": {"class": "fsspec.implementations.cached.WholeFileCacheFileSystem"}, - "simplecache": {"class": "fsspec.implementations.cached.SimpleCacheFileSystem"}, - "dask": { - "class": "fsspec.implementations.dask.DaskWorkerFileSystem", - "err": "Install dask distributed to access worker file system", - }, - "dbfs": { - "class": "fsspec.implementations.dbfs.DatabricksFileSystem", - "err": "Install the requests package to use the DatabricksFileSystem", - }, - "github": { - "class": "fsspec.implementations.github.GithubFileSystem", - "err": "Install the requests package to use the github FS", - }, - "git": { - "class": "fsspec.implementations.git.GitFileSystem", - "err": "Install pygit2 to browse local git repos", - }, - "smb": { - "class": "fsspec.implementations.smb.SMBFileSystem", - "err": 'SMB requires "smbprotocol" or "smbprotocol[kerberos]" installed', - }, - "jupyter": { - "class": "fsspec.implementations.jupyter.JupyterFileSystem", - "err": "Jupyter FS requires requests to be installed", - }, - "jlab": { - "class": "fsspec.implementations.jupyter.JupyterFileSystem", - "err": "Jupyter FS requires requests to be installed", - }, - "libarchive": { - "class": "fsspec.implementations.libarchive.LibArchiveFileSystem", - "err": "LibArchive requires to be installed", - }, - "reference": {"class": "fsspec.implementations.reference.ReferenceFileSystem"}, - "generic": {"class": "fsspec.generic.GenericFileSystem"}, - "oss": { - "class": "ossfs.OSSFileSystem", - "err": "Install ossfs to access Alibaba Object Storage System", - }, - "webdav": { - "class": "webdav4.fsspec.WebdavFileSystem", - "err": "Install webdav4 to access WebDAV", - }, - "dvc": { - "class": "dvc.api.DVCFileSystem", - "err": "Install dvc to access DVCFileSystem", - }, - "hf": { - "class": "huggingface_hub.HfFileSystem", - "err": "Install huggingface_hub to access HfFileSystem", - }, - "root": { - "class": "fsspec_xrootd.XRootDFileSystem", - "err": "Install fsspec-xrootd to access xrootd storage system." - + " Note: 'root' is the protocol name for xrootd storage systems," - + " not referring to root directories", - }, - "dir": {"class": "fsspec.implementations.dirfs.DirFileSystem"}, - "box": { - "class": "boxfs.BoxFileSystem", - "err": "Please install boxfs to access BoxFileSystem", - }, - "lakefs": { - "class": "lakefs_spec.LakeFSFileSystem", - "err": "Please install lakefs-spec to access LakeFSFileSystem", - }, -} - - -def get_filesystem_class(protocol): - """Fetch named protocol implementation from the registry - - The dict ``known_implementations`` maps protocol names to the locations - of classes implementing the corresponding file-system. When used for the - first time, appropriate imports will happen and the class will be placed in - the registry. All subsequent calls will fetch directly from the registry. - - Some protocol implementations require additional dependencies, and so the - import may fail. In this case, the string in the "err" field of the - ``known_implementations`` will be given as the error message. 
- """ - if not protocol: - protocol = default - - if protocol not in registry: - if protocol not in known_implementations: - raise ValueError("Protocol not known: %s" % protocol) - bit = known_implementations[protocol] - try: - register_implementation(protocol, _import_class(bit["class"])) - except ImportError as e: - raise ImportError(bit["err"]) from e - cls = registry[protocol] - if getattr(cls, "protocol", None) in ("abstract", None): - cls.protocol = protocol - - return cls - - -s3_msg = """Your installed version of s3fs is very old and known to cause -severe performance issues, see also https://github.com/dask/dask/issues/10276 - -To fix, you should specify a lower version bound on s3fs, or -update the current installation. -""" - - -def _import_class(cls, minv=None): - """Take a string FQP and return the imported class or identifier - - clas is of the form "package.module.klass" or "package.module:subobject.klass" - """ - if ":" in cls: - mod, name = cls.rsplit(":", 1) - s3 = mod == "s3fs" - mod = importlib.import_module(mod) - if s3 and mod.__version__.split(".") < ["0", "5"]: - warnings.warn(s3_msg) - for part in name.split("."): - mod = getattr(mod, part) - return mod - else: - mod, name = cls.rsplit(".", 1) - s3 = mod == "s3fs" - mod = importlib.import_module(mod) - if s3 and mod.__version__.split(".") < ["0", "5"]: - warnings.warn(s3_msg) - return getattr(mod, name) - - -def filesystem(protocol, **storage_options): - """Instantiate filesystems for given protocol and arguments - - ``storage_options`` are specific to the protocol being chosen, and are - passed directly to the class. - """ - if protocol == "arrow_hdfs": - warnings.warn( - "The 'arrow_hdfs' protocol has been deprecated and will be " - "removed in the future. Specify it as 'hdfs'.", - DeprecationWarning, - ) - - cls = get_filesystem_class(protocol) - return cls(**storage_options) - - -def available_protocols(): - """Return a list of the implemented protocols. - - Note that any given protocol may require extra packages to be importable. 
- """ - return list(known_implementations) diff --git a/spaces/joushe/moe-tts/text/sanskrit.py b/spaces/joushe/moe-tts/text/sanskrit.py deleted file mode 100644 index 0223aaac384a2f850f5bc20651fc18eb964607d0..0000000000000000000000000000000000000000 --- a/spaces/joushe/moe-tts/text/sanskrit.py +++ /dev/null @@ -1,62 +0,0 @@ -import re -from indic_transliteration import sanscript - - -# List of (iast, ipa) pairs: -_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('a', 'ə'), - ('ā', 'aː'), - ('ī', 'iː'), - ('ū', 'uː'), - ('ṛ', 'ɹ`'), - ('ṝ', 'ɹ`ː'), - ('ḷ', 'l`'), - ('ḹ', 'l`ː'), - ('e', 'eː'), - ('o', 'oː'), - ('k', 'k⁼'), - ('k⁼h', 'kʰ'), - ('g', 'g⁼'), - ('g⁼h', 'gʰ'), - ('ṅ', 'ŋ'), - ('c', 'ʧ⁼'), - ('ʧ⁼h', 'ʧʰ'), - ('j', 'ʥ⁼'), - ('ʥ⁼h', 'ʥʰ'), - ('ñ', 'n^'), - ('ṭ', 't`⁼'), - ('t`⁼h', 't`ʰ'), - ('ḍ', 'd`⁼'), - ('d`⁼h', 'd`ʰ'), - ('ṇ', 'n`'), - ('t', 't⁼'), - ('t⁼h', 'tʰ'), - ('d', 'd⁼'), - ('d⁼h', 'dʰ'), - ('p', 'p⁼'), - ('p⁼h', 'pʰ'), - ('b', 'b⁼'), - ('b⁼h', 'bʰ'), - ('y', 'j'), - ('ś', 'ʃ'), - ('ṣ', 's`'), - ('r', 'ɾ'), - ('l̤', 'l`'), - ('h', 'ɦ'), - ("'", ''), - ('~', '^'), - ('ṃ', '^') -]] - - -def devanagari_to_ipa(text): - text = text.replace('ॐ', 'ओम्') - text = re.sub(r'\s*।\s*$', '.', text) - text = re.sub(r'\s*।\s*', ', ', text) - text = re.sub(r'\s*॥', '.', text) - text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST) - for regex, replacement in _iast_to_ipa: - text = re.sub(regex, replacement, text) - text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0) - [:-1]+'h'+x.group(1)+'*', text) - return text diff --git a/spaces/justest/ai-support/README.md b/spaces/justest/ai-support/README.md deleted file mode 100644 index b4b34ff25b551529947161a19a670e4c5718b0dd..0000000000000000000000000000000000000000 --- a/spaces/justest/ai-support/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: ai-support -emoji: 💻🐳 -colorFrom: red -colorTo: blue -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/kadirnar/AnimeSR/README.md b/spaces/kadirnar/AnimeSR/README.md deleted file mode 100644 index 83ccae7d07e0ad2ef254cf3d46e632614b911d69..0000000000000000000000000000000000000000 --- a/spaces/kadirnar/AnimeSR/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: AnimeSR -emoji: 👀 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: apache-2.0 -tags: -- making-demos ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/kaicheng/ChatGPT_ad/modules/config.py b/spaces/kaicheng/ChatGPT_ad/modules/config.py deleted file mode 100644 index c9224996dd7056508519be8cbe906746f362abb0..0000000000000000000000000000000000000000 --- a/spaces/kaicheng/ChatGPT_ad/modules/config.py +++ /dev/null @@ -1,190 +0,0 @@ -from collections import defaultdict -from contextlib import contextmanager -import os -import logging -import sys -import commentjson as json - -from . import shared -from . 
import presets - - -__all__ = [ - "my_api_key", - "authflag", - "auth_list", - "dockerflag", - "retrieve_proxy", - "log_level", - "advance_docs", - "update_doc_config", - "usage_limit", - "multi_api_key", - "server_name", - "server_port", - "share", - "hide_history_when_not_logged_in", - "default_chuanhu_assistant_model" -] - -# Use a single unified config file to avoid the confusion of having too many files (lowest priority) -# It also provides a config hook for supporting custom features later on -if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) -else: - config = {} - -lang_config = config.get("language", "auto") -language = os.environ.get("LANGUAGE", lang_config) - -hide_history_when_not_logged_in = config.get("hide_history_when_not_logged_in", False) - -if os.path.exists("api_key.txt"): - logging.info("检测到api_key.txt文件,正在进行迁移...") - with open("api_key.txt", "r", encoding="utf-8") as f: - config["openai_api_key"] = f.read().strip() - os.rename("api_key.txt", "api_key(deprecated).txt") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4, ensure_ascii=False) - -if os.path.exists("auth.json"): - logging.info("检测到auth.json文件,正在进行迁移...") - auth_list = [] - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - for _ in auth: - if auth[_]["username"] and auth[_]["password"]: - auth_list.append((auth[_]["username"], auth[_]["password"])) - else: - logging.error("请检查auth.json文件中的用户名和密码!") - sys.exit(1) - config["users"] = auth_list - os.rename("auth.json", "auth(deprecated).json") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4, ensure_ascii=False) - -## Handle docker if we are running in Docker -dockerflag = config.get("dockerflag", False) -if os.environ.get("dockerrun") == "yes": - dockerflag = True - -## Handle the api-key and the list of allowed users -my_api_key = config.get("openai_api_key", "") -my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key) - -xmchat_api_key = config.get("xmchat_api_key", "") -os.environ["XMCHAT_API_KEY"] = xmchat_api_key - -minimax_api_key = config.get("minimax_api_key", "") -os.environ["MINIMAX_API_KEY"] = minimax_api_key -minimax_group_id = config.get("minimax_group_id", "") -os.environ["MINIMAX_GROUP_ID"] = minimax_group_id - - -usage_limit = os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120)) - -## Multi-account mechanism -multi_api_key = config.get("multi_api_key", False) # whether the multi-account mechanism is enabled -if multi_api_key: - api_key_list = config.get("api_key_list", []) - if len(api_key_list) == 0: - logging.error("多账号模式已开启,但api_key_list为空,请检查config.json") - sys.exit(1) - shared.state.set_api_key_queue(api_key_list) - -auth_list = config.get("users", []) # actually the list of allowed users -authflag = len(auth_list) > 0 # whether authentication is enabled, determined by the length of auth_list - -# Handle a custom api_host; the environment variable takes precedence and is applied automatically if present -api_host = os.environ.get("OPENAI_API_BASE", config.get("openai_api_base", None)) -if api_host is not None: - shared.state.set_api_host(api_host) - -default_chuanhu_assistant_model = config.get("default_chuanhu_assistant_model", "gpt-3.5-turbo") -for x in ["GOOGLE_CSE_ID", "GOOGLE_API_KEY", "WOLFRAM_ALPHA_APPID", "SERPAPI_API_KEY"]: - if config.get(x, None) is not None: - os.environ[x] = config[x] - -@contextmanager -def retrieve_openai_api(api_key = None): - old_api_key = os.environ.get("OPENAI_API_KEY", "") - if api_key is None: - os.environ["OPENAI_API_KEY"] = my_api_key - yield my_api_key - else: - os.environ["OPENAI_API_KEY"] = api_key - yield api_key - os.environ["OPENAI_API_KEY"] = old_api_key - -## Handle logging -log_level = config.get("log_level", "INFO") -logging.basicConfig( - level=log_level, 
- format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -## 处理代理: -http_proxy = config.get("http_proxy", "") -https_proxy = config.get("https_proxy", "") -http_proxy = os.environ.get("HTTP_PROXY", http_proxy) -https_proxy = os.environ.get("HTTPS_PROXY", https_proxy) - -# 重置系统变量,在不需要设置的时候不设置环境变量,以免引起全局代理报错 -os.environ["HTTP_PROXY"] = "" -os.environ["HTTPS_PROXY"] = "" - -local_embedding = config.get("local_embedding", False) # 是否使用本地embedding - -@contextmanager -def retrieve_proxy(proxy=None): - """ - 1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理 - 2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量 - """ - global http_proxy, https_proxy - if proxy is not None: - http_proxy = proxy - https_proxy = proxy - yield http_proxy, https_proxy - else: - old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] - os.environ["HTTP_PROXY"] = http_proxy - os.environ["HTTPS_PROXY"] = https_proxy - yield http_proxy, https_proxy # return new proxy - - # return old proxy - os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var - - -## 处理advance docs -advance_docs = defaultdict(lambda: defaultdict(dict)) -advance_docs.update(config.get("advance_docs", {})) -def update_doc_config(two_column_pdf): - global advance_docs - advance_docs["pdf"]["two_column"] = two_column_pdf - - logging.info(f"更新后的文件参数为:{advance_docs}") - -## 处理gradio.launch参数 -server_name = config.get("server_name", None) -server_port = config.get("server_port", None) -if server_name is None: - if dockerflag: - server_name = "0.0.0.0" - else: - server_name = "127.0.0.1" -if server_port is None: - if dockerflag: - server_port = 7860 - -assert server_port is None or type(server_port) == int, "要求port设置为int类型" - -# 设置默认model -default_model = config.get("default_model", "") -try: - presets.DEFAULT_MODEL = presets.MODELS.index(default_model) -except ValueError: - pass - -share = config.get("share", False) diff --git a/spaces/kangvcar/RealChar/CHANGELOG.md b/spaces/kangvcar/RealChar/CHANGELOG.md deleted file mode 100644 index 8adabf357fb0f496f0e407a91aca915e78deaded..0000000000000000000000000000000000000000 --- a/spaces/kangvcar/RealChar/CHANGELOG.md +++ /dev/null @@ -1,22 +0,0 @@ -# ChangeLog - -## [v0.0.1] - 2023-07-19 -Release Highlights: - -### Product releases and updates: -- iOS App TestFlight public beta (link https://testflight.apple.com/join/JA6p9sZQ) -- Rewrite Web codebase from vanilla JavaScript to use React framework w/ Javascript -- Support Unicode in chat messages -- Various UI refinements - -### Integration updates: -- Support Azure OpenAI - -### Observability and quality updates: -- Support Integration with LangSmith -- Reduce Docker rebuild time to ~2 seconds -- Support string based user ID -- Support Session ID, Platform, Action Type in database records. - -### New Tutorial: -[How to make your own AI character and run it locally](https://youtu.be/meg5Q8vdWeQ) diff --git a/spaces/kcagle/AutoGPT/autogpt/speech/gtts.py b/spaces/kcagle/AutoGPT/autogpt/speech/gtts.py deleted file mode 100644 index 1c3e9cae0567428582891b11eca42f82a64f5c8e..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/autogpt/speech/gtts.py +++ /dev/null @@ -1,22 +0,0 @@ -""" GTTS Voice. 
""" -import os - -import gtts -from playsound import playsound - -from autogpt.speech.base import VoiceBase - - -class GTTSVoice(VoiceBase): - """GTTS Voice.""" - - def _setup(self) -> None: - pass - - def _speech(self, text: str, _: int = 0) -> bool: - """Play the given text.""" - tts = gtts.gTTS(text) - tts.save("speech.mp3") - playsound("speech.mp3", True) - os.remove("speech.mp3") - return True diff --git a/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer_train.py b/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer_train.py deleted file mode 100644 index 2743d590d882f209734b68921b84a9d23492942c..0000000000000000000000000000000000000000 --- a/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer_train.py +++ /dev/null @@ -1,35 +0,0 @@ -from synthesizer.hparams import hparams -from synthesizer.train import train -from utils.argutils import print_args -import argparse - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("run_id", type=str, help= \ - "Name for this model instance. If a model state from the same run ID was previously " - "saved, the training will restart from there. Pass -f to overwrite saved states and " - "restart from scratch.") - parser.add_argument("syn_dir", type=str, default=argparse.SUPPRESS, help= \ - "Path to the synthesizer directory that contains the ground truth mel spectrograms, " - "the wavs and the embeds.") - parser.add_argument("-m", "--models_dir", type=str, default="synthesizer/saved_models/", help=\ - "Path to the output directory that will contain the saved model weights and the logs.") - parser.add_argument("-s", "--save_every", type=int, default=1000, help= \ - "Number of steps between updates of the model on the disk. Set to 0 to never save the " - "model.") - parser.add_argument("-b", "--backup_every", type=int, default=25000, help= \ - "Number of steps between backups of the model. 
Set to 0 to never make backups of the " - "model.") - parser.add_argument("-f", "--force_restart", action="store_true", help= \ - "Do not load any saved model and restart from scratch.") - parser.add_argument("--hparams", default="", - help="Hyperparameter overrides as a comma-separated list of name=value " - "pairs") - args = parser.parse_args() - print_args(args, parser) - - args.hparams = hparams.parse(args.hparams) - - # Run the training - train(**vars(args)) diff --git a/spaces/kevinwang676/M4Singer/utils/audio.py b/spaces/kevinwang676/M4Singer/utils/audio.py deleted file mode 100644 index aba7ab926cf793d085bbdc70c97f376001183fe1..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/M4Singer/utils/audio.py +++ /dev/null @@ -1,56 +0,0 @@ -import subprocess -import matplotlib - -matplotlib.use('Agg') -import librosa -import librosa.filters -import numpy as np -from scipy import signal -from scipy.io import wavfile - - -def save_wav(wav, path, sr, norm=False): - if norm: - wav = wav / np.abs(wav).max() - wav *= 32767 - # proposed by @dsmiller - wavfile.write(path, sr, wav.astype(np.int16)) - - -def get_hop_size(hparams): - hop_size = hparams['hop_size'] - if hop_size is None: - assert hparams['frame_shift_ms'] is not None - hop_size = int(hparams['frame_shift_ms'] / 1000 * hparams['audio_sample_rate']) - return hop_size - - -########################################################################################### -def _stft(y, hparams): - return librosa.stft(y=y, n_fft=hparams['fft_size'], hop_length=get_hop_size(hparams), - win_length=hparams['win_size'], pad_mode='constant') - - -def _istft(y, hparams): - return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams['win_size']) - - -def librosa_pad_lr(x, fsize, fshift, pad_sides=1): - '''compute right padding (final frame) or both sides padding (first and final frames) - ''' - assert pad_sides in (1, 2) - # return int(fsize // 2) - pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0] - if pad_sides == 1: - return 0, pad - else: - return pad // 2, pad // 2 + pad % 2 - - -# Conversions -def amp_to_db(x): - return 20 * np.log10(np.maximum(1e-5, x)) - - -def normalize(S, hparams): - return (S - hparams['min_level_db']) / -hparams['min_level_db'] diff --git a/spaces/kevinwang676/VoiceChanger/infer_pack/modules.py b/spaces/kevinwang676/VoiceChanger/infer_pack/modules.py deleted file mode 100644 index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, 
- kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - 
res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - 
if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) 
- h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/repeat.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/repeat.py deleted file mode 100644 index 7a8af6ce850e930feb2bf0cd0e9bc7a8d21520e4..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/repeat.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Copyright 2019 Shigeki Karita -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Repeat the same layer definition.""" - -import torch - - -class MultiSequential(torch.nn.Sequential): - """Multi-input multi-output torch.nn.Sequential.""" - - def forward(self, *args): - """Repeat.""" - for m in self: - args = m(*args) - return args - - -def repeat(N, fn): - """Repeat module N times. - - :param int N: repeat time - :param function fn: function to generate module - :return: repeated modules - :rtype: MultiSequential - """ - return MultiSequential(*[fn(n) for n in range(N)]) diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh b/spaces/koajoel/PolyFormer/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh deleted file mode 100644 index e3efeb21d302ef8d9eae8f1d4b06434c593705f6..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -echo 'Cloning Moses github repository (for tokenization scripts)...' -git clone https://github.com/moses-smt/mosesdecoder.git - -SCRIPTS=mosesdecoder/scripts -TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl -CLEAN=$SCRIPTS/training/clean-corpus-n.perl -REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl - -URLS=( - "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz" - "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz" - "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz" - "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz" - "http://data.statmt.org/wmt17/translation-task/dev.tgz" - "http://statmt.org/wmt14/test-full.tgz" -) -CORPORA=( - "training/europarl-v7.de-en" - "commoncrawl.de-en" - "training-parallel-nc-v13/news-commentary-v13.de-en" - "rapid2016.de-en" -) - -if [ ! -d "$SCRIPTS" ]; then - echo "Please set SCRIPTS variable correctly to point to Moses scripts." 
- exit -fi - -src=en -tgt=de -lang=en-de -prep=wmt18_en_de -tmp=$prep/tmp -orig=orig -dev=dev/newstest2012 -codes=32000 -bpe=bpe.32k - -mkdir -p $orig $tmp $prep $bpe - -cd $orig - -for ((i=0;i<${#URLS[@]};++i)); do - url=${URLS[i]} - file=$(basename $url) - if [ -f $file ]; then - echo "$file already exists, skipping download" - else - wget "$url" - if [ -f $file ]; then - echo "$url successfully downloaded." - else - echo "$url not successfully downloaded." - exit 1 - fi - if [ ${file: -4} == ".tgz" ]; then - tar zxvf $file - elif [ ${file: -4} == ".tar" ]; then - tar xvf $file - fi - fi -done -cd .. - -echo "pre-processing train data..." -for l in $src $tgt; do - rm -rf $tmp/train.tags.$lang.tok.$l - for f in "${CORPORA[@]}"; do - cat $orig/$f.$l | \ - perl $REM_NON_PRINT_CHAR | \ - perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/train.tags.$lang.tok.$l - done -done - -echo "pre-processing test data..." -for l in $src $tgt; do - if [ "$l" == "$src" ]; then - t="src" - else - t="ref" - fi - grep '\s*//g' | \ - sed -e 's/\s*<\/seg>\s*//g' | \ - sed -e "s/\’/\'/g" | \ - perl $TOKENIZER -threads 8 -l $l -no-escape > $tmp/test.$l - echo "" -done - -# apply length filtering before BPE -perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train 1 100 - -# use newstest2012 for valid -echo "pre-processing valid data..." -for l in $src $tgt; do - rm -rf $tmp/valid.$l - cat $orig/$dev.$l | \ - perl $REM_NON_PRINT_CHAR | \ - perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/valid.$l -done - -mkdir output -mv $tmp/{train,valid,test}.{$src,$tgt} output - -#BPE -git clone https://github.com/glample/fastBPE.git -pushd fastBPE -g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast -popd -fastBPE/fast learnbpe $codes output/train.$src output/train.$tgt > $bpe/codes -for split in {train,valid,test}; do for lang in {en,de}; do fastBPE/fast applybpe $bpe/$split.$lang output/$split.$lang $bpe/codes; done; done diff --git a/spaces/kukuhtw/AutoGPT/README.md b/spaces/kukuhtw/AutoGPT/README.md deleted file mode 100644 index 5bf09b995f04f7af05d1314906b1b1ff39c20ddc..0000000000000000000000000000000000000000 --- a/spaces/kukuhtw/AutoGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AutoGPT -emoji: 🦾 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.27.0 -app_file: ui/app.py -pinned: false -license: mit -duplicated_from: aliabid94/AutoGPT ---- - diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/resolver.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/resolver.py deleted file mode 100644 index 531ce93fccc2d3be442556de644cdc78d31d9c6e..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/resolver.py +++ /dev/null @@ -1,160 +0,0 @@ -import asyncio -import socket -from typing import Any, Dict, List, Optional, Type, Union - -from .abc import AbstractResolver -from .helpers import get_running_loop - -__all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver") - -try: - import aiodns - - # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname') -except ImportError: # pragma: no cover - aiodns = None - -aiodns_default = False - - -class ThreadedResolver(AbstractResolver): - """Threaded resolver. - - Uses an Executor for synchronous getaddrinfo() calls. - concurrent.futures.ThreadPoolExecutor is used by default. 
- """ - - def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: - self._loop = get_running_loop(loop) - - async def resolve( - self, hostname: str, port: int = 0, family: int = socket.AF_INET - ) -> List[Dict[str, Any]]: - infos = await self._loop.getaddrinfo( - hostname, - port, - type=socket.SOCK_STREAM, - family=family, - flags=socket.AI_ADDRCONFIG, - ) - - hosts = [] - for family, _, proto, _, address in infos: - if family == socket.AF_INET6: - if len(address) < 3: - # IPv6 is not supported by Python build, - # or IPv6 is not enabled in the host - continue - if address[3]: # type: ignore[misc] - # This is essential for link-local IPv6 addresses. - # LL IPv6 is a VERY rare case. Strictly speaking, we should use - # getnameinfo() unconditionally, but performance makes sense. - host, _port = socket.getnameinfo( - address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV - ) - port = int(_port) - else: - host, port = address[:2] - else: # IPv4 - assert family == socket.AF_INET - host, port = address # type: ignore[misc] - hosts.append( - { - "hostname": hostname, - "host": host, - "port": port, - "family": family, - "proto": proto, - "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV, - } - ) - - return hosts - - async def close(self) -> None: - pass - - -class AsyncResolver(AbstractResolver): - """Use the `aiodns` package to make asynchronous DNS lookups""" - - def __init__( - self, - loop: Optional[asyncio.AbstractEventLoop] = None, - *args: Any, - **kwargs: Any - ) -> None: - if aiodns is None: - raise RuntimeError("Resolver requires aiodns library") - - self._loop = get_running_loop(loop) - self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs) - - if not hasattr(self._resolver, "gethostbyname"): - # aiodns 1.1 is not available, fallback to DNSResolver.query - self.resolve = self._resolve_with_query # type: ignore - - async def resolve( - self, host: str, port: int = 0, family: int = socket.AF_INET - ) -> List[Dict[str, Any]]: - try: - resp = await self._resolver.gethostbyname(host, family) - except aiodns.error.DNSError as exc: - msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed" - raise OSError(msg) from exc - hosts = [] - for address in resp.addresses: - hosts.append( - { - "hostname": host, - "host": address, - "port": port, - "family": family, - "proto": 0, - "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV, - } - ) - - if not hosts: - raise OSError("DNS lookup failed") - - return hosts - - async def _resolve_with_query( - self, host: str, port: int = 0, family: int = socket.AF_INET - ) -> List[Dict[str, Any]]: - if family == socket.AF_INET6: - qtype = "AAAA" - else: - qtype = "A" - - try: - resp = await self._resolver.query(host, qtype) - except aiodns.error.DNSError as exc: - msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed" - raise OSError(msg) from exc - - hosts = [] - for rr in resp: - hosts.append( - { - "hostname": host, - "host": rr.host, - "port": port, - "family": family, - "proto": 0, - "flags": socket.AI_NUMERICHOST, - } - ) - - if not hosts: - raise OSError("DNS lookup failed") - - return hosts - - async def close(self) -> None: - self._resolver.cancel() - - -_DefaultType = Type[Union[AsyncResolver, ThreadedResolver]] -DefaultResolver: _DefaultType = AsyncResolver if aiodns_default else ThreadedResolver diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/github.py 
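ThreadedResolver above offloads blocking getaddrinfo() calls to the event loop's executor and returns a list of host dictionaries. A short usage sketch, assuming aiohttp is installed and the machine can actually resolve the (arbitrary) hostname used here:

import asyncio
import socket
from aiohttp.resolver import ThreadedResolver

async def main():
    resolver = ThreadedResolver()
    # Each entry carries hostname/host/port/family/proto/flags, as built above.
    hosts = await resolver.resolve("example.org", port=443, family=socket.AF_INET)
    for entry in hosts:
        print(entry["host"], entry["port"], entry["family"])
    await resolver.close()

asyncio.run(main())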
b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/github.py deleted file mode 100644 index b148124d7481bb867cb100ad1ab2213e6acadf56..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/github.py +++ /dev/null @@ -1,219 +0,0 @@ -import requests - -from ..spec import AbstractFileSystem -from ..utils import infer_storage_options -from .memory import MemoryFile - -# TODO: add GIST backend, would be very similar - - -class GithubFileSystem(AbstractFileSystem): - """Interface to files in github - - An instance of this class provides the files residing within a remote github - repository. You may specify a point in the repos history, by SHA, branch - or tag (default is current master). - - Given that code files tend to be small, and that github does not support - retrieving partial content, we always fetch whole files. - - When using fsspec.open, allows URIs of the form: - - - "github://path/file", in which case you must specify org, repo and - may specify sha in the extra args - - 'github://org:repo@/precip/catalog.yml', where the org and repo are - part of the URI - - 'github://org:repo@sha/precip/catalog.yml', where the sha is also included - - ``sha`` can be the full or abbreviated hex of the commit you want to fetch - from, or a branch or tag name (so long as it doesn't contain special characters - like "/", "?", which would have to be HTTP-encoded). - - For authorised access, you must provide username and token, which can be made - at https://github.com/settings/tokens - """ - - url = "https://api.github.com/repos/{org}/{repo}/git/trees/{sha}" - rurl = "https://raw.githubusercontent.com/{org}/{repo}/{sha}/{path}" - protocol = "github" - - def __init__(self, org, repo, sha=None, username=None, token=None, **kwargs): - super().__init__(**kwargs) - self.org = org - self.repo = repo - if (username is None) ^ (token is None): - raise ValueError("Auth required both username and token") - self.username = username - self.token = token - if sha is None: - # look up default branch (not necessarily "master") - u = "https://api.github.com/repos/{org}/{repo}" - r = requests.get(u.format(org=org, repo=repo), **self.kw) - r.raise_for_status() - sha = r.json()["default_branch"] - - self.root = sha - self.ls("") - - @property - def kw(self): - if self.username: - return {"auth": (self.username, self.token)} - return {} - - @classmethod - def repos(cls, org_or_user, is_org=True): - """List repo names for given org or user - - This may become the top level of the FS - - Parameters - ---------- - org_or_user: str - Name of the github org or user to query - is_org: bool (default True) - Whether the name is an organisation (True) or user (False) - - Returns - ------- - List of string - """ - r = requests.get( - "https://api.github.com/{part}/{org}/repos".format( - part=["users", "orgs"][is_org], org=org_or_user - ) - ) - r.raise_for_status() - return [repo["name"] for repo in r.json()] - - @property - def tags(self): - """Names of tags in the repo""" - r = requests.get( - "https://api.github.com/repos/{org}/{repo}/tags" - "".format(org=self.org, repo=self.repo), - **self.kw, - ) - r.raise_for_status() - return [t["name"] for t in r.json()] - - @property - def branches(self): - """Names of branches in the repo""" - r = requests.get( - "https://api.github.com/repos/{org}/{repo}/branches" - "".format(org=self.org, repo=self.repo), - **self.kw, - ) - r.raise_for_status() - return 
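GithubFileSystem exposes a repository as a read-only fsspec filesystem, either through keyword arguments or through github:// URLs of the form documented above. A short usage sketch, assuming fsspec is installed, the GitHub API is reachable, and that the public fsspec/filesystem_spec repository (used here only as an example) still exists:

import fsspec

# Keyword form: org/repo given explicitly, default branch resolved automatically.
fs = fsspec.filesystem("github", org="fsspec", repo="filesystem_spec")
print(fs.ls("")[:5])                                    # a few top-level entries

# URL form: 'github://org:repo@/path', as described in the class docstring.
with fsspec.open("github://fsspec:filesystem_spec@/README.md") as f:
    print(f.read(80))                                   # first bytes of the file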
[t["name"] for t in r.json()] - - @property - def refs(self): - """Named references, tags and branches""" - return {"tags": self.tags, "branches": self.branches} - - def ls(self, path, detail=False, sha=None, _sha=None, **kwargs): - """List files at given path - - Parameters - ---------- - path: str - Location to list, relative to repo root - detail: bool - If True, returns list of dicts, one per file; if False, returns - list of full filenames only - sha: str (optional) - List at the given point in the repo history, branch or tag name or commit - SHA - _sha: str (optional) - List this specific tree object (used internally to descend into trees) - """ - path = self._strip_protocol(path) - if path == "": - _sha = sha or self.root - if _sha is None: - parts = path.rstrip("/").split("/") - so_far = "" - _sha = sha or self.root - for part in parts: - out = self.ls(so_far, True, sha=sha, _sha=_sha) - so_far += "/" + part if so_far else part - out = [o for o in out if o["name"] == so_far] - if not out: - raise FileNotFoundError(path) - out = out[0] - if out["type"] == "file": - if detail: - return [out] - else: - return path - _sha = out["sha"] - if path not in self.dircache or sha not in [self.root, None]: - r = requests.get( - self.url.format(org=self.org, repo=self.repo, sha=_sha), **self.kw - ) - if r.status_code == 404: - raise FileNotFoundError(path) - r.raise_for_status() - types = {"blob": "file", "tree": "directory"} - out = [ - { - "name": path + "/" + f["path"] if path else f["path"], - "mode": f["mode"], - "type": types[f["type"]], - "size": f.get("size", 0), - "sha": f["sha"], - } - for f in r.json()["tree"] - if f["type"] in types - ] - if sha in [self.root, None]: - self.dircache[path] = out - else: - out = self.dircache[path] - if detail: - return out - else: - return sorted([f["name"] for f in out]) - - def invalidate_cache(self, path=None): - self.dircache.clear() - - @classmethod - def _strip_protocol(cls, path): - opts = infer_storage_options(path) - if "username" not in opts: - return super()._strip_protocol(path) - return opts["path"].lstrip("/") - - @staticmethod - def _get_kwargs_from_urls(path): - opts = infer_storage_options(path) - if "username" not in opts: - return {} - out = {"org": opts["username"], "repo": opts["password"]} - if opts["host"]: - out["sha"] = opts["host"] - return out - - def _open( - self, - path, - mode="rb", - block_size=None, - autocommit=True, - cache_options=None, - sha=None, - **kwargs, - ): - if mode != "rb": - raise NotImplementedError - url = self.rurl.format( - org=self.org, repo=self.repo, path=path, sha=sha or self.root - ) - r = requests.get(url, **self.kw) - if r.status_code == 404: - raise FileNotFoundError(path) - r.raise_for_status() - return MemoryFile(None, None, r.content) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/exceptions.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/exceptions.py deleted file mode 100644 index 87db3df3a6dde1bbc0aae1128ca21f365e774666..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/exceptions.py +++ /dev/null @@ -1,396 +0,0 @@ -""" -Validation errors, and some surrounding helpers. 
-""" -from __future__ import annotations - -from collections import defaultdict, deque -from pprint import pformat -from textwrap import dedent, indent -import heapq -import itertools - -import attr - -from jsonschema import _utils - -WEAK_MATCHES: frozenset[str] = frozenset(["anyOf", "oneOf"]) -STRONG_MATCHES: frozenset[str] = frozenset() - -_unset = _utils.Unset() - - -class _Error(Exception): - def __init__( - self, - message, - validator=_unset, - path=(), - cause=None, - context=(), - validator_value=_unset, - instance=_unset, - schema=_unset, - schema_path=(), - parent=None, - type_checker=_unset, - ): - super(_Error, self).__init__( - message, - validator, - path, - cause, - context, - validator_value, - instance, - schema, - schema_path, - parent, - ) - self.message = message - self.path = self.relative_path = deque(path) - self.schema_path = self.relative_schema_path = deque(schema_path) - self.context = list(context) - self.cause = self.__cause__ = cause - self.validator = validator - self.validator_value = validator_value - self.instance = instance - self.schema = schema - self.parent = parent - self._type_checker = type_checker - - for error in context: - error.parent = self - - def __repr__(self): - return f"<{self.__class__.__name__}: {self.message!r}>" - - def __str__(self): - essential_for_verbose = ( - self.validator, self.validator_value, self.instance, self.schema, - ) - if any(m is _unset for m in essential_for_verbose): - return self.message - - schema_path = _utils.format_as_index( - container=self._word_for_schema_in_error_message, - indices=list(self.relative_schema_path)[:-1], - ) - instance_path = _utils.format_as_index( - container=self._word_for_instance_in_error_message, - indices=self.relative_path, - ) - prefix = 16 * " " - - return dedent( - f"""\ - {self.message} - - Failed validating {self.validator!r} in {schema_path}: - {indent(pformat(self.schema, width=72), prefix).lstrip()} - - On {instance_path}: - {indent(pformat(self.instance, width=72), prefix).lstrip()} - """.rstrip(), - ) - - @classmethod - def create_from(cls, other): - return cls(**other._contents()) - - @property - def absolute_path(self): - parent = self.parent - if parent is None: - return self.relative_path - - path = deque(self.relative_path) - path.extendleft(reversed(parent.absolute_path)) - return path - - @property - def absolute_schema_path(self): - parent = self.parent - if parent is None: - return self.relative_schema_path - - path = deque(self.relative_schema_path) - path.extendleft(reversed(parent.absolute_schema_path)) - return path - - @property - def json_path(self): - path = "$" - for elem in self.absolute_path: - if isinstance(elem, int): - path += "[" + str(elem) + "]" - else: - path += "." 
+ elem - return path - - def _set(self, type_checker=None, **kwargs): - if type_checker is not None and self._type_checker is _unset: - self._type_checker = type_checker - - for k, v in kwargs.items(): - if getattr(self, k) is _unset: - setattr(self, k, v) - - def _contents(self): - attrs = ( - "message", "cause", "context", "validator", "validator_value", - "path", "schema_path", "instance", "schema", "parent", - ) - return dict((attr, getattr(self, attr)) for attr in attrs) - - def _matches_type(self): - try: - expected = self.schema["type"] - except (KeyError, TypeError): - return False - - if isinstance(expected, str): - return self._type_checker.is_type(self.instance, expected) - - return any( - self._type_checker.is_type(self.instance, expected_type) - for expected_type in expected - ) - - -class ValidationError(_Error): - """ - An instance was invalid under a provided schema. - """ - - _word_for_schema_in_error_message = "schema" - _word_for_instance_in_error_message = "instance" - - -class SchemaError(_Error): - """ - A schema was invalid under its corresponding metaschema. - """ - - _word_for_schema_in_error_message = "metaschema" - _word_for_instance_in_error_message = "schema" - - -@attr.s(hash=True) -class RefResolutionError(Exception): - """ - A ref could not be resolved. - """ - - _cause = attr.ib() - - def __str__(self): - return str(self._cause) - - -class UndefinedTypeCheck(Exception): - """ - A type checker was asked to check a type it did not have registered. - """ - - def __init__(self, type): - self.type = type - - def __str__(self): - return f"Type {self.type!r} is unknown to this type checker" - - -class UnknownType(Exception): - """ - A validator was asked to validate an instance against an unknown type. - """ - - def __init__(self, type, instance, schema): - self.type = type - self.instance = instance - self.schema = schema - - def __str__(self): - prefix = 16 * " " - - return dedent( - f"""\ - Unknown type {self.type!r} for validator with schema: - {indent(pformat(self.schema, width=72), prefix).lstrip()} - - While checking instance: - {indent(pformat(self.instance, width=72), prefix).lstrip()} - """.rstrip(), - ) - - -class FormatError(Exception): - """ - Validating a format failed. - """ - - def __init__(self, message, cause=None): - super(FormatError, self).__init__(message, cause) - self.message = message - self.cause = self.__cause__ = cause - - def __str__(self): - return self.message - - -class ErrorTree: - """ - ErrorTrees make it easier to check which validations failed. - """ - - _instance = _unset - - def __init__(self, errors=()): - self.errors = {} - self._contents = defaultdict(self.__class__) - - for error in errors: - container = self - for element in error.path: - container = container[element] - container.errors[error.validator] = error - - container._instance = error.instance - - def __contains__(self, index): - """ - Check whether ``instance[index]`` has any errors. - """ - - return index in self._contents - - def __getitem__(self, index): - """ - Retrieve the child tree one level down at the given ``index``. - - If the index is not in the instance that this tree corresponds - to and is not known by this tree, whatever error would be raised - by ``instance.__getitem__`` will be propagated (usually this is - some subclass of `LookupError`. 
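ErrorTree and best_match below are the two main consumers of ValidationError objects: the tree groups errors by their location in the instance, and best_match applies the relevance heuristic to pick one representative error. A small usage sketch, assuming the jsonschema package is installed (the schema and instance are made up for illustration):

import jsonschema
from jsonschema.exceptions import ErrorTree, best_match

schema = {
    "type": "object",
    "properties": {"age": {"type": "integer"}, "name": {"type": "string"}},
}
instance = {"age": "forty", "name": 123}      # both properties have the wrong type

validator = jsonschema.Draft7Validator(schema)
errors = list(validator.iter_errors(instance))

tree = ErrorTree(errors)
print("age" in tree, tree.total_errors)       # True 2
print(best_match(errors).json_path)           # e.g. "$.age" (one representative error)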
- """ - - if self._instance is not _unset and index not in self: - self._instance[index] - return self._contents[index] - - def __setitem__(self, index, value): - """ - Add an error to the tree at the given ``index``. - """ - self._contents[index] = value - - def __iter__(self): - """ - Iterate (non-recursively) over the indices in the instance with errors. - """ - - return iter(self._contents) - - def __len__(self): - """ - Return the `total_errors`. - """ - return self.total_errors - - def __repr__(self): - total = len(self) - errors = "error" if total == 1 else "errors" - return f"<{self.__class__.__name__} ({total} total {errors})>" - - @property - def total_errors(self): - """ - The total number of errors in the entire tree, including children. - """ - - child_errors = sum(len(tree) for _, tree in self._contents.items()) - return len(self.errors) + child_errors - - -def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES): - """ - Create a key function that can be used to sort errors by relevance. - - Arguments: - weak (set): - a collection of validation keywords to consider to be - "weak". If there are two errors at the same level of the - instance and one is in the set of weak validation keywords, - the other error will take priority. By default, :kw:`anyOf` - and :kw:`oneOf` are considered weak keywords and will be - superseded by other same-level validation errors. - - strong (set): - a collection of validation keywords to consider to be - "strong" - """ - def relevance(error): - validator = error.validator - return ( - -len(error.path), - validator not in weak, - validator in strong, - not error._matches_type(), - ) - return relevance - - -relevance = by_relevance() - - -def best_match(errors, key=relevance): - """ - Try to find an error that appears to be the best match among given errors. - - In general, errors that are higher up in the instance (i.e. for which - `ValidationError.path` is shorter) are considered better matches, - since they indicate "more" is wrong with the instance. - - If the resulting match is either :kw:`oneOf` or :kw:`anyOf`, the - *opposite* assumption is made -- i.e. the deepest error is picked, - since these keywords only need to match once, and any other errors - may not be relevant. - - Arguments: - errors (collections.abc.Iterable): - - the errors to select from. Do not provide a mixture of - errors from different validation attempts (i.e. from - different instances or schemas), since it won't produce - sensical output. - - key (collections.abc.Callable): - - the key to use when sorting errors. See `relevance` and - transitively `by_relevance` for more details (the default is - to sort with the defaults of that function). Changing the - default is only useful if you want to change the function - that rates errors but still want the error context descent - done by this function. - - Returns: - the best matching error, or ``None`` if the iterable was empty - - .. note:: - - This function is a heuristic. Its return value may change for a given - set of inputs from version to version if better heuristics are added. - """ - errors = iter(errors) - best = next(errors, None) - if best is None: - return - best = max(itertools.chain([best], errors), key=key) - - while best.context: - # Calculate the minimum via nsmallest, because we don't recurse if - # all nested errors have the same relevance (i.e. 
if min == max == all) - smallest = heapq.nsmallest(2, best.context, key=key) - if len(smallest) == 2 and key(smallest[0]) == key(smallest[1]): - return best - best = smallest[0] - return best diff --git a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/data/transforms.py b/spaces/leafShen/CodeFormer/CodeFormer/basicsr/data/transforms.py deleted file mode 100644 index aead9dc73ed063e1c5865040eaa2652b26aa3ad3..0000000000000000000000000000000000000000 --- a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/data/transforms.py +++ /dev/null @@ -1,165 +0,0 @@ -import cv2 -import random - - -def mod_crop(img, scale): - """Mod crop images, used during testing. - - Args: - img (ndarray): Input image. - scale (int): Scale factor. - - Returns: - ndarray: Result image. - """ - img = img.copy() - if img.ndim in (2, 3): - h, w = img.shape[0], img.shape[1] - h_remainder, w_remainder = h % scale, w % scale - img = img[:h - h_remainder, :w - w_remainder, ...] - else: - raise ValueError(f'Wrong img ndim: {img.ndim}.') - return img - - -def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path): - """Paired random crop. - - It crops lists of lq and gt images with corresponding locations. - - Args: - img_gts (list[ndarray] | ndarray): GT images. Note that all images - should have the same shape. If the input is an ndarray, it will - be transformed to a list containing itself. - img_lqs (list[ndarray] | ndarray): LQ images. Note that all images - should have the same shape. If the input is an ndarray, it will - be transformed to a list containing itself. - gt_patch_size (int): GT patch size. - scale (int): Scale factor. - gt_path (str): Path to ground-truth. - - Returns: - list[ndarray] | ndarray: GT images and LQ images. If returned results - only have one element, just return ndarray. - """ - - if not isinstance(img_gts, list): - img_gts = [img_gts] - if not isinstance(img_lqs, list): - img_lqs = [img_lqs] - - h_lq, w_lq, _ = img_lqs[0].shape - h_gt, w_gt, _ = img_gts[0].shape - lq_patch_size = gt_patch_size // scale - - if h_gt != h_lq * scale or w_gt != w_lq * scale: - raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ', - f'multiplication of LQ ({h_lq}, {w_lq}).') - if h_lq < lq_patch_size or w_lq < lq_patch_size: - raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size ' - f'({lq_patch_size}, {lq_patch_size}). ' - f'Please remove {gt_path}.') - - # randomly choose top and left coordinates for lq patch - top = random.randint(0, h_lq - lq_patch_size) - left = random.randint(0, w_lq - lq_patch_size) - - # crop lq patch - img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs] - - # crop corresponding gt patch - top_gt, left_gt = int(top * scale), int(left * scale) - img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts] - if len(img_gts) == 1: - img_gts = img_gts[0] - if len(img_lqs) == 1: - img_lqs = img_lqs[0] - return img_gts, img_lqs - - -def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False): - """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees). - - We use vertical flip and transpose for rotation implementation. - All the images in the list use the same augmentation. - - Args: - imgs (list[ndarray] | ndarray): Images to be augmented. If the input - is an ndarray, it will be transformed to a list. - hflip (bool): Horizontal flip. Default: True. - rotation (bool): Ratotation. Default: True. - flows (list[ndarray]: Flows to be augmented. 
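paired_random_crop above keeps the LQ and GT patches aligned by scaling the randomly chosen corner: the LQ patch at (top, left) of size P corresponds to the GT patch at (top*scale, left*scale) of size P*scale. A numpy-only sketch of that geometry (the fake GT is just the LQ nearest-neighbour upsampled, so alignment can be checked directly):

import random
import numpy as np

scale, lq_patch = 4, 32
lq = np.arange(48 * 64).reshape(48, 64)
gt = np.kron(lq, np.ones((scale, scale)))       # fake GT: LQ upsampled x4

top = random.randint(0, lq.shape[0] - lq_patch)
left = random.randint(0, lq.shape[1] - lq_patch)
lq_crop = lq[top:top + lq_patch, left:left + lq_patch]
gt_crop = gt[top * scale:(top + lq_patch) * scale,
             left * scale:(left + lq_patch) * scale]

print(lq_crop.shape, gt_crop.shape)                       # (32, 32) (128, 128)
print(np.allclose(gt_crop[::scale, ::scale], lq_crop))    # True: patches stay aligned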
If the input is an - ndarray, it will be transformed to a list. - Dimension is (h, w, 2). Default: None. - return_status (bool): Return the status of flip and rotation. - Default: False. - - Returns: - list[ndarray] | ndarray: Augmented images and flows. If returned - results only have one element, just return ndarray. - - """ - hflip = hflip and random.random() < 0.5 - vflip = rotation and random.random() < 0.5 - rot90 = rotation and random.random() < 0.5 - - def _augment(img): - if hflip: # horizontal - cv2.flip(img, 1, img) - if vflip: # vertical - cv2.flip(img, 0, img) - if rot90: - img = img.transpose(1, 0, 2) - return img - - def _augment_flow(flow): - if hflip: # horizontal - cv2.flip(flow, 1, flow) - flow[:, :, 0] *= -1 - if vflip: # vertical - cv2.flip(flow, 0, flow) - flow[:, :, 1] *= -1 - if rot90: - flow = flow.transpose(1, 0, 2) - flow = flow[:, :, [1, 0]] - return flow - - if not isinstance(imgs, list): - imgs = [imgs] - imgs = [_augment(img) for img in imgs] - if len(imgs) == 1: - imgs = imgs[0] - - if flows is not None: - if not isinstance(flows, list): - flows = [flows] - flows = [_augment_flow(flow) for flow in flows] - if len(flows) == 1: - flows = flows[0] - return imgs, flows - else: - if return_status: - return imgs, (hflip, vflip, rot90) - else: - return imgs - - -def img_rotate(img, angle, center=None, scale=1.0): - """Rotate image. - - Args: - img (ndarray): Image to be rotated. - angle (float): Rotation angle in degrees. Positive values mean - counter-clockwise rotation. - center (tuple[int]): Rotation center. If the center is None, - initialize it as the center of the image. Default: None. - scale (float): Isotropic scale factor. Default: 1.0. - """ - (h, w) = img.shape[:2] - - if center is None: - center = (w // 2, h // 2) - - matrix = cv2.getRotationMatrix2D(center, angle, scale) - rotated_img = cv2.warpAffine(img, matrix, (w, h)) - return rotated_img diff --git a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/utils/realesrgan_utils.py b/spaces/leafShen/CodeFormer/CodeFormer/basicsr/utils/realesrgan_utils.py deleted file mode 100644 index 5a7b159b697d9e1ca0c51900ec1fe01f9feeb18f..0000000000000000000000000000000000000000 --- a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/utils/realesrgan_utils.py +++ /dev/null @@ -1,301 +0,0 @@ -import cv2 -import math -import numpy as np -import os -import queue -import threading -import torch -from basicsr.utils.download_util import load_file_from_url -from torch.nn import functional as F - -# ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - - -class RealESRGANer(): - """A helper class for upsampling images with RealESRGAN. - - Args: - scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4. - model_path (str): The path to the pretrained model. It can be urls (will first download it automatically). - model (nn.Module): The defined network. Default: None. - tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop - input images into tiles, and then process each of them. Finally, they will be merged into one image. - 0 denotes for do not use tile. Default: 0. - tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10. - pre_pad (int): Pad the input images to avoid border artifacts. Default: 10. - half (float): Whether to use half precision during inference. Default: False. 
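_augment_flow above has to mirror whatever is done to the image: after a horizontal flip the x-displacements change sign (and after a vertical flip the y-displacements do), otherwise the flow would point at the wrong pixels. A numpy-only illustration of the horizontal case:

import numpy as np

flow = np.zeros((4, 4, 2), dtype=np.float32)
flow[..., 0] = 1.0                    # every pixel moves one step to the right

flipped = flow[:, ::-1, :].copy()     # horizontal flip, same effect as cv2.flip(flow, 1)
flipped[..., 0] *= -1                 # negate the x-component, as _augment_flow does
print(flipped[0, 0])                  # [-1.  0.]: in the flipped frame the motion now points left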
- """ - - def __init__(self, - scale, - model_path, - model=None, - tile=0, - tile_pad=10, - pre_pad=10, - half=False, - device=None, - gpu_id=None): - self.scale = scale - self.tile_size = tile - self.tile_pad = tile_pad - self.pre_pad = pre_pad - self.mod_scale = None - self.half = half - - # initialize model - if gpu_id: - self.device = torch.device( - f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu') if device is None else device - else: - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device - # if the model_path starts with https, it will first download models to the folder: realesrgan/weights - if model_path.startswith('https://'): - model_path = load_file_from_url( - url=model_path, model_dir=os.path.join('weights/realesrgan'), progress=True, file_name=None) - loadnet = torch.load(model_path, map_location=torch.device('cpu')) - # prefer to use params_ema - if 'params_ema' in loadnet: - keyname = 'params_ema' - else: - keyname = 'params' - model.load_state_dict(loadnet[keyname], strict=True) - model.eval() - self.model = model.to(self.device) - if self.half: - self.model = self.model.half() - - def pre_process(self, img): - """Pre-process, such as pre-pad and mod pad, so that the images can be divisible - """ - img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float() - self.img = img.unsqueeze(0).to(self.device) - if self.half: - self.img = self.img.half() - - # pre_pad - self.img_pre_pad = self.img.clone() - if self.pre_pad != 0: - self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect') - # mod pad for divisible borders - if self.scale == 2: - self.mod_scale = 2 - elif self.scale == 1: - self.mod_scale = 4 - if self.mod_scale is not None: - self.mod_pad_h, self.mod_pad_w = 0, 0 - _, _, h, w = self.img.size() - if (h % self.mod_scale != 0): - self.mod_pad_h = (self.mod_scale - h % self.mod_scale) - if (w % self.mod_scale != 0): - self.mod_pad_w = (self.mod_scale - w % self.mod_scale) - self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect') - - def process(self): - # model inference - self.output = self.model(self.img) - - def tile_process(self): - """It will first crop input images to tiles, and then process each tile. - Finally, all the processed tiles are merged into one images. 
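tile_process above walks a grid of tile_size tiles, pads each tile by tile_pad before inference, and writes only the unpadded core back into the upscaled canvas. A pure-Python walk-through of that bookkeeping with arbitrary example numbers (no model involved):

import math

width, height, tile, pad, scale = 500, 300, 200, 10, 4
for y in range(math.ceil(height / tile)):
    for x in range(math.ceil(width / tile)):
        x0, y0 = x * tile, y * tile
        x1, y1 = min(x0 + tile, width), min(y0 + tile, height)
        # padded crop that would actually be sent through the model
        px0, py0 = max(x0 - pad, 0), max(y0 - pad, 0)
        px1, py1 = min(x1 + pad, width), min(y1 + pad, height)
        # where the unpadded result lands in the upscaled output canvas
        out = (x0 * scale, y0 * scale, x1 * scale, y1 * scale)
        print((px0, py0, px1, py1), "->", out)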
- - Modified from: https://github.com/ata4/esrgan-launcher - """ - batch, channel, height, width = self.img.shape - output_height = height * self.scale - output_width = width * self.scale - output_shape = (batch, channel, output_height, output_width) - - # start with black image - self.output = self.img.new_zeros(output_shape) - tiles_x = math.ceil(width / self.tile_size) - tiles_y = math.ceil(height / self.tile_size) - - # loop over all tiles - for y in range(tiles_y): - for x in range(tiles_x): - # extract tile from input image - ofs_x = x * self.tile_size - ofs_y = y * self.tile_size - # input tile area on total image - input_start_x = ofs_x - input_end_x = min(ofs_x + self.tile_size, width) - input_start_y = ofs_y - input_end_y = min(ofs_y + self.tile_size, height) - - # input tile area on total image with padding - input_start_x_pad = max(input_start_x - self.tile_pad, 0) - input_end_x_pad = min(input_end_x + self.tile_pad, width) - input_start_y_pad = max(input_start_y - self.tile_pad, 0) - input_end_y_pad = min(input_end_y + self.tile_pad, height) - - # input tile dimensions - input_tile_width = input_end_x - input_start_x - input_tile_height = input_end_y - input_start_y - tile_idx = y * tiles_x + x + 1 - input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad] - - # upscale tile - try: - with torch.no_grad(): - output_tile = self.model(input_tile) - except RuntimeError as error: - print('Error', error) - # print(f'\tTile {tile_idx}/{tiles_x * tiles_y}') - - # output tile area on total image - output_start_x = input_start_x * self.scale - output_end_x = input_end_x * self.scale - output_start_y = input_start_y * self.scale - output_end_y = input_end_y * self.scale - - # output tile area without padding - output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale - output_end_x_tile = output_start_x_tile + input_tile_width * self.scale - output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale - output_end_y_tile = output_start_y_tile + input_tile_height * self.scale - - # put tile into output image - self.output[:, :, output_start_y:output_end_y, - output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile, - output_start_x_tile:output_end_x_tile] - - def post_process(self): - # remove extra pad - if self.mod_scale is not None: - _, _, h, w = self.output.size() - self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale] - # remove prepad - if self.pre_pad != 0: - _, _, h, w = self.output.size() - self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale] - return self.output - - @torch.no_grad() - def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'): - h_input, w_input = img.shape[0:2] - # img: numpy - img = img.astype(np.float32) - if np.max(img) > 256: # 16-bit image - max_range = 65535 - print('\tInput is a 16-bit image') - else: - max_range = 255 - img = img / max_range - if len(img.shape) == 2: # gray image - img_mode = 'L' - img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) - elif img.shape[2] == 4: # RGBA image with alpha channel - img_mode = 'RGBA' - alpha = img[:, :, 3] - img = img[:, :, 0:3] - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - if alpha_upsampler == 'realesrgan': - alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB) - else: - img_mode = 'RGB' - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - # ------------------- process image (without the alpha channel) ------------------- # - try: 
- with torch.no_grad(): - self.pre_process(img) - if self.tile_size > 0: - self.tile_process() - else: - self.process() - output_img_t = self.post_process() - output_img = output_img_t.data.squeeze().float().cpu().clamp_(0, 1).numpy() - output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0)) - if img_mode == 'L': - output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY) - del output_img_t - torch.cuda.empty_cache() - except RuntimeError as error: - output_img = cv2.resize(self.img_pre_pad, (w_input * self.scale, h_input * self.scale), interpolation=cv2.INTER_LINEAR) - print(f"Failed inference for RealESRGAN: {error}") - - # ------------------- process the alpha channel if necessary ------------------- # - if img_mode == 'RGBA': - if alpha_upsampler == 'realesrgan': - self.pre_process(alpha) - if self.tile_size > 0: - self.tile_process() - else: - self.process() - output_alpha = self.post_process() - output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy() - output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0)) - output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY) - else: # use the cv2 resize for alpha channel - h, w = alpha.shape[0:2] - output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR) - - # merge the alpha channel - output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA) - output_img[:, :, 3] = output_alpha - - # ------------------------------ return ------------------------------ # - if max_range == 65535: # 16-bit image - output = (output_img * 65535.0).round().astype(np.uint16) - else: - output = (output_img * 255.0).round().astype(np.uint8) - - if outscale is not None and outscale != float(self.scale): - output = cv2.resize( - output, ( - int(w_input * outscale), - int(h_input * outscale), - ), interpolation=cv2.INTER_LANCZOS4) - - return output, img_mode - - -class PrefetchReader(threading.Thread): - """Prefetch images. - - Args: - img_list (list[str]): A image list of image paths to be read. - num_prefetch_queue (int): Number of prefetch queue. 
- """ - - def __init__(self, img_list, num_prefetch_queue): - super().__init__() - self.que = queue.Queue(num_prefetch_queue) - self.img_list = img_list - - def run(self): - for img_path in self.img_list: - img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) - self.que.put(img) - - self.que.put(None) - - def __next__(self): - next_item = self.que.get() - if next_item is None: - raise StopIteration - return next_item - - def __iter__(self): - return self - - -class IOConsumer(threading.Thread): - - def __init__(self, opt, que, qid): - super().__init__() - self._queue = que - self.qid = qid - self.opt = opt - - def run(self): - while True: - msg = self._queue.get() - if isinstance(msg, str) and msg == 'quit': - break - - output = msg['output'] - save_path = msg['save_path'] - cv2.imwrite(save_path, output) - print(f'IO worker {self.qid} is done.') \ No newline at end of file diff --git a/spaces/leave7/kazunaAI2.0/data_utils.py b/spaces/leave7/kazunaAI2.0/data_utils.py deleted file mode 100644 index 9dfba4a9dfbfbd2b6ed5e771a5ffee4f70419ba3..0000000000000000000000000000000000000000 --- a/spaces/leave7/kazunaAI2.0/data_utils.py +++ /dev/null @@ -1,152 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import commons -from mel_processing import spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text, transform - -# import h5py - - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths, hparams): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.spec_len = hparams.train.max_speclen - self.spk_map = hparams.spk - - random.seed(1234) - random.shuffle(self.audiopaths) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split(os.sep)[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - c = torch.load(filename + ".soft.pt").squeeze(0) - c = torch.repeat_interleave(c, repeats=2, dim=1) - - f0 = np.load(filename + ".f0.npy") - f0 = torch.FloatTensor(f0) - lmin = min(c.size(-1), spec.size(-1), f0.shape[0]) - assert abs(c.size(-1) - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape, filename) - assert abs(lmin - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - assert abs(lmin - c.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - spec, c, f0 = spec[:, :lmin], c[:, :lmin], f0[:lmin] - audio_norm = audio_norm[:, :lmin * 
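TextAudioSpeakerLoader above keeps the spectrogram, content features, f0 and the waveform aligned by always slicing audio in units of hop_length: frame t of the spectrogram covers samples [t*hop, (t+1)*hop). A small PyTorch sketch of that bookkeeping with made-up sizes:

import torch

hop_length, spec_len = 320, 30
spec = torch.randn(1, 513, 100)                  # [1, freq_bins, frames]
audio = torch.randn(1, 100 * hop_length)         # matching waveform

start = 17
end = start + spec_len
spec_crop = spec[:, :, start:end]
audio_crop = audio[:, start * hop_length:end * hop_length]
print(spec_crop.shape[-1], audio_crop.shape[-1] // hop_length)   # 30 30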
self.hop_length] - _spec, _c, _audio_norm, _f0 = spec, c, audio_norm, f0 - while spec.size(-1) < self.spec_len: - spec = torch.cat((spec, _spec), -1) - c = torch.cat((c, _c), -1) - f0 = torch.cat((f0, _f0), -1) - audio_norm = torch.cat((audio_norm, _audio_norm), -1) - start = random.randint(0, spec.size(-1) - self.spec_len) - end = start + self.spec_len - spec = spec[:, start:end] - c = c[:, start:end] - f0 = f0[start:end] - audio_norm = audio_norm[:, start * self.hop_length:end * self.hop_length] - - return c, f0, spec, audio_norm, spk - - def __getitem__(self, index): - return self.get_audio(self.audiopaths[index][0]) - - def __len__(self): - return len(self.audiopaths) - - -class EvalDataLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths, hparams): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.audiopaths = self.audiopaths[:5] - self.spk_map = hparams.spk - - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split(os.sep)[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - c = torch.load(filename + ".soft.pt").squeeze(0) - - c = torch.repeat_interleave(c, repeats=2, dim=1) - - f0 = np.load(filename + ".f0.npy") - f0 = torch.FloatTensor(f0) - lmin = min(c.size(-1), spec.size(-1), f0.shape[0]) - assert abs(c.size(-1) - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - assert abs(f0.shape[0] - spec.shape[-1]) < 4, (c.size(-1), spec.size(-1), f0.shape) - spec, c, f0 = spec[:, :lmin], c[:, :lmin], f0[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - - return c, f0, spec, audio_norm, spk - - def __getitem__(self, index): - return self.get_audio(self.audiopaths[index][0]) - - def __len__(self): - return len(self.audiopaths) - diff --git a/spaces/lewisliuX123/wechatllama2/docker/build.debian.sh b/spaces/lewisliuX123/wechatllama2/docker/build.debian.sh deleted file mode 100644 index a5285f39813426a2d63eb01982229b23ec09dba2..0000000000000000000000000000000000000000 --- a/spaces/lewisliuX123/wechatllama2/docker/build.debian.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -CHATGPT_ON_WECHAT_TAG=1.0.2 - -docker build -f Dockerfile.debian \ - --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \ - -t zhayujie/chatgpt-on-wechat . 
- -docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-debian \ No newline at end of file diff --git a/spaces/lewiswu1209/MockingBird/vocoder/hifigan/env.py b/spaces/lewiswu1209/MockingBird/vocoder/hifigan/env.py deleted file mode 100644 index 8f0d306d518d0d86a40d7ee992fbad6f04fe875f..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/vocoder/hifigan/env.py +++ /dev/null @@ -1,8 +0,0 @@ -import os -import shutil - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Ankhon Dekhi Movie Download 720p Torrents.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Ankhon Dekhi Movie Download 720p Torrents.md deleted file mode 100644 index c8f70e8cb68bfbea68f58db951158120038cde82..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Ankhon Dekhi Movie Download 720p Torrents.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Ankhon Dekhi Movie Download 720p Torrents


Download Zip: https://bytlly.com/2uGvOV



      - -Ankhon Dekhi Full Movie Download 720p Kickass Torrent Ankhon Dekhi (2014) Full Hindi . Full Hindi Movie Watch Online And Download HD . 1fdad05405
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/City Car Driving 1.4.1 Crack [BETTER].md b/spaces/lincquiQcaudo/Top-20-Diffusion/City Car Driving 1.4.1 Crack [BETTER].md deleted file mode 100644 index 8a6e9c4edd81d0c5990d8ef3842bf2b0178135e4..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/City Car Driving 1.4.1 Crack [BETTER].md +++ /dev/null @@ -1,6 +0,0 @@ -

      city car driving 1.4.1 crack


      DOWNLOAD 🗸 https://bytlly.com/2uGyKj



      - -City Car Driving Home Edition 1. Setelah proses instal selesai masukan serial number. Copy cracknya ke folder tempat city car driving yang. Terlebih dahulu ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free Download !!LINK!!.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free Download !!LINK!!.md deleted file mode 100644 index 2ee319080ebf62bf788fa0e699a9d400ad834df3..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free Download !!LINK!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free Download


      DOWNLOAD ►►►►► https://bytlly.com/2uGxxV



      - -Free download cracked Adobe Acrobat XI Pro Final full version with torrent ... Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Isumsoft Windows Password Refixer Ultimate Crack !EXCLUSIVE!.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Isumsoft Windows Password Refixer Ultimate Crack !EXCLUSIVE!.md deleted file mode 100644 index 83b636e04a82f7b95973dfe768b196ae32b45f0d..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Isumsoft Windows Password Refixer Ultimate Crack !EXCLUSIVE!.md +++ /dev/null @@ -1,12 +0,0 @@ -

      isumsoft windows password refixer ultimate crack


      Download ->->->-> https://bytlly.com/2uGxCK



      -
      -12 Apr 2020 - Windows Password Recovery Tool Ultimate Crack Free Download The world's first home windows password recovery software to reset ... Windows Password Recovery Tool - free download Windows ... -13 Jul 2017 ... -Download Windows Password Recovery Tool - https://goo.gl/NVqnvF. -In this video I will show you how to recover a deleted password on Windows 7 ... -6 Jul 2015 ... -Download Windows Password Recovery Tool 1.0 [Crack] - https://goo.gl/vx2fT5 Windows Password Recovery Tool 1.0 is the first and only, fastest and most complete tool to recover forgotten passwords ... -Windows Password Recovery Tool - download free ... 8a78ff9644
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator FSX 737 Pilot In Command [English] [P Bot.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator FSX 737 Pilot In Command [English] [P Bot.md deleted file mode 100644 index e9ba78702c347ce4e50b01e945b94b6ce384f880..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator FSX 737 Pilot In Command [English] [P Bot.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Microsoft Flight Simulator FSX 737 Pilot In Command [English] [P bot


Download File: https://bytlly.com/2uGx2Z



      - -112 accident investigations, including Air Algerie, a Boeing 737 ... A longer chain of events, as analyzed downstream (or at the bot- ... taxied into gate C-3, and because the pilot in command did not ... conducted in simulators instead of an actual airplane. ... British Military Aircraft Accidents: The Last 25 Years, Ian Allen. 1fdad05405
      -
      -
      -

      diff --git a/spaces/lixq/bingo61/Dockerfile b/spaces/lixq/bingo61/Dockerfile deleted file mode 100644 index 3aa2b29b5fc4fa8b8238955acd7f1fde13ce5e1a..0000000000000000000000000000000000000000 --- a/spaces/lixq/bingo61/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM node:18 - - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set up a new user named "user" with user ID 1000 -RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME - -# Switch to the "user" user -USER user - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Install app dependencies -# A wildcard is used to ensure both package.json AND package-lock.json are copied -# where available (npm@5+) -COPY --chown=user package*.json $HOME/app/ - -RUN npm install - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app/ - -RUN npm run build - -ENV PORT 7860 -EXPOSE 7860 - -CMD npm start diff --git a/spaces/lqy09/GT/public/GTest/main.html b/spaces/lqy09/GT/public/GTest/main.html deleted file mode 100644 index d90601a2a2c1179c82e048cfb8ec166b783dbc57..0000000000000000000000000000000000000000 --- a/spaces/lqy09/GT/public/GTest/main.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - GTest - - - - - -


      -
      - -
      -
      -
      -
      -
      - -
      - - - - - \ No newline at end of file diff --git a/spaces/luost26/DiffAb/diffab/tools/relax/pyrosetta_relaxer.py b/spaces/luost26/DiffAb/diffab/tools/relax/pyrosetta_relaxer.py deleted file mode 100644 index 2696f313850d2faa1695883904884e9ccb9cd964..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/diffab/tools/relax/pyrosetta_relaxer.py +++ /dev/null @@ -1,189 +0,0 @@ -# pyright: reportMissingImports=false -import os -import time -import pyrosetta -from pyrosetta.rosetta.protocols.relax import FastRelax -from pyrosetta.rosetta.core.pack.task import TaskFactory -from pyrosetta.rosetta.core.pack.task import operation -from pyrosetta.rosetta.core.select import residue_selector as selections -from pyrosetta.rosetta.core.select.movemap import MoveMapFactory, move_map_action -pyrosetta.init(' '.join([ - '-mute', 'all', - '-use_input_sc', - '-ignore_unrecognized_res', - '-ignore_zero_occupancy', 'false', - '-load_PDB_components', 'false', - '-relax:default_repeats', '2', - '-no_fconfig', -])) - -from diffab.tools.relax.base import RelaxTask - - -def current_milli_time(): - return round(time.time() * 1000) - - -def parse_residue_position(p): - icode = None - if not p[-1].isnumeric(): # Has ICODE - icode = p[-1] - - for i, c in enumerate(p): - if c.isnumeric(): - break - chain = p[:i] - resseq = int(p[i:]) - - if icode is not None: - return chain, resseq, icode - else: - return chain, resseq - - -def get_scorefxn(scorefxn_name:str): - """ - Gets the scorefxn with appropriate corrections. - Taken from: https://gist.github.com/matteoferla/b33585f3aeab58b8424581279e032550 - """ - import pyrosetta - - corrections = { - 'beta_july15': False, - 'beta_nov16': False, - 'gen_potential': False, - 'restore_talaris_behavior': False, - } - if 'beta_july15' in scorefxn_name or 'beta_nov15' in scorefxn_name: - # beta_july15 is ref2015 - corrections['beta_july15'] = True - elif 'beta_nov16' in scorefxn_name: - corrections['beta_nov16'] = True - elif 'genpot' in scorefxn_name: - corrections['gen_potential'] = True - pyrosetta.rosetta.basic.options.set_boolean_option('corrections:beta_july15', True) - elif 'talaris' in scorefxn_name: #2013 and 2014 - corrections['restore_talaris_behavior'] = True - else: - pass - for corr, value in corrections.items(): - pyrosetta.rosetta.basic.options.set_boolean_option(f'corrections:{corr}', value) - return pyrosetta.create_score_function(scorefxn_name) - - -class RelaxRegion(object): - - def __init__(self, scorefxn='ref2015', max_iter=1000, subset='nbrs', move_bb=True): - super().__init__() - self.scorefxn = get_scorefxn(scorefxn) - self.fast_relax = FastRelax() - self.fast_relax.set_scorefxn(self.scorefxn) - self.fast_relax.max_iter(max_iter) - assert subset in ('all', 'target', 'nbrs') - self.subset = subset - self.move_bb = move_bb - - def __call__(self, pdb_path, flexible_residue_first, flexible_residue_last): - pose = pyrosetta.pose_from_pdb(pdb_path) - start_t = current_milli_time() - original_pose = pose.clone() - - tf = TaskFactory() - tf.push_back(operation.InitializeFromCommandline()) - tf.push_back(operation.RestrictToRepacking()) # Only allow residues to repack. No design at any position. 
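parse_residue_position above splits a PDB-style residue spec such as "H100" into (chain, resseq) and is meant to return (chain, resseq, icode) for specs like "L52A"; note that as written it passes the full tail, insertion code included, to int(), so the icode branch would raise ValueError. A standalone sketch of the intended parsing that strips the insertion code first (a hypothetical re-implementation, not the original function):

def parse_residue_position(p):
    # "H100" -> ('H', 100); "L52A" -> ('L', 52, 'A')
    icode = p[-1] if not p[-1].isnumeric() else None
    for i, c in enumerate(p):
        if c.isnumeric():
            break
    chain = p[:i]
    resseq = int(p[i:] if icode is None else p[i:-1])   # drop the icode before int()
    return (chain, resseq, icode) if icode else (chain, resseq)

print(parse_residue_position("H100"))   # ('H', 100)
print(parse_residue_position("L52A"))   # ('L', 52, 'A')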
- - # Create selector for the region to be relaxed - # Turn off design and repacking on irrelevant positions - if flexible_residue_first[-1] == ' ': - flexible_residue_first = flexible_residue_first[:-1] - if flexible_residue_last[-1] == ' ': - flexible_residue_last = flexible_residue_last[:-1] - if self.subset != 'all': - gen_selector = selections.ResidueIndexSelector() - gen_selector.set_index_range( - pose.pdb_info().pdb2pose(*flexible_residue_first), - pose.pdb_info().pdb2pose(*flexible_residue_last), - ) - nbr_selector = selections.NeighborhoodResidueSelector() - nbr_selector.set_focus_selector(gen_selector) - nbr_selector.set_include_focus_in_subset(True) - - if self.subset == 'nbrs': - subset_selector = nbr_selector - elif self.subset == 'target': - subset_selector = gen_selector - - prevent_repacking_rlt = operation.PreventRepackingRLT() - prevent_subset_repacking = operation.OperateOnResidueSubset( - prevent_repacking_rlt, - subset_selector, - flip_subset=True, - ) - tf.push_back(prevent_subset_repacking) - - scorefxn = self.scorefxn - fr = self.fast_relax - - pose = original_pose.clone() - pos_list = pyrosetta.rosetta.utility.vector1_unsigned_long() - for pos in range(pose.pdb_info().pdb2pose(*flexible_residue_first), pose.pdb_info().pdb2pose(*flexible_residue_last)+1): - pos_list.append(pos) - # basic_idealize(pose, pos_list, scorefxn, fast=True) - - mmf = MoveMapFactory() - if self.move_bb: - mmf.add_bb_action(move_map_action.mm_enable, gen_selector) - mmf.add_chi_action(move_map_action.mm_enable, subset_selector) - mm = mmf.create_movemap_from_pose(pose) - - fr.set_movemap(mm) - fr.set_task_factory(tf) - fr.apply(pose) - - e_before = scorefxn(original_pose) - e_relax = scorefxn(pose) - # print('\n\n[Finished in %.2f secs]' % ((current_milli_time() - start_t) / 1000)) - # print(' > Energy (before): %.4f' % scorefxn(original_pose)) - # print(' > Energy (optimized): %.4f' % scorefxn(pose)) - return pose, e_before, e_relax - - -def run_pyrosetta(task: RelaxTask): - if not task.can_proceed() : - return task - if task.update_if_finished('rosetta'): - return task - - minimizer = RelaxRegion() - pose_min, _, _ = minimizer( - pdb_path = task.current_path, - flexible_residue_first = task.flexible_residue_first, - flexible_residue_last = task.flexible_residue_last, - ) - - out_path = task.set_current_path_tag('rosetta') - pose_min.dump_pdb(out_path) - task.mark_success() - return task - - -def run_pyrosetta_fixbb(task: RelaxTask): - if not task.can_proceed() : - return task - if task.update_if_finished('fixbb'): - return task - - minimizer = RelaxRegion(move_bb=False) - pose_min, _, _ = minimizer( - pdb_path = task.current_path, - flexible_residue_first = task.flexible_residue_first, - flexible_residue_last = task.flexible_residue_last, - ) - - out_path = task.set_current_path_tag('fixbb') - pose_min.dump_pdb(out_path) - task.mark_success() - return task - - - diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/losses.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/losses.py deleted file mode 100644 index dbd513dde6c65389672378c07e1e470e6464603d..0000000000000000000000000000000000000000 --- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/losses.py +++ /dev/null @@ -1,423 +0,0 @@ -import math -import torch -from torch import autograd as autograd -from torch import nn as nn -from torch.nn import functional as F - -from basicsr.archs.vgg_arch import VGGFeatureExtractor -from basicsr.utils.registry import LOSS_REGISTRY -from .loss_util import 
weighted_loss - -_reduction_modes = ['none', 'mean', 'sum'] - - -@weighted_loss -def l1_loss(pred, target): - return F.l1_loss(pred, target, reduction='none') - - -@weighted_loss -def mse_loss(pred, target): - return F.mse_loss(pred, target, reduction='none') - - -@weighted_loss -def charbonnier_loss(pred, target, eps=1e-12): - return torch.sqrt((pred - target)**2 + eps) - - -@LOSS_REGISTRY.register() -class L1Loss(nn.Module): - """L1 (mean absolute error, MAE) loss. - - Args: - loss_weight (float): Loss weight for L1 loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - super(L1Loss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. - """ - return self.loss_weight * l1_loss(pred, target, weight, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class MSELoss(nn.Module): - """MSE (L2) loss. - - Args: - loss_weight (float): Loss weight for MSE loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - super(MSELoss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. - """ - return self.loss_weight * mse_loss(pred, target, weight, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class CharbonnierLoss(nn.Module): - """Charbonnier loss (one variant of Robust L1Loss, a differentiable - variant of L1Loss). - - Described in "Deep Laplacian Pyramid Networks for Fast and Accurate - Super-Resolution". - - Args: - loss_weight (float): Loss weight for L1 loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - eps (float): A value used to control the curvature near zero. - Default: 1e-12. - """ - - def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12): - super(CharbonnierLoss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - self.eps = eps - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise - weights. Default: None. 
- """ - return self.loss_weight * charbonnier_loss(pred, target, weight, eps=self.eps, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class WeightedTVLoss(L1Loss): - """Weighted TV loss. - - Args: - loss_weight (float): Loss weight. Default: 1.0. - """ - - def __init__(self, loss_weight=1.0): - super(WeightedTVLoss, self).__init__(loss_weight=loss_weight) - - def forward(self, pred, weight=None): - y_diff = super(WeightedTVLoss, self).forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=weight[:, :, :-1, :]) - x_diff = super(WeightedTVLoss, self).forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=weight[:, :, :, :-1]) - - loss = x_diff + y_diff - - return loss - - -@LOSS_REGISTRY.register() -class PerceptualLoss(nn.Module): - """Perceptual loss with commonly used style loss. - - Args: - layer_weights (dict): The weight for each layer of vgg feature. - Here is an example: {'conv5_4': 1.}, which means the conv5_4 - feature layer (before relu5_4) will be extracted with weight - 1.0 in calculting losses. - vgg_type (str): The type of vgg network used as feature extractor. - Default: 'vgg19'. - use_input_norm (bool): If True, normalize the input image in vgg. - Default: True. - range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. - Default: False. - perceptual_weight (float): If `perceptual_weight > 0`, the perceptual - loss will be calculated and the loss will multiplied by the - weight. Default: 1.0. - style_weight (float): If `style_weight > 0`, the style loss will be - calculated and the loss will multiplied by the weight. - Default: 0. - criterion (str): Criterion used for perceptual loss. Default: 'l1'. - """ - - def __init__(self, - layer_weights, - vgg_type='vgg19', - use_input_norm=True, - range_norm=False, - perceptual_weight=1.0, - style_weight=0., - criterion='l1'): - super(PerceptualLoss, self).__init__() - self.perceptual_weight = perceptual_weight - self.style_weight = style_weight - self.layer_weights = layer_weights - self.vgg = VGGFeatureExtractor( - layer_name_list=list(layer_weights.keys()), - vgg_type=vgg_type, - use_input_norm=use_input_norm, - range_norm=range_norm) - - self.criterion_type = criterion - if self.criterion_type == 'l1': - self.criterion = torch.nn.L1Loss() - elif self.criterion_type == 'l2': - self.criterion = torch.nn.L2loss() - elif self.criterion_type == 'fro': - self.criterion = None - else: - raise NotImplementedError(f'{criterion} criterion has not been supported.') - - def forward(self, x, gt): - """Forward function. - - Args: - x (Tensor): Input tensor with shape (n, c, h, w). - gt (Tensor): Ground-truth tensor with shape (n, c, h, w). - - Returns: - Tensor: Forward results. 
- """ - # extract vgg features - x_features = self.vgg(x) - gt_features = self.vgg(gt.detach()) - - # calculate perceptual loss - if self.perceptual_weight > 0: - percep_loss = 0 - for k in x_features.keys(): - if self.criterion_type == 'fro': - percep_loss += torch.norm(x_features[k] - gt_features[k], p='fro') * self.layer_weights[k] - else: - percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k] - percep_loss *= self.perceptual_weight - else: - percep_loss = None - - # calculate style loss - if self.style_weight > 0: - style_loss = 0 - for k in x_features.keys(): - if self.criterion_type == 'fro': - style_loss += torch.norm( - self._gram_mat(x_features[k]) - self._gram_mat(gt_features[k]), p='fro') * self.layer_weights[k] - else: - style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat( - gt_features[k])) * self.layer_weights[k] - style_loss *= self.style_weight - else: - style_loss = None - - return percep_loss, style_loss - - def _gram_mat(self, x): - """Calculate Gram matrix. - - Args: - x (torch.Tensor): Tensor with shape of (n, c, h, w). - - Returns: - torch.Tensor: Gram matrix. - """ - n, c, h, w = x.size() - features = x.view(n, c, w * h) - features_t = features.transpose(1, 2) - gram = features.bmm(features_t) / (c * h * w) - return gram - - -@LOSS_REGISTRY.register() -class GANLoss(nn.Module): - """Define GAN loss. - - Args: - gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'. - real_label_val (float): The value for real label. Default: 1.0. - fake_label_val (float): The value for fake label. Default: 0.0. - loss_weight (float): Loss weight. Default: 1.0. - Note that loss_weight is only for generators; and it is always 1.0 - for discriminators. - """ - - def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0): - super(GANLoss, self).__init__() - self.gan_type = gan_type - self.loss_weight = loss_weight - self.real_label_val = real_label_val - self.fake_label_val = fake_label_val - - if self.gan_type == 'vanilla': - self.loss = nn.BCEWithLogitsLoss() - elif self.gan_type == 'lsgan': - self.loss = nn.MSELoss() - elif self.gan_type == 'wgan': - self.loss = self._wgan_loss - elif self.gan_type == 'wgan_softplus': - self.loss = self._wgan_softplus_loss - elif self.gan_type == 'hinge': - self.loss = nn.ReLU() - else: - raise NotImplementedError(f'GAN type {self.gan_type} is not implemented.') - - def _wgan_loss(self, input, target): - """wgan loss. - - Args: - input (Tensor): Input tensor. - target (bool): Target label. - - Returns: - Tensor: wgan loss. - """ - return -input.mean() if target else input.mean() - - def _wgan_softplus_loss(self, input, target): - """wgan loss with soft plus. softplus is a smooth approximation to the - ReLU function. - - In StyleGAN2, it is called: - Logistic loss for discriminator; - Non-saturating loss for generator. - - Args: - input (Tensor): Input tensor. - target (bool): Target label. - - Returns: - Tensor: wgan loss. - """ - return F.softplus(-input).mean() if target else F.softplus(input).mean() - - def get_target_label(self, input, target_is_real): - """Get target label. - - Args: - input (Tensor): Input tensor. - target_is_real (bool): Whether the target is real or fake. - - Returns: - (bool | Tensor): Target tensor. Return bool for wgan, otherwise, - return Tensor. 
- """ - - if self.gan_type in ['wgan', 'wgan_softplus']: - return target_is_real - target_val = (self.real_label_val if target_is_real else self.fake_label_val) - return input.new_ones(input.size()) * target_val - - def forward(self, input, target_is_real, is_disc=False): - """ - Args: - input (Tensor): The input for the loss module, i.e., the network - prediction. - target_is_real (bool): Whether the targe is real or fake. - is_disc (bool): Whether the loss for discriminators or not. - Default: False. - - Returns: - Tensor: GAN loss value. - """ - target_label = self.get_target_label(input, target_is_real) - if self.gan_type == 'hinge': - if is_disc: # for discriminators in hinge-gan - input = -input if target_is_real else input - loss = self.loss(1 + input).mean() - else: # for generators in hinge-gan - loss = -input.mean() - else: # other gan types - loss = self.loss(input, target_label) - - # loss_weight is always 1.0 for discriminators - return loss if is_disc else loss * self.loss_weight - - -def r1_penalty(real_pred, real_img): - """R1 regularization for discriminator. The core idea is to - penalize the gradient on real data alone: when the - generator distribution produces the true data distribution - and the discriminator is equal to 0 on the data manifold, the - gradient penalty ensures that the discriminator cannot create - a non-zero gradient orthogonal to the data manifold without - suffering a loss in the GAN game. - - Ref: - Eq. 9 in Which training methods for GANs do actually converge. - """ - grad_real = autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0] - grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean() - return grad_penalty - - -def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01): - noise = torch.randn_like(fake_img) / math.sqrt(fake_img.shape[2] * fake_img.shape[3]) - grad = autograd.grad(outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0] - path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1)) - - path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length) - - path_penalty = (path_lengths - path_mean).pow(2).mean() - - return path_penalty, path_lengths.detach().mean(), path_mean.detach() - - -def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None): - """Calculate gradient penalty for wgan-gp. - - Args: - discriminator (nn.Module): Network for the discriminator. - real_data (Tensor): Real input data. - fake_data (Tensor): Fake input data. - weight (Tensor): Weight tensor. Default: None. - - Returns: - Tensor: A tensor for gradient penalty. - """ - - batch_size = real_data.size(0) - alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1)) - - # interpolate between real_data and fake_data - interpolates = alpha * real_data + (1. 
- alpha) * fake_data - interpolates = autograd.Variable(interpolates, requires_grad=True) - - disc_interpolates = discriminator(interpolates) - gradients = autograd.grad( - outputs=disc_interpolates, - inputs=interpolates, - grad_outputs=torch.ones_like(disc_interpolates), - create_graph=True, - retain_graph=True, - only_inputs=True)[0] - - if weight is not None: - gradients = gradients * weight - - gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean() - if weight is not None: - gradients_penalty /= torch.mean(weight) - - return gradients_penalty diff --git a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/base_dataset.py b/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/base_dataset.py deleted file mode 100644 index 57595dd0bf9dd20e333bd78a6a97013b9a6d0a43..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/base_dataset.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import torch.utils.data as data -from PIL import Image -import torchvision.transforms as transforms -import numpy as np -import random - - -class BaseDataset(data.Dataset): - def __init__(self): - super(BaseDataset, self).__init__() - - @staticmethod - def modify_commandline_options(parser, is_train): - return parser - - def initialize(self, opt): - pass - - -def get_params(opt, size): - w, h = size - new_h = h - new_w = w - if opt.preprocess_mode == "resize_and_crop": - new_h = new_w = opt.load_size - elif opt.preprocess_mode == "scale_width_and_crop": - new_w = opt.load_size - new_h = opt.load_size * h // w - elif opt.preprocess_mode == "scale_shortside_and_crop": - ss, ls = min(w, h), max(w, h) # shortside and longside - width_is_shorter = w == ss - ls = int(opt.load_size * ls / ss) - new_w, new_h = (ss, ls) if width_is_shorter else (ls, ss) - - x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) - y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) - - flip = random.random() > 0.5 - return {"crop_pos": (x, y), "flip": flip} - - -def get_transform(opt, params, method=Image.BICUBIC, normalize=True, toTensor=True): - transform_list = [] - if "resize" in opt.preprocess_mode: - osize = [opt.load_size, opt.load_size] - transform_list.append(transforms.Resize(osize, interpolation=method)) - elif "scale_width" in opt.preprocess_mode: - transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method))) - elif "scale_shortside" in opt.preprocess_mode: - transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, method))) - - if "crop" in opt.preprocess_mode: - transform_list.append(transforms.Lambda(lambda img: __crop(img, params["crop_pos"], opt.crop_size))) - - if opt.preprocess_mode == "none": - base = 32 - transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method))) - - if opt.preprocess_mode == "fixed": - w = opt.crop_size - h = round(opt.crop_size / opt.aspect_ratio) - transform_list.append(transforms.Lambda(lambda img: __resize(img, w, h, method))) - - if opt.isTrain and not opt.no_flip: - transform_list.append(transforms.Lambda(lambda img: __flip(img, params["flip"]))) - - if toTensor: - transform_list += [transforms.ToTensor()] - - if normalize: - transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - return transforms.Compose(transform_list) - - -def normalize(): - return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - - -def __resize(img, w, h, method=Image.BICUBIC): - 
return img.resize((w, h), method) - - -def __make_power_2(img, base, method=Image.BICUBIC): - ow, oh = img.size - h = int(round(oh / base) * base) - w = int(round(ow / base) * base) - if (h == oh) and (w == ow): - return img - return img.resize((w, h), method) - - -def __scale_width(img, target_width, method=Image.BICUBIC): - ow, oh = img.size - if ow == target_width: - return img - w = target_width - h = int(target_width * oh / ow) - return img.resize((w, h), method) - - -def __scale_shortside(img, target_width, method=Image.BICUBIC): - ow, oh = img.size - ss, ls = min(ow, oh), max(ow, oh) # shortside and longside - width_is_shorter = ow == ss - if ss == target_width: - return img - ls = int(target_width * ls / ss) - nw, nh = (ss, ls) if width_is_shorter else (ls, ss) - return img.resize((nw, nh), method) - - -def __crop(img, pos, size): - ow, oh = img.size - x1, y1 = pos - tw = th = size - return img.crop((x1, y1, x1 + tw, y1 + th)) - - -def __flip(img, flip): - if flip: - return img.transpose(Image.FLIP_LEFT_RIGHT) - return img diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/app.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/app.py deleted file mode 100644 index beffa167a27090b0f69a751f2e9198370aec5d6f..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/app.py +++ /dev/null @@ -1,165 +0,0 @@ -import gradio as gr -import os -import cv2 -import shutil -import sys -from subprocess import call -import torch -import numpy as np -from skimage import color -import torchvision.transforms as transforms -from PIL import Image -import torch - -os.system("pip install dlib") -os.system('bash setup.sh') - -def lab2rgb(L, AB): - """Convert an Lab tensor image to a RGB numpy output - Parameters: - L (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array) - AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array) - - Returns: - rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array) - """ - AB2 = AB * 110.0 - L2 = (L + 1.0) * 50.0 - Lab = torch.cat([L2, AB2], dim=1) - Lab = Lab[0].data.cpu().float().numpy() - Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0)) - rgb = color.lab2rgb(Lab) * 255 - return rgb - -def get_transform(model_name,params=None, grayscale=False, method=Image.BICUBIC): - #params - preprocess = 'resize' - load_size = 256 - crop_size = 256 - transform_list = [] - if grayscale: - transform_list.append(transforms.Grayscale(1)) - if model_name == "Pix2Pix Unet 256": - osize = [load_size, load_size] - transform_list.append(transforms.Resize(osize, method)) - # if 'crop' in preprocess: - # if params is None: - # transform_list.append(transforms.RandomCrop(crop_size)) - - return transforms.Compose(transform_list) - -def inferRestoration(img, model_name): - #if model_name == "Pix2Pix": - model = torch.hub.load('manhkhanhad/ImageRestorationInfer', 'pix2pixRestoration_unet256') - transform_list = [ - transforms.ToTensor(), - transforms.Resize([256,256], Image.BICUBIC), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - ] - transform = transforms.Compose(transform_list) - img = transform(img) - img = torch.unsqueeze(img, 0) - result = model(img) - result = result[0].detach() - result = (result +1)/2.0 - - result = transforms.ToPILImage()(result) - return result - -def inferColorization(img,model_name): - #print(model_name) - if model_name == "Pix2Pix Resnet 9block": - model = torch.hub.load('manhkhanhad/ImageRestorationInfer', 
'pix2pixColorization_resnet9b') - elif model_name == "Pix2Pix Unet 256": - model = torch.hub.load('manhkhanhad/ImageRestorationInfer', 'pix2pixColorization_unet256') - elif model_name == "Deoldify": - model = torch.hub.load('manhkhanhad/ImageRestorationInfer', 'DeOldifyColorization') - transform_list = [ - transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,)) - ] - transform = transforms.Compose(transform_list) - #a = transforms.ToTensor()(a) - img = img.convert('L') - img = transform(img) - img = torch.unsqueeze(img, 0) - result = model(img) - - result = result[0].detach() - result = (result +1)/2.0 - - #img = transforms.Grayscale(3)(img) - #img = transforms.ToTensor()(img) - #img = torch.unsqueeze(img, 0) - #result = model(img) - #result = torch.clip(result, min=0, max=1) - image_pil = transforms.ToPILImage()(result) - return image_pil - - transform_seq = get_transform(model_name) - img = transform_seq(img) - # if model_name == "Pix2Pix Unet 256": - # img.resize((256,256)) - img = np.array(img) - lab = color.rgb2lab(img).astype(np.float32) - lab_t = transforms.ToTensor()(lab) - A = lab_t[[0], ...] / 50.0 - 1.0 - B = lab_t[[1, 2], ...] / 110.0 - #data = {'A': A, 'B': B, 'A_paths': "", 'B_paths': ""} - L = torch.unsqueeze(A, 0) - #print(L.shape) - ab = model(L) - Lab = lab2rgb(L, ab).astype(np.uint8) - image_pil = Image.fromarray(Lab) - #image_pil.save('test.png') - #print(Lab.shape) - return image_pil - -def colorizaition(image,model_name): - image = Image.fromarray(image) - result = inferColorization(image,model_name) - return result - - -def run_cmd(command): - try: - call(command, shell=True) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) - -def run(image,Restoration_mode, Colorizaition_mode): - if Restoration_mode == "BOPBTL": - if os.path.isdir("Temp"): - shutil.rmtree("Temp") - - os.makedirs("Temp") - os.makedirs("Temp/input") - print(type(image)) - cv2.imwrite("Temp/input/input_img.png", image) - - command = ("python run.py --input_folder " - + "Temp/input" - + " --output_folder " - + "Temp" - + " --GPU " - + "-1" - + " --with_scratch") - run_cmd(command) - - result_restoration = Image.open("Temp/final_output/input_img.png") - shutil.rmtree("Temp") - - elif Restoration_mode == "Pix2Pix": - result_restoration = inferRestoration(image, Restoration_mode) - print("Restoration_mode",Restoration_mode) - - result_colorization = inferColorization(result_restoration,Colorizaition_mode) - - return result_colorization - -examples = [['example/1.jpeg',"BOPBTL","Deoldify"],['example/2.jpg',"BOPBTL","Deoldify"],['example/3.jpg',"BOPBTL","Deoldify"],['example/4.jpg',"BOPBTL","Deoldify"]] -iface = gr.Interface(run, - [gr.inputs.Image(),gr.inputs.Radio(["BOPBTL", "Pix2Pix"]),gr.inputs.Radio(["Deoldify", "Pix2Pix Resnet 9block","Pix2Pix Unet 256"])], - outputs="image", - examples=examples).launch(debug=True,share=False) \ No newline at end of file diff --git a/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py b/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py deleted file mode 100644 index 47ab4b1594faf1e9f1ce962fb980d80295b1f079..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-"""Base classes for the datasets that also provide non-audio metadata, -e.g. description, text transcription etc. -""" -from dataclasses import dataclass -import logging -import math -import re -import typing as tp - -import torch - -from .audio_dataset import AudioDataset, AudioMeta -from ..environment import AudioCraftEnvironment -from ..modules.conditioners import SegmentWithAttributes, ConditioningAttributes - - -logger = logging.getLogger(__name__) - - -def _clusterify_meta(meta: AudioMeta) -> AudioMeta: - """Monkey-patch meta to match cluster specificities.""" - meta.path = AudioCraftEnvironment.apply_dataset_mappers(meta.path) - if meta.info_path is not None: - meta.info_path.zip_path = AudioCraftEnvironment.apply_dataset_mappers(meta.info_path.zip_path) - return meta - - -def clusterify_all_meta(meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]: - """Monkey-patch all meta to match cluster specificities.""" - return [_clusterify_meta(m) for m in meta] - - -@dataclass -class AudioInfo(SegmentWithAttributes): - """Dummy SegmentInfo with empty attributes. - - The InfoAudioDataset is expected to return metadata that inherits - from SegmentWithAttributes class and can return conditioning attributes. - - This basically guarantees all datasets will be compatible with current - solver that contain conditioners requiring this. - """ - audio_tokens: tp.Optional[torch.Tensor] = None # populated when using cached batch for training a LM. - - def to_condition_attributes(self) -> ConditioningAttributes: - return ConditioningAttributes() - - -class InfoAudioDataset(AudioDataset): - """AudioDataset that always returns metadata as SegmentWithAttributes along with the audio waveform. - - See `audiocraft.data.audio_dataset.AudioDataset` for initialization arguments. 
- """ - def __init__(self, meta: tp.List[AudioMeta], **kwargs): - super().__init__(clusterify_all_meta(meta), **kwargs) - - def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentWithAttributes]]: - if not self.return_info: - wav = super().__getitem__(index) - assert isinstance(wav, torch.Tensor) - return wav - wav, meta = super().__getitem__(index) - return wav, AudioInfo(**meta.to_dict()) - - -def get_keyword_or_keyword_list(value: tp.Optional[str]) -> tp.Union[tp.Optional[str], tp.Optional[tp.List[str]]]: - """Preprocess a single keyword or possible a list of keywords.""" - if isinstance(value, list): - return get_keyword_list(value) - else: - return get_keyword(value) - - -def get_string(value: tp.Optional[str]) -> tp.Optional[str]: - """Preprocess a single keyword.""" - if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None': - return None - else: - return value.strip() - - -def get_keyword(value: tp.Optional[str]) -> tp.Optional[str]: - """Preprocess a single keyword.""" - if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None': - return None - else: - return value.strip().lower() - - -def get_keyword_list(values: tp.Union[str, tp.List[str]]) -> tp.Optional[tp.List[str]]: - """Preprocess a list of keywords.""" - if isinstance(values, str): - values = [v.strip() for v in re.split(r'[,\s]', values)] - elif isinstance(values, float) and math.isnan(values): - values = [] - if not isinstance(values, list): - logger.debug(f"Unexpected keyword list {values}") - values = [str(values)] - - kws = [get_keyword(v) for v in values] - kw_list = [k for k in kws if k is not None] - if len(kw_list) == 0: - return None - else: - return kw_list diff --git a/spaces/matthoffner/open-codetree/components/Loader.tsx b/spaces/matthoffner/open-codetree/components/Loader.tsx deleted file mode 100644 index 4be0f62969723895e409c78b9b143380418bd20f..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/open-codetree/components/Loader.tsx +++ /dev/null @@ -1,64 +0,0 @@ -import React from "react"; - -interface LoaderProps { - size?: number; - color?: string; -} - -const Loader = ({ size = 50, color = "#FFFFFF" }: LoaderProps) => { - return ( -
      - ); -}; - -export default Loader; diff --git a/spaces/matthoffner/starchat-ui/types/plugin.ts b/spaces/matthoffner/starchat-ui/types/plugin.ts deleted file mode 100644 index 43da6c07b0f5c6ee022225babe72cb58ff0939f4..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/starchat-ui/types/plugin.ts +++ /dev/null @@ -1,39 +0,0 @@ -import { KeyValuePair } from './data'; - -export interface Plugin { - id: PluginID; - name: PluginName; - requiredKeys: KeyValuePair[]; -} - -export interface PluginKey { - pluginId: PluginID; - requiredKeys: KeyValuePair[]; -} - -export enum PluginID { - GOOGLE_SEARCH = 'google-search', -} - -export enum PluginName { - GOOGLE_SEARCH = 'Google Search', -} - -export const Plugins: Record = { - [PluginID.GOOGLE_SEARCH]: { - id: PluginID.GOOGLE_SEARCH, - name: PluginName.GOOGLE_SEARCH, - requiredKeys: [ - { - key: 'GOOGLE_API_KEY', - value: '', - }, - { - key: 'GOOGLE_CSE_ID', - value: '', - }, - ], - }, -}; - -export const PluginList = Object.values(Plugins); diff --git a/spaces/maxmax20160403/sovits5.0/hubert/__init__.py b/spaces/maxmax20160403/sovits5.0/hubert/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mayura25/handwritten_digit_recognition/README.md b/spaces/mayura25/handwritten_digit_recognition/README.md deleted file mode 100644 index 9bf847eaf8b1b71d2a2232c858c63e37bac55d0f..0000000000000000000000000000000000000000 --- a/spaces/mayura25/handwritten_digit_recognition/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Handwritten Digit Recognition -emoji: 🌍 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 4.1.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/model/fiber.py b/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/model/fiber.py deleted file mode 100644 index 38db33b0d27d70116c92650176170e9b3cf9a9c7..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/model/fiber.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
-# -# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES -# SPDX-License-Identifier: MIT - - -from collections import namedtuple -from itertools import product -from typing import Dict - -import torch -from torch import Tensor - -from se3_transformer.runtime.utils import degree_to_dim - -FiberEl = namedtuple('FiberEl', ['degree', 'channels']) - - -class Fiber(dict): - """ - Describes the structure of some set of features. - Features are split into types (0, 1, 2, 3, ...). A feature of type k has a dimension of 2k+1. - Type-0 features: invariant scalars - Type-1 features: equivariant 3D vectors - Type-2 features: equivariant symmetric traceless matrices - ... - - As inputs to a SE3 layer, there can be many features of the same types, and many features of different types. - The 'multiplicity' or 'number of channels' is the number of features of a given type. - This class puts together all the degrees and their multiplicities in order to describe - the inputs, outputs or hidden features of SE3 layers. - """ - - def __init__(self, structure): - if isinstance(structure, dict): - structure = [FiberEl(int(d), int(m)) for d, m in sorted(structure.items(), key=lambda x: x[1])] - elif not isinstance(structure[0], FiberEl): - structure = list(map(lambda t: FiberEl(*t), sorted(structure, key=lambda x: x[1]))) - self.structure = structure - super().__init__({d: m for d, m in self.structure}) - - @property - def degrees(self): - return sorted([t.degree for t in self.structure]) - - @property - def channels(self): - return [self[d] for d in self.degrees] - - @property - def num_features(self): - """ Size of the resulting tensor if all features were concatenated together """ - return sum(t.channels * degree_to_dim(t.degree) for t in self.structure) - - @staticmethod - def create(num_degrees: int, num_channels: int): - """ Create a Fiber with degrees 0..num_degrees-1, all with the same multiplicity """ - return Fiber([(degree, num_channels) for degree in range(num_degrees)]) - - @staticmethod - def from_features(feats: Dict[str, Tensor]): - """ Infer the Fiber structure from a feature dict """ - structure = {} - for k, v in feats.items(): - degree = int(k) - assert len(v.shape) == 3, 'Feature shape should be (N, C, 2D+1)' - assert v.shape[-1] == degree_to_dim(degree) - structure[degree] = v.shape[-2] - return Fiber(structure) - - def __getitem__(self, degree: int): - """ fiber[degree] returns the multiplicity for this degree """ - return dict(self.structure).get(degree, 0) - - def __iter__(self): - """ Iterate over namedtuples (degree, channels) """ - return iter(self.structure) - - def __mul__(self, other): - """ - If other in an int, multiplies all the multiplicities by other. - If other is a fiber, returns the cartesian product. - """ - if isinstance(other, Fiber): - return product(self.structure, other.structure) - elif isinstance(other, int): - return Fiber({t.degree: t.channels * other for t in self.structure}) - - def __add__(self, other): - """ - If other in an int, add other to all the multiplicities. - If other is a fiber, add the multiplicities of the fibers together. 
- """ - if isinstance(other, Fiber): - return Fiber({t.degree: t.channels + other[t.degree] for t in self.structure}) - elif isinstance(other, int): - return Fiber({t.degree: t.channels + other for t in self.structure}) - - def __repr__(self): - return str(self.structure) - - @staticmethod - def combine_max(f1, f2): - """ Combine two fiber by taking the maximum multiplicity for each degree in both fibers """ - new_dict = dict(f1.structure) - for k, m in f2.structure: - new_dict[k] = max(new_dict.get(k, 0), m) - - return Fiber(list(new_dict.items())) - - @staticmethod - def combine_selectively(f1, f2): - """ Combine two fiber by taking the sum of multiplicities for each degree in the first fiber """ - # only use orders which occur in fiber f1 - new_dict = dict(f1.structure) - for k in f1.degrees: - if k in f2.degrees: - new_dict[k] += f2[k] - return Fiber(list(new_dict.items())) - - def to_attention_heads(self, tensors: Dict[str, Tensor], num_heads: int): - # dict(N, num_channels, 2d+1) -> (N, num_heads, -1) - fibers = [tensors[str(degree)].reshape(*tensors[str(degree)].shape[:-2], num_heads, -1) for degree in - self.degrees] - fibers = torch.cat(fibers, -1) - return fibers diff --git a/spaces/merve/data-leak/source/anonymization/style-graph-scroll.css b/spaces/merve/data-leak/source/anonymization/style-graph-scroll.css deleted file mode 100644 index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/source/anonymization/style-graph-scroll.css +++ /dev/null @@ -1,160 +0,0 @@ -/** { border: 1px solid #f00; }*/ - - -#container{ - position: relative; - width: auto; - margin-left: -25px; - /*margin-bottom: 100px;*/ -} - -#sections{ - width: 330px; - pointer-events: none; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - transition: opacity .2s; - pointer-events: all; -} -#sections > div:last-child{ - height: 480px; - margin-bottom: 0px; -} -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; -} - -.slider-outer { - display: block; - max-width: 300px; -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - left:12px; - } - - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -260px; - } - - #sections > div:last-child{ - height: auto; - } - - #sections h3{ - margin-top: .5em; - } - - /* Adjust buttons for mobile. */ - - .button-container{ - text-align: center; - left:0px; - } - - /* Adjust sliders for mobile. 
*/ - input[type="range" i] { - width: 280px; - } - .slider-label-container{ - width: 145px; - /* display: inline-block; */ - } - - .slide-container-heads-prob, .slide-container-population { - text-align: center; - } - - .slider-container { - margin-bottom: 5px; - text-align: center; - width: 300px; - /* display:inline-block; */ - } - - .slider-outer { - text-align: center; - display: flex; - max-width: 300px; - } - - .headsProb, .population { - margin-left: 15px; - } - - .slide-container-population { - margin-bottom: -10px; - } - - .pointer div { - left: 10px; - top: 37px; - } - - /* Adjust post summary test for mobile. */ - .post-summary{ - margin-left: 8px; - margin-bottom: 60px; - margin-top: 40px; - } - -} - -#graph > div{ - margin: 20 35px; -} - - -#end{ - height: 15vh; -} - diff --git a/spaces/merve/fill-in-the-blank/source/base-rate/sliders.js b/spaces/merve/fill-in-the-blank/source/base-rate/sliders.js deleted file mode 100644 index 994c9ba490dc44dfa015553d32ff24e822f16de0..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/base-rate/sliders.js +++ /dev/null @@ -1,103 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - - -var sliderVals = {} - -var sliders = [ - { - key: 'fNoiseMag', - text: 'Feature Noise', - r: [0, 1], - v: .5 - }, - { - key: 'fBiasMag', - text: 'Feature Bias', - r: [0, 1], - v: .2 - }, -] - -!(function(){ - var width = 145 - var height = 30 - - sliders.forEach(d => { - d.s = d3.scaleLinear().domain(d.r).range([0, width]) - sliderVals[d.key] = d - }) - - var sliderSel = d3.select('.slider').html('') - .appendMany('div', sliders) - .at({class: d => d.key}) - .st({ - display: 'inline-block', - width: width, - paddingRight: 60, - marginTop: 20, - color: '#000' - }) - - sliderSel.append('div') - .text(d => d.text) - .st({marginBottom: height/2}) - - var svgSel = sliderSel.append('svg').at({width, height}) - .on('click', function(d){ - d.v = d.s.invert(d3.mouse(this)[0]) - updatePos() - }) - .st({ - cursor: 'pointer' - }) - .append('g').translate(height/2, 1) - svgSel.append('rect').at({width, height, y: -height/2, fill: '#fff'}) - - svgSel.append('path').at({ - d: `M 0 0 H ${width}`, - stroke: '#000', - strokeWidth: 2 - }) - - var drag = d3.drag() - .on('drag', function(d){ - var x = d3.mouse(this)[0] - d.v = d3.clamp(d3.min(d.r), d.s.invert(x), d3.max(d.r)) - - updatePos() - }) - - var circleSel = svgSel.append('circle') - .at({ - r: height/2, - stroke: '#000', - strokeWidth: 2, - fill: '#fff', - }) - .call(drag) - - - function updatePos(){ - circleSel.at({cx: d => d.s(d.v)}) - if (sliderVals.onUpdate) sliderVals.onUpdate() - } - - updatePos() - sliderVals.updatePos = updatePos -})() diff --git a/spaces/merve/measuring-fairness/public/third_party/index.js b/spaces/merve/measuring-fairness/public/third_party/index.js deleted file mode 100644 index 
e070ccfa3ac2645f9431b1e4dbee36e81692574d..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/public/third_party/index.js +++ /dev/null @@ -1,74 +0,0 @@ -// https://github.com/1wheel/roadtolarissa Copyright 2018 Adam Pearce - -var fs = require('fs') -var {exec, execSync} = require('child_process') - -var source = `${__dirname}/../../source` -var public = `${__dirname}/../../public` -if (!fs.existsSync(public)) fs.mkdirSync(public) - -function rsyncSource(){ - exec(`rsync -a --exclude _posts --exclude _templates ${source}/ ${public}/`) -} -rsyncSource() - -var hljs = require('highlight.js') -var marked = require('marked') -marked.setOptions({ - highlight: (code, lang) => hljs.highlight(lang || 'html', code).value, - smartypants: true -}) - -var templates = {} -readdirAbs(`${source}/_templates`).forEach(path => { - var str = fs.readFileSync(path, 'utf8') - var templateName = path.split('_templates/')[1] - templates[templateName] = d => eval('`' + str + '`') -}) - -function readdirAbs(dir){ return fs.readdirSync(dir).map(d => dir + '/' + d) } - -var posts = readdirAbs(`${source}/_posts`) - .filter(d => !d.includes('.DS_Store')) - .map(parsePost) - -fs.writeFileSync(public + '/rss.xml', templates['rss.xml'](posts)) -fs.writeFileSync(public + '/sitemap.xml', templates['sitemap.xml'](posts)) - -function parsePost(path){ - var str = fs.readFileSync(path, 'utf8') - if (str[0] == '<') str = str.split('License.\n-->')[1] - var [top, body] = str - .replace('---\n', '') - .split('\n---\n') - - console.log(path) - - var post = {html: path.includes('.html') ? body : marked(body)} - top.split('\n').forEach(line => { - var [key, val] = line.split(/: (.+)/) - post[key] = val - }) - - return post -} - -function writePost(post){ - var dir = public + post.permalink - if (!fs.existsSync(dir)) execSync(`mkdir -p ${dir}`) - fs.writeFileSync(`${dir}/index.html`, templates[post.template](post)) - - var outposts = JSON.parse(JSON.stringify(posts)) - outposts.forEach(d => delete d.html) - fs.writeFileSync(public + '/posts.json', JSON.stringify(outposts, null, 2)) - - -} -posts.forEach(writePost) - -if (process.argv.includes('--watch')){ - require('chokidar').watch(source).on('change', path => { - rsyncSource() - if (path.includes('_posts/')) writePost(parsePost(path)) - }) -} diff --git a/spaces/mikeee/radiobee-dev/radiobee/process_upload.py b/spaces/mikeee/radiobee-dev/radiobee/process_upload.py deleted file mode 100644 index b61958bbf7cdad4d799443aafce9226c7f39d4ab..0000000000000000000000000000000000000000 --- a/spaces/mikeee/radiobee-dev/radiobee/process_upload.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Process uploads.""" -# pylint: disable=invalid-name, unused-import -from typing import Union - -from pathlib import Path -import tempfile -import cchardet -from logzero import logger - - -def process_upload(upload: Union[tempfile._TemporaryFileWrapper, bytes]) -> str: - """Process upload (fileobj or bytes(zip file: io.BytesIO further to zipfile.ZipFile)). 
- - gr.inputs.File("file"): upload normal file - gr.inputs.File("bytes"): upload zip file - - """ - if isinstance(upload, bytes): - logger.warning("Not implemented, yet, for zip file") - return "Not implemented, yet, for zip file" - - try: - fpath = Path(upload.name) - except Exception as e: - logger.error("Path(upload.name) error: %s", e) - return str(e) - - suffixes = [ - "", - ".txt", - ".text", - ".md", - "tsv", - ] - # check .txt .md ''(no suffix) - if fpath.suffix.lower() not in suffixes: - logger.warning('suffix: [%s] not in %s', fpath.suffix, suffixes) - # return "File type not supported, yet." - - try: - data = Path(upload.name).read_bytes() - except Exception as e: - logger.error("Unable to read data from %s, errors: %s", fpath, e) - data = str(e).encode() - - # no data, empty file, return "" - if not data: - logger.info("empty file: %s", upload.name) - return "" - - encoding = cchardet.detect(data).get("encoding") - - if encoding is not None: - try: - text = fpath.read_text(encoding=encoding) - except Exception as e: - logger.error("Unable to retrieve text, error: %s", e) - text = str(e) - - # return f"{upload.name} {type(upload)}\n\n{text}" - # return f"{upload.name}\n{text}" - return text - - # not able to cchardet: encoding is None, docx, pdf, epub, zip etc - logger.info("Trying docx...to be implemented") - - # T ODO .docx .epub .mobi .pdf etc. - - _ = Path(upload.name) - msg = f"binary file: {_.stem[:-8]}{_.suffix}" - logger.warning("%s", msg) - - return msg - - -_ = ''' # colab gradio-file-inputs-upload.ipynb -# file_to_text/process_file -def zip_to_text(file_obj): - """ - # zf = zipfile.ZipFile('german-recipes-dataset.zip') - zf = file_obj - namelist = zipfile.ZipFile.namelist(zf); - # filename = zf.open(namelist[0]); - file_contents = [] - for filename in namelist: - with zf.open(filename) as fhandle: - file_contents.append(fhandle.read().decode()) - """ - # fileobj is - - # gr.inputs.File("bytes") - if isinstance(file_obj, bytes): - data = file_obj.decode() - return f"{type(file_obj)}\n{dir(file_obj)}\n{data}" - - # "file"/gr.inputs.File("file") file_obj.name: /tmp/READMEzm8hc5ze.md - data = Path(file_obj.name).read_bytes() - return f"{file_obj.name} {type(file_obj)}\n{dir(file_obj)} \n{data}" -# ''' diff --git a/spaces/ml6team/Knowledge-graphs/utils.py b/spaces/ml6team/Knowledge-graphs/utils.py deleted file mode 100644 index 5b23e942e54cc855989de77ade8370a3a66fc7f2..0000000000000000000000000000000000000000 --- a/spaces/ml6team/Knowledge-graphs/utils.py +++ /dev/null @@ -1,8 +0,0 @@ - -def clip_text(t, lenght = 4): - t_sub = t.replace("...", "dotdotdot") - t_clipped = ".".join(t_sub.split(".")[:lenght]) + "." 
- t_reverted = t_clipped.replace("dotdotdot", "...") - return t_reverted - - diff --git a/spaces/monra/freegpt-webui/client/css/label.css b/spaces/monra/freegpt-webui/client/css/label.css deleted file mode 100644 index d84873d41e41f2cc22f9d3ace67c30ec07706811..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui/client/css/label.css +++ /dev/null @@ -1,16 +0,0 @@ -label { - cursor: pointer; - text-indent: -9999px; - width: 50px; - height: 30px; - backdrop-filter: blur(20px); - -webkit-backdrop-filter: blur(20px); - background-color: var(--blur-bg); - border-radius: var(--border-radius-1); - border: 1px solid var(--blur-border); - display: block; - border-radius: 100px; - position: relative; - overflow: hidden; - transition: 0.33s; -} diff --git a/spaces/mrstuffandthings/Bark-Voice-Cloning/setup.py b/spaces/mrstuffandthings/Bark-Voice-Cloning/setup.py deleted file mode 100644 index 606849326a4002007fd42060b51e69a19c18675c..0000000000000000000000000000000000000000 --- a/spaces/mrstuffandthings/Bark-Voice-Cloning/setup.py +++ /dev/null @@ -1,3 +0,0 @@ -from setuptools import setup - -setup() diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/ema/__init__.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/ema/__init__.py deleted file mode 100644 index 503ceaa609b092e48bd32a0031f4e2ffb875483f..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/ema/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import importlib -import os - -from .ema import EMA - - -def build_ema(model, cfg, device): - return EMA(model, cfg, device) - - -# automatically import any Python files in the models/ema/ directory -for file in sorted(os.listdir(os.path.dirname(__file__))): - if file.endswith(".py") and not file.startswith("_"): - file_name = file[: file.find(".py")] - importlib.import_module("fairseq.models.ema." 
+ file_name) diff --git a/spaces/mthsk/sovits-models-misc/hubert/__init__.py b/spaces/mthsk/sovits-models-misc/hubert/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/vae.py b/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/vae.py deleted file mode 100644 index f83b71b82e40451571f5fbdbb3ca66a3cb26c65b..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/vae.py +++ /dev/null @@ -1,857 +0,0 @@ - - -from typing import Optional, Tuple, Union -from pathlib import Path - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from itertools import chain - -from .unet_blocks import UNetMidBlock2D, get_down_block, get_up_block -from .taming_discriminator import NLayerDiscriminator -from medical_diffusion.models import BasicModel -from torchvision.utils import save_image - -from torch.distributions.normal import Normal -from torch.distributions import kl_divergence - -class Encoder(nn.Module): - def __init__( - self, - in_channels=3, - out_channels=3, - down_block_types=("DownEncoderBlock2D",), - block_out_channels=(64), - layers_per_block=2, - norm_num_groups=32, - act_fn="silu", - double_z=True, - ): - super().__init__() - self.layers_per_block = layers_per_block - - self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1) - - self.mid_block = None - self.down_blocks = nn.ModuleList([]) - - # down - output_channel = block_out_channels[0] - for i, down_block_type in enumerate(down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i+1] - is_final_block = False #i == len(block_out_channels) - 1 - - down_block = get_down_block( - down_block_type, - num_layers=self.layers_per_block, - in_channels=input_channel, - out_channels=output_channel, - add_downsample=not is_final_block, - resnet_eps=1e-6, - downsample_padding=0, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - attn_num_head_channels=None, - temb_channels=None, - ) - self.down_blocks.append(down_block) - - # mid - self.mid_block = UNetMidBlock2D( - in_channels=block_out_channels[-1], - resnet_eps=1e-6, - resnet_act_fn=act_fn, - output_scale_factor=1, - resnet_time_scale_shift="default", - attn_num_head_channels=None, - resnet_groups=norm_num_groups, - temb_channels=None, - ) - - # out - self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6) - self.conv_act = nn.SiLU() - - conv_out_channels = 2 * out_channels if double_z else out_channels - self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1) - - def forward(self, x): - sample = x - sample = self.conv_in(sample) - - # down - for down_block in self.down_blocks: - sample = down_block(sample) - - # middle - sample = self.mid_block(sample) - - # post-process - sample = self.conv_norm_out(sample) - sample = self.conv_act(sample) - sample = self.conv_out(sample) - - return sample - - -class Decoder(nn.Module): - def __init__( - self, - in_channels=3, - out_channels=3, - up_block_types=("UpDecoderBlock2D",), - block_out_channels=(64,), - layers_per_block=2, - norm_num_groups=32, - act_fn="silu", - ): - super().__init__() - self.layers_per_block = layers_per_block - - self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, 
stride=1, padding=1) - - self.mid_block = None - self.up_blocks = nn.ModuleList([]) - - # mid - self.mid_block = UNetMidBlock2D( - in_channels=block_out_channels[-1], - resnet_eps=1e-6, - resnet_act_fn=act_fn, - output_scale_factor=1, - resnet_time_scale_shift="default", - attn_num_head_channels=None, - resnet_groups=norm_num_groups, - temb_channels=None, - ) - - # up - reversed_block_out_channels = list(reversed(block_out_channels)) - output_channel = reversed_block_out_channels[0] - for i, up_block_type in enumerate(up_block_types): - prev_output_channel = output_channel - output_channel = reversed_block_out_channels[i+1] - - is_final_block = False # i == len(block_out_channels) - 1 - - up_block = get_up_block( - up_block_type, - num_layers=self.layers_per_block + 1, - in_channels=prev_output_channel, - out_channels=output_channel, - prev_output_channel=None, - add_upsample=not is_final_block, - resnet_eps=1e-6, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - attn_num_head_channels=None, - temb_channels=None, - ) - self.up_blocks.append(up_block) - prev_output_channel = output_channel - - # out - self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6) - self.conv_act = nn.SiLU() - self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) - - def forward(self, z): - sample = z - sample = self.conv_in(sample) - - # middle - sample = self.mid_block(sample) - - # up - for up_block in self.up_blocks: - sample = up_block(sample) - - # post-process - sample = self.conv_norm_out(sample) - sample = self.conv_act(sample) - sample = self.conv_out(sample) - - return sample - - -class VectorQuantizer(nn.Module): - """ - Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix - multiplications and allows for post-hoc remapping of indices. - """ - - # NOTE: due to a bug the beta term was applied to the wrong term. for - # backwards compatibility we use the buggy version by default, but you can - # specify legacy=False to fix it. - def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=False): - super().__init__() - self.n_e = n_e - self.e_dim = e_dim - self.beta = beta - self.legacy = legacy - - self.embedding = nn.Embedding(self.n_e, self.e_dim) - self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) - - self.remap = remap - if self.remap is not None: - self.register_buffer("used", torch.tensor(np.load(self.remap))) - self.re_embed = self.used.shape[0] - self.unknown_index = unknown_index # "random" or "extra" or integer - if self.unknown_index == "extra": - self.unknown_index = self.re_embed - self.re_embed = self.re_embed + 1 - print( - f"Remapping {self.n_e} indices to {self.re_embed} indices. " - f"Using {self.unknown_index} for unknown indices." 
- ) - else: - self.re_embed = n_e - - self.sane_index_shape = sane_index_shape - - def remap_to_used(self, inds): - ishape = inds.shape - assert len(ishape) > 1 - inds = inds.reshape(ishape[0], -1) - used = self.used.to(inds) - match = (inds[:, :, None] == used[None, None, ...]).long() - new = match.argmax(-1) - unknown = match.sum(2) < 1 - if self.unknown_index == "random": - new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device) - else: - new[unknown] = self.unknown_index - return new.reshape(ishape) - - def unmap_to_all(self, inds): - ishape = inds.shape - assert len(ishape) > 1 - inds = inds.reshape(ishape[0], -1) - used = self.used.to(inds) - if self.re_embed > self.used.shape[0]: # extra token - inds[inds >= self.used.shape[0]] = 0 # simply set to zero - back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds) - return back.reshape(ishape) - - def forward(self, z): - # reshape z -> (batch, height, width, channel) and flatten - z = z.permute(0, 2, 3, 1).contiguous() - z_flattened = z.view(-1, self.e_dim) - # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z - - d = ( - torch.sum(z_flattened**2, dim=1, keepdim=True) - + torch.sum(self.embedding.weight**2, dim=1) - - 2 * torch.einsum("bd,dn->bn", z_flattened, self.embedding.weight.t()) - ) - - min_encoding_indices = torch.argmin(d, dim=1) - z_q = self.embedding(min_encoding_indices).view(z.shape) - perplexity = None - min_encodings = None - - # compute loss for embedding - if not self.legacy: - loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2) - else: - loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2) - - # preserve gradients - z_q = z + (z_q - z).detach() - - # reshape back to match original input shape - z_q = z_q.permute(0, 3, 1, 2).contiguous() - - if self.remap is not None: - min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis - min_encoding_indices = self.remap_to_used(min_encoding_indices) - min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten - - if self.sane_index_shape: - min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3]) - - return z_q, loss, (perplexity, min_encodings, min_encoding_indices) - - def get_codebook_entry(self, indices, shape): - # shape specifying (batch, height, width, channel) - if self.remap is not None: - indices = indices.reshape(shape[0], -1) # add batch axis - indices = self.unmap_to_all(indices) - indices = indices.reshape(-1) # flatten again - - # get quantized latent vectors - z_q = self.embedding(indices) - - if shape is not None: - z_q = z_q.view(shape) - # reshape back to match original input shape - z_q = z_q.permute(0, 3, 1, 2).contiguous() - - return z_q - - -class DiagonalGaussianDistribution(object): - def __init__(self, parameters, deterministic=False): - self.batch_size = parameters.shape[0] - self.parameters = parameters - self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) - # self.logvar = torch.clamp(self.logvar, -30.0, 20.0) - self.deterministic = deterministic - self.std = torch.exp(0.5 * self.logvar) - self.var = torch.exp(self.logvar) - if self.deterministic: - self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) - - def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor: - device = self.parameters.device - sample_device = "cpu" if device.type == "mps" else device - sample = 
torch.randn(self.mean.shape, generator=generator, device=sample_device).to(device) - x = self.mean + self.std * sample - return x - - def kl(self, other=None): - if self.deterministic: - return torch.Tensor([0.0]) - else: - if other is None: - return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar)/self.batch_size - else: - return 0.5 * torch.sum( - torch.pow(self.mean - other.mean, 2) / other.var - + self.var / other.var - - 1.0 - - self.logvar - + other.logvar, - )/self.batch_size - - # q_z_x = Normal(self.mean, self.logvar.mul(.5).exp()) - # p_z = Normal(torch.zeros_like(self.mean), torch.ones_like(self.logvar)) - # kl_div = kl_divergence(q_z_x, p_z).sum(1).mean() - # return kl_div - - def nll(self, sample, dims=[1, 2, 3]): - if self.deterministic: - return torch.Tensor([0.0]) - logtwopi = np.log(2.0 * np.pi) - return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims) - - def mode(self): - return self.mean - - -class VQModel(nn.Module): - r"""VQ-VAE model from the paper Neural Discrete Representation Learning by Aaron van den Oord, Oriol Vinyals and Koray - Kavukcuoglu. - - This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library - implements for all the model (such as downloading or saving, etc.) - - Parameters: - in_channels (int, *optional*, defaults to 3): Number of channels in the input image. - out_channels (int, *optional*, defaults to 3): Number of channels in the output. - down_block_types (`Tuple[str]`, *optional*, defaults to : - obj:`("DownEncoderBlock2D",)`): Tuple of downsample block types. - up_block_types (`Tuple[str]`, *optional*, defaults to : - obj:`("UpDecoderBlock2D",)`): Tuple of upsample block types. - block_out_channels (`Tuple[int]`, *optional*, defaults to : - obj:`(64,)`): Tuple of block output channels. - act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. - latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space. - sample_size (`int`, *optional*, defaults to `32`): TODO - num_vq_embeddings (`int`, *optional*, defaults to `256`): Number of codebook vectors in the VQ-VAE. 
- """ - - - def __init__( - self, - in_channels: int = 3, - out_channels: int = 3, - down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"), - up_block_types: Tuple[str] = ("UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"), - block_out_channels: Tuple[int] = (32, 64, 128, 256), - layers_per_block: int = 1, - act_fn: str = "silu", - latent_channels: int = 3, - sample_size: int = 32, - num_vq_embeddings: int = 256, - norm_num_groups: int = 32, - ): - super().__init__() - - # pass init params to Encoder - self.encoder = Encoder( - in_channels=in_channels, - out_channels=latent_channels, - down_block_types=down_block_types, - block_out_channels=block_out_channels, - layers_per_block=layers_per_block, - act_fn=act_fn, - norm_num_groups=norm_num_groups, - double_z=False, - ) - - self.quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1) - self.quantize = VectorQuantizer( - num_vq_embeddings, latent_channels, beta=0.25, remap=None, sane_index_shape=False - ) - self.post_quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1) - - # pass init params to Decoder - self.decoder = Decoder( - in_channels=latent_channels, - out_channels=out_channels, - up_block_types=up_block_types, - block_out_channels=block_out_channels, - layers_per_block=layers_per_block, - act_fn=act_fn, - norm_num_groups=norm_num_groups, - ) - - # def encode(self, x: torch.FloatTensor): - # z = self.encoder(x) - # z = self.quant_conv(z) - # return z - - def encode(self, x, return_loss=True, force_quantize= True): - z = self.encoder(x) - z = self.quant_conv(z) - - if force_quantize: - z_q, emb_loss, _ = self.quantize(z) - else: - z_q, emb_loss = z, None - - if return_loss: - return z_q, emb_loss - else: - return z_q - - def decode(self, z_q) -> torch.FloatTensor: - z_q = self.post_quant_conv(z_q) - x = self.decoder(z_q) - return x - - # def decode(self, z: torch.FloatTensor, return_loss=True, force_quantize: bool = True) -> torch.FloatTensor: - # if force_quantize: - # z_q, emb_loss, _ = self.quantize(z) - # else: - # z_q, emb_loss = z, None - - # z_q = self.post_quant_conv(z_q) - # x = self.decoder(z_q) - - # if return_loss: - # return x, emb_loss - # else: - # return x - - def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor: - r""" - Args: - sample (`torch.FloatTensor`): Input sample. - """ - # h = self.encode(sample) - h, emb_loss = self.encode(sample) - dec = self.decode(h) - # dec, emb_loss = self.decode(h) - - return dec, emb_loss - - -class AutoencoderKL(nn.Module): - r"""Variational Autoencoder (VAE) model with KL loss from the paper Auto-Encoding Variational Bayes by Diederik P. Kingma - and Max Welling. - - This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library - implements for all the model (such as downloading or saving, etc.) - - Parameters: - in_channels (int, *optional*, defaults to 3): Number of channels in the input image. - out_channels (int, *optional*, defaults to 3): Number of channels in the output. - down_block_types (`Tuple[str]`, *optional*, defaults to : - obj:`("DownEncoderBlock2D",)`): Tuple of downsample block types. - up_block_types (`Tuple[str]`, *optional*, defaults to : - obj:`("UpDecoderBlock2D",)`): Tuple of upsample block types. - block_out_channels (`Tuple[int]`, *optional*, defaults to : - obj:`(64,)`): Tuple of block output channels. - act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. 
- latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space. - sample_size (`int`, *optional*, defaults to `32`): TODO - """ - - - def __init__( - self, - in_channels: int = 3, - out_channels: int = 3, - down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D","DownEncoderBlock2D",), - up_block_types: Tuple[str] = ("UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D",), - block_out_channels: Tuple[int] = (32, 64, 128, 128), - layers_per_block: int = 1, - act_fn: str = "silu", - latent_channels: int = 3, - norm_num_groups: int = 32, - sample_size: int = 32, - ): - super().__init__() - - # pass init params to Encoder - self.encoder = Encoder( - in_channels=in_channels, - out_channels=latent_channels, - down_block_types=down_block_types, - block_out_channels=block_out_channels, - layers_per_block=layers_per_block, - act_fn=act_fn, - norm_num_groups=norm_num_groups, - double_z=True, - ) - - # pass init params to Decoder - self.decoder = Decoder( - in_channels=latent_channels, - out_channels=out_channels, - up_block_types=up_block_types, - block_out_channels=block_out_channels, - layers_per_block=layers_per_block, - norm_num_groups=norm_num_groups, - act_fn=act_fn, - ) - - self.quant_conv = torch.nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) - self.post_quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1) - - def encode(self, x: torch.FloatTensor): - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z: torch.FloatTensor) -> torch.FloatTensor: - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward( - self, - sample: torch.FloatTensor, - sample_posterior: bool = True, - generator: Optional[torch.Generator] = None, - ) -> torch.FloatTensor: - r""" - Args: - sample (`torch.FloatTensor`): Input sample. - sample_posterior (`bool`, *optional*, defaults to `False`): - Whether to sample from the posterior. 
- """ - x = sample - posterior = self.encode(x) - if sample_posterior: - z = posterior.sample(generator=generator) - else: - z = posterior.mode() - kl_loss = posterior.kl() - dec = self.decode(z) - return dec, kl_loss - - - -class VQVAEWrapper(BasicModel): - def __init__( - self, - in_ch: int = 3, - out_ch: int = 3, - down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D",), - up_block_types: Tuple[str] = ("UpDecoderBlock2D","UpDecoderBlock2D","UpDecoderBlock2D",), - block_out_channels: Tuple[int] = (32, 64, 128, 256, ), - layers_per_block: int = 1, - act_fn: str = "silu", - latent_channels: int = 3, - sample_size: int = 32, - num_vq_embeddings: int = 64, - norm_num_groups: int = 32, - - optimizer=torch.optim.AdamW, - optimizer_kwargs={}, - lr_scheduler=None, - lr_scheduler_kwargs={}, - loss=torch.nn.MSELoss, - loss_kwargs={} - ): - super().__init__(optimizer, optimizer_kwargs, lr_scheduler, lr_scheduler_kwargs, loss, loss_kwargs) - self.model = VQModel(in_ch, out_ch, down_block_types, up_block_types, block_out_channels, - layers_per_block, act_fn, latent_channels, sample_size, num_vq_embeddings, norm_num_groups) - - def forward(self, sample): - return self.model(sample) - - def encode(self, x): - z = self.model.encode(x, return_loss=False) - return z - - def decode(self, z): - x = self.model.decode(z) - return x - - def _step(self, batch: dict, batch_idx: int, state: str, step: int, optimizer_idx:int): - # ------------------------- Get Source/Target --------------------------- - x = batch['source'] - target = x - - # ------------------------- Run Model --------------------------- - pred, vq_loss = self(x) - - # ------------------------- Compute Loss --------------------------- - loss = self.loss_fct(pred, target) - loss += vq_loss - - # --------------------- Compute Metrics ------------------------------- - results = {'loss':loss} - with torch.no_grad(): - results['L2'] = torch.nn.functional.mse_loss(pred, target) - results['L1'] = torch.nn.functional.l1_loss(pred, target) - - # ----------------- Log Scalars ---------------------- - for metric_name, metric_val in results.items(): - self.log(f"{state}/{metric_name}", metric_val, batch_size=x.shape[0], on_step=True, on_epoch=True) - - # ----------------- Save Image ------------------------------ - if self.global_step != 0 and self.global_step % self.trainer.log_every_n_steps == 0: - def norm(x): - return (x-x.min())/(x.max()-x.min()) - - images = [x, pred] - log_step = self.global_step // self.trainer.log_every_n_steps - path_out = Path(self.logger.log_dir)/'images' - path_out.mkdir(parents=True, exist_ok=True) - images = torch.cat([norm(img) for img in images]) - save_image(images, path_out/f'sample_{log_step}.png') - - return loss - -def hinge_d_loss(logits_real, logits_fake): - loss_real = torch.mean(F.relu(1. - logits_real)) - loss_fake = torch.mean(F.relu(1. 
+ logits_fake)) - d_loss = 0.5 * (loss_real + loss_fake) - return d_loss - -def vanilla_d_loss(logits_real, logits_fake): - d_loss = 0.5 * ( - torch.mean(F.softplus(-logits_real)) + - torch.mean(F.softplus(logits_fake))) - return d_loss - -class VQGAN(BasicModel): - def __init__( - self, - in_ch: int = 3, - out_ch: int = 3, - down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D",), - up_block_types: Tuple[str] = ("UpDecoderBlock2D","UpDecoderBlock2D","UpDecoderBlock2D",), - block_out_channels: Tuple[int] = (32, 64, 128, 256, ), - layers_per_block: int = 1, - act_fn: str = "silu", - latent_channels: int = 3, - sample_size: int = 32, - num_vq_embeddings: int = 64, - norm_num_groups: int = 32, - - start_gan_train_step = 50000, # NOTE step increase with each optimizer - gan_loss_weight: float = 1.0, # alias discriminator - perceptual_loss_weight: float = 1.0, - embedding_loss_weight: float = 1.0, - - optimizer=torch.optim.AdamW, - optimizer_kwargs={}, - lr_scheduler=None, - lr_scheduler_kwargs={}, - loss=torch.nn.MSELoss, - loss_kwargs={} - ): - super().__init__(optimizer, optimizer_kwargs, lr_scheduler, lr_scheduler_kwargs, loss, loss_kwargs) - self.vqvae = VQModel(in_ch, out_ch, down_block_types, up_block_types, block_out_channels, layers_per_block, act_fn, - latent_channels, sample_size, num_vq_embeddings, norm_num_groups) - self.discriminator = NLayerDiscriminator(in_ch) - # self.perceiver = ... # Currently not supported, would require another trained NN - - self.start_gan_train_step = start_gan_train_step - self.perceptual_loss_weight = perceptual_loss_weight - self.gan_loss_weight = gan_loss_weight - self.embedding_loss_weight = embedding_loss_weight - - def forward(self, x, condition=None): - return self.vqvae(x) - - def encode(self, x): - z = self.vqvae.encode(x, return_loss=False) - return z - - def decode(self, z): - x = self.vqvae.decode(z) - return x - - - def compute_lambda(self, rec_loss, gan_loss, eps=1e-4): - """Computes adaptive weight as proposed in eq. 
7 of https://arxiv.org/abs/2012.09841""" - last_layer = self.vqvae.decoder.conv_out.weight - rec_grads = torch.autograd.grad(rec_loss, last_layer, retain_graph=True)[0] - gan_grads = torch.autograd.grad(gan_loss, last_layer, retain_graph=True)[0] - d_weight = torch.norm(rec_grads) / (torch.norm(gan_grads) + eps) - d_weight = torch.clamp(d_weight, 0.0, 1e4) - return d_weight.detach() - - - - def _step(self, batch: dict, batch_idx: int, state: str, step: int, optimizer_idx:int): - x = batch['source'] - # condition = batch.get('target', None) - - pred, vq_emb_loss = self.vqvae(x) - - if optimizer_idx == 0: - # ------ VAE ------- - vq_img_loss = F.mse_loss(pred, x) - vq_per_loss = 0.0 #self.perceiver(pred, x) - rec_loss = vq_img_loss+self.perceptual_loss_weight*vq_per_loss - - # ------- GAN ----- - if step > self.start_gan_train_step: - gan_loss = -torch.mean(self.discriminator(pred)) - lambda_weight = self.compute_lambda(rec_loss, gan_loss) - gan_loss = gan_loss*lambda_weight - else: - gan_loss = torch.tensor([0.0], requires_grad=True, device=x.device) - - loss = self.gan_loss_weight*gan_loss+rec_loss+self.embedding_loss_weight*vq_emb_loss - - elif optimizer_idx == 1: - if step > self.start_gan_train_step//2: - logits_real = self.discriminator(x.detach()) - logits_fake = self.discriminator(pred.detach()) - loss = hinge_d_loss(logits_real, logits_fake) - else: - loss = torch.tensor([0.0], requires_grad=True, device=x.device) - - # --------------------- Compute Metrics ------------------------------- - results = {'loss':loss.detach(), f'loss_{optimizer_idx}':loss.detach()} - with torch.no_grad(): - results[f'L2'] = torch.nn.functional.mse_loss(pred, x) - results[f'L1'] = torch.nn.functional.l1_loss(pred, x) - - # ----------------- Log Scalars ---------------------- - for metric_name, metric_val in results.items(): - self.log(f"{state}/{metric_name}", metric_val, batch_size=x.shape[0], on_step=True, on_epoch=True) - - # ----------------- Save Image ------------------------------ - if self.global_step != 0 and self.global_step % self.trainer.log_every_n_steps == 0: # NOTE: step 1 (opt1) , step=2 (opt2), step=3 (opt1), ... 
- def norm(x): - return (x-x.min())/(x.max()-x.min()) - - images = torch.cat([x, pred]) - log_step = self.global_step // self.trainer.log_every_n_steps - path_out = Path(self.logger.log_dir)/'images' - path_out.mkdir(parents=True, exist_ok=True) - images = torch.stack([norm(img) for img in images]) - save_image(images, path_out/f'sample_{log_step}.png') - - return loss - - def configure_optimizers(self): - opt_vae = self.optimizer(self.vqvae.parameters(), **self.optimizer_kwargs) - opt_disc = self.optimizer(self.discriminator.parameters(), **self.optimizer_kwargs) - if self.lr_scheduler is not None: - scheduler = [ - { - 'scheduler': self.lr_scheduler(opt_vae, **self.lr_scheduler_kwargs), - 'interval': 'step', - 'frequency': 1 - }, - { - 'scheduler': self.lr_scheduler(opt_disc, **self.lr_scheduler_kwargs), - 'interval': 'step', - 'frequency': 1 - }, - ] - else: - scheduler = [] - - return [opt_vae, opt_disc], scheduler - -class VAEWrapper(BasicModel): - def __init__( - self, - in_ch: int = 3, - out_ch: int = 3, - down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"), # "DownEncoderBlock2D", "DownEncoderBlock2D", - up_block_types: Tuple[str] = ("UpDecoderBlock2D", "UpDecoderBlock2D","UpDecoderBlock2D" ), # "UpDecoderBlock2D", "UpDecoderBlock2D", - block_out_channels: Tuple[int] = (32, 64, 128, 256), # 128, 256 - layers_per_block: int = 1, - act_fn: str = "silu", - latent_channels: int = 3, - norm_num_groups: int = 32, - sample_size: int = 32, - - optimizer=torch.optim.AdamW, - optimizer_kwargs={'lr':1e-4, 'weight_decay':1e-3, 'amsgrad':True}, - lr_scheduler=None, - lr_scheduler_kwargs={}, - # loss=torch.nn.MSELoss, # WARNING: No Effect - # loss_kwargs={'reduction': 'mean'} - ): - super().__init__(optimizer, optimizer_kwargs, lr_scheduler, lr_scheduler_kwargs ) # loss, loss_kwargs - self.model = AutoencoderKL(in_ch, out_ch, down_block_types, up_block_types, block_out_channels, - layers_per_block, act_fn, latent_channels, norm_num_groups, sample_size) - - self.logvar = nn.Parameter(torch.zeros(size=())) # Better weighting between KL and MSE, see (https://arxiv.org/abs/1903.05789), also used by Taming-Transfomer/Stable Diffusion - - def forward(self, sample): - return self.model(sample) - - def encode(self, x): - z = self.model.encode(x) # Latent space but not yet mapped to discrete embedding vectors - return z.sample(generator=None) - - def decode(self, z): - x = self.model.decode(z) - return x - - def _step(self, batch: dict, batch_idx: int, state: str, step: int, optimizer_idx:int): - # ------------------------- Get Source/Target --------------------------- - x = batch['source'] - target = x - HALF_LOG_TWO_PI = 0.91893 # log(2pi)/2 - - # ------------------------- Run Model --------------------------- - pred, kl_loss = self(x) - - # ------------------------- Compute Loss --------------------------- - loss = torch.sum( torch.square(pred-target))/x.shape[0] #torch.sum( torch.square((pred-target)/torch.exp(self.logvar))/2 + self.logvar + HALF_LOG_TWO_PI )/x.shape[0] - loss += kl_loss - - # --------------------- Compute Metrics ------------------------------- - results = {'loss':loss.detach()} - with torch.no_grad(): - results['L2'] = torch.nn.functional.mse_loss(pred, target) - results['L1'] = torch.nn.functional.l1_loss(pred, target) - - # ----------------- Log Scalars ---------------------- - for metric_name, metric_val in results.items(): - self.log(f"{state}/{metric_name}", metric_val, batch_size=x.shape[0], on_step=True, on_epoch=True) - - # 
----------------- Save Image ------------------------------ - if self.global_step != 0 and self.global_step % self.trainer.log_every_n_steps == 0: - def norm(x): - return (x-x.min())/(x.max()-x.min()) - - images = torch.cat([x, pred]) - log_step = self.global_step // self.trainer.log_every_n_steps - path_out = Path(self.logger.log_dir)/'images' - path_out.mkdir(parents=True, exist_ok=True) - images = torch.stack([norm(img) for img in images]) - save_image(images, path_out/f'sample_{log_step}.png') - - return loss \ No newline at end of file diff --git a/spaces/multimodalart/latentdiffusion/latent-diffusion/ldm/models/autoencoder.py b/spaces/multimodalart/latentdiffusion/latent-diffusion/ldm/models/autoencoder.py deleted file mode 100644 index 6a9c4f45498561953b8085981609b2a3298a5473..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/latentdiffusion/latent-diffusion/ldm/models/autoencoder.py +++ /dev/null @@ -1,443 +0,0 @@ -import torch -import pytorch_lightning as pl -import torch.nn.functional as F -from contextlib import contextmanager - -from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer - -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.modules.distributions.distributions import DiagonalGaussianDistribution - -from ldm.util import instantiate_from_config - - -class VQModel(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - n_embed, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - batch_resize_range=None, - scheduler_config=None, - lr_g_factor=1.0, - remap=None, - sane_index_shape=False, # tell vector quantizer to return indices as bhw - use_ema=False - ): - super().__init__() - self.embed_dim = embed_dim - self.n_embed = n_embed - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, - remap=remap, - sane_index_shape=sane_index_shape) - self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - self.batch_resize_range = batch_resize_range - if self.batch_resize_range is not None: - print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") - - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - self.scheduler_config = scheduler_config - self.lr_g_factor = lr_g_factor - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.parameters()) - self.model_ema.copy_to(self) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from 
state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - print(f"Unexpected Keys: {unexpected}") - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self) - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - quant, emb_loss, info = self.quantize(h) - return quant, emb_loss, info - - def encode_to_prequant(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, quant): - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - def decode_code(self, code_b): - quant_b = self.quantize.embed_code(code_b) - dec = self.decode(quant_b) - return dec - - def forward(self, input, return_pred_indices=False): - quant, diff, (_,_,ind) = self.encode(input) - dec = self.decode(quant) - if return_pred_indices: - return dec, diff, ind - return dec, diff - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - if self.batch_resize_range is not None: - lower_size = self.batch_resize_range[0] - upper_size = self.batch_resize_range[1] - if self.global_step <= 4: - # do the first few batches with max size to avoid later oom - new_resize = upper_size - else: - new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) - if new_resize != x.shape[2]: - x = F.interpolate(x, size=new_resize, mode="bicubic") - x = x.detach() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - # https://github.com/pytorch/pytorch/issues/37142 - # try not to fool the heuristics - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - - if optimizer_idx == 0: - # autoencode - aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train", - predicted_indices=ind) - - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return aeloss - - if optimizer_idx == 1: - # discriminator - discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return discloss - - def validation_step(self, batch, batch_idx): - log_dict = self._validation_step(batch, batch_idx) - with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") - return log_dict - - def _validation_step(self, batch, batch_idx, suffix=""): - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - - discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] - self.log(f"val{suffix}/rec_loss", rec_loss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - self.log(f"val{suffix}/aeloss", aeloss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - if version.parse(pl.__version__) >= 
version.parse('1.4.0'): - del log_dict_ae[f"val{suffix}/rec_loss"] - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr_d = self.learning_rate - lr_g = self.lr_g_factor*self.learning_rate - print("lr_d", lr_d) - print("lr_g", lr_g) - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quantize.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr_g, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr_d, betas=(0.5, 0.9)) - - if self.scheduler_config is not None: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - { - 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - ] - return [opt_ae, opt_disc], scheduler - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if only_inputs: - log["inputs"] = x - return log - xrec, _ = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["inputs"] = x - log["reconstructions"] = xrec - if plot_ema: - with self.ema_scope(): - xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) - log["reconstructions_ema"] = xrec_ema - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
- return x - - -class VQModelInterface(VQModel): - def __init__(self, embed_dim, *args, **kwargs): - super().__init__(embed_dim=embed_dim, *args, **kwargs) - self.embed_dim = embed_dim - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, h, force_not_quantize=False): - # also go through quantization layer - if not force_not_quantize: - quant, emb_loss, info = self.quantize(h) - else: - quant = h - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - -class AutoencoderKL(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - ): - super().__init__() - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - assert ddconfig["double_z"] - self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - self.embed_dim = embed_dim - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - self.load_state_dict(sd, strict=False) - print(f"Restored from {path}") - - def encode(self, x): - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - dec = self.decode(z) - return dec, posterior - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - - if optimizer_idx == 0: - # train encoder+decoder+logvar - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return aeloss - - if optimizer_idx == 1: - # train the discriminator - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - - self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return discloss - - def validation_step(self, batch, batch_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - aeloss, log_dict_ae = self.loss(inputs, 
reconstructions, posterior, 0, self.global_step, - last_layer=self.get_last_layer(), split="val") - - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, - last_layer=self.get_last_layer(), split="val") - - self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr = self.learning_rate - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr, betas=(0.5, 0.9)) - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - @torch.no_grad() - def log_images(self, batch, only_inputs=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if not only_inputs: - xrec, posterior = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["samples"] = self.decode(torch.randn_like(posterior.sample())) - log["reconstructions"] = xrec - log["inputs"] = x - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. - return x - - -class IdentityFirstStage(torch.nn.Module): - def __init__(self, *args, vq_interface=False, **kwargs): - self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff - super().__init__() - - def encode(self, x, *args, **kwargs): - return x - - def decode(self, x, *args, **kwargs): - return x - - def quantize(self, x, *args, **kwargs): - if self.vq_interface: - return x, None, [None, None, None] - return x - - def forward(self, x, *args, **kwargs): - return x diff --git a/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/layers_WS.py b/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/layers_WS.py deleted file mode 100644 index 51085989c4f090d4dc5f599be3c550d16ec0b2e7..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/layers_WS.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Modified by Nikita Selin (OPHoperHPO)[https://github.com/OPHoperHPO]. 
-Source url: https://github.com/MarcoForte/FBA_Matting -License: MIT License -""" -import torch -import torch.nn as nn -from torch.nn import functional as F - - -class Conv2d(nn.Conv2d): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True, - ): - super(Conv2d, self).__init__( - in_channels, - out_channels, - kernel_size, - stride, - padding, - dilation, - groups, - bias, - ) - - def forward(self, x): - # return super(Conv2d, self).forward(x) - weight = self.weight - weight_mean = ( - weight.mean(dim=1, keepdim=True) - .mean(dim=2, keepdim=True) - .mean(dim=3, keepdim=True) - ) - weight = weight - weight_mean - # std = (weight).view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5 - std = ( - torch.sqrt(torch.var(weight.view(weight.size(0), -1), dim=1) + 1e-12).view( - -1, 1, 1, 1 - ) - + 1e-5 - ) - weight = weight / std.expand_as(weight) - return F.conv2d( - x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups - ) - - -def BatchNorm2d(num_features): - return nn.GroupNorm(num_channels=num_features, num_groups=32) diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/ffc.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/ffc.py deleted file mode 100644 index 2f8aeb1411fc1537916275fd3243706cc74b8d3c..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/ffc.py +++ /dev/null @@ -1,433 +0,0 @@ -# Fast Fourier Convolution NeurIPS 2020 -# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py -# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from saicinpainting.training.modules.base import get_activation, BaseDiscriminator -from saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper -from saicinpainting.training.modules.squeeze_excitation import SELayer -from saicinpainting.utils import get_shape - - -class FFCSE_block(nn.Module): - - def __init__(self, channels, ratio_g): - super(FFCSE_block, self).__init__() - in_cg = int(channels * ratio_g) - in_cl = channels - in_cg - r = 16 - - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.conv1 = nn.Conv2d(channels, channels // r, - kernel_size=1, bias=True) - self.relu1 = nn.ReLU(inplace=True) - self.conv_a2l = None if in_cl == 0 else nn.Conv2d( - channels // r, in_cl, kernel_size=1, bias=True) - self.conv_a2g = None if in_cg == 0 else nn.Conv2d( - channels // r, in_cg, kernel_size=1, bias=True) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - x = x if type(x) is tuple else (x, 0) - id_l, id_g = x - - x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1) - x = self.avgpool(x) - x = self.relu1(self.conv1(x)) - - x_l = 0 if self.conv_a2l is None else id_l * \ - self.sigmoid(self.conv_a2l(x)) - x_g = 0 if self.conv_a2g is None else id_g * \ - self.sigmoid(self.conv_a2g(x)) - return x_l, x_g - - -class FourierUnit(nn.Module): - - def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear', - spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'): - # bn_layer not used - super(FourierUnit, self).__init__() - self.groups = groups - - self.conv_layer = 
torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0), - out_channels=out_channels * 2, - kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False) - self.bn = torch.nn.BatchNorm2d(out_channels * 2) - self.relu = torch.nn.ReLU(inplace=True) - - # squeeze and excitation block - self.use_se = use_se - if use_se: - if se_kwargs is None: - se_kwargs = {} - self.se = SELayer(self.conv_layer.in_channels, **se_kwargs) - - self.spatial_scale_factor = spatial_scale_factor - self.spatial_scale_mode = spatial_scale_mode - self.spectral_pos_encoding = spectral_pos_encoding - self.ffc3d = ffc3d - self.fft_norm = fft_norm - - def forward(self, x): - batch = x.shape[0] - - if self.spatial_scale_factor is not None: - orig_size = x.shape[-2:] - x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False) - - r_size = x.size() - # (batch, c, h, w/2+1, 2) - fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1) - ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm) - ffted = torch.stack((ffted.real, ffted.imag), dim=-1) - ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) - ffted = ffted.view((batch, -1,) + ffted.size()[3:]) - - if self.spectral_pos_encoding: - height, width = ffted.shape[-2:] - coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted) - coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted) - ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1) - - if self.use_se: - ffted = self.se(ffted) - - ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1) - ffted = self.relu(self.bn(ffted)) - - ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute( - 0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2) - ffted = torch.complex(ffted[..., 0], ffted[..., 1]) - - ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:] - output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) - - if self.spatial_scale_factor is not None: - output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False) - - return output - - -class SpectralTransform(nn.Module): - - def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, **fu_kwargs): - # bn_layer not used - super(SpectralTransform, self).__init__() - self.enable_lfu = enable_lfu - if stride == 2: - self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) - else: - self.downsample = nn.Identity() - - self.stride = stride - self.conv1 = nn.Sequential( - nn.Conv2d(in_channels, out_channels // - 2, kernel_size=1, groups=groups, bias=False), - nn.BatchNorm2d(out_channels // 2), - nn.ReLU(inplace=True) - ) - self.fu = FourierUnit( - out_channels // 2, out_channels // 2, groups, **fu_kwargs) - if self.enable_lfu: - self.lfu = FourierUnit( - out_channels // 2, out_channels // 2, groups) - self.conv2 = torch.nn.Conv2d( - out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False) - - def forward(self, x): - - x = self.downsample(x) - x = self.conv1(x) - output = self.fu(x) - - if self.enable_lfu: - n, c, h, w = x.shape - split_no = 2 - split_s = h // split_no - xs = torch.cat(torch.split( - x[:, :c // 4], split_s, dim=-2), dim=1).contiguous() - xs = torch.cat(torch.split(xs, split_s, dim=-1), - dim=1).contiguous() - xs = self.lfu(xs) - xs = xs.repeat(1, 1, split_no, split_no).contiguous() - else: - xs = 0 - - output = self.conv2(x + 
output + xs) - - return output - - -class FFC(nn.Module): - - def __init__(self, in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride=1, padding=0, - dilation=1, groups=1, bias=False, enable_lfu=True, - padding_type='reflect', gated=False, **spectral_kwargs): - super(FFC, self).__init__() - - assert stride == 1 or stride == 2, "Stride should be 1 or 2." - self.stride = stride - - in_cg = int(in_channels * ratio_gin) - in_cl = in_channels - in_cg - out_cg = int(out_channels * ratio_gout) - out_cl = out_channels - out_cg - #groups_g = 1 if groups == 1 else int(groups * ratio_gout) - #groups_l = 1 if groups == 1 else groups - groups_g - - self.ratio_gin = ratio_gin - self.ratio_gout = ratio_gout - self.global_in_num = in_cg - - module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d - self.convl2l = module(in_cl, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d - self.convl2g = module(in_cl, out_cg, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d - self.convg2l = module(in_cg, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform - self.convg2g = module( - in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs) - - self.gated = gated - module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d - self.gate = module(in_channels, 2, 1) - - def forward(self, x): - x_l, x_g = x if type(x) is tuple else (x, 0) - out_xl, out_xg = 0, 0 - - if self.gated: - total_input_parts = [x_l] - if torch.is_tensor(x_g): - total_input_parts.append(x_g) - total_input = torch.cat(total_input_parts, dim=1) - - gates = torch.sigmoid(self.gate(total_input)) - g2l_gate, l2g_gate = gates.chunk(2, dim=1) - else: - g2l_gate, l2g_gate = 1, 1 - - if self.ratio_gout != 1: - out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate - if self.ratio_gout != 0: - out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g) - - return out_xl, out_xg - - -class FFC_BN_ACT(nn.Module): - - def __init__(self, in_channels, out_channels, - kernel_size, ratio_gin, ratio_gout, - stride=1, padding=0, dilation=1, groups=1, bias=False, - norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity, - padding_type='reflect', - enable_lfu=True, **kwargs): - super(FFC_BN_ACT, self).__init__() - self.ffc = FFC(in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride, padding, dilation, - groups, bias, enable_lfu, padding_type=padding_type, **kwargs) - lnorm = nn.Identity if ratio_gout == 1 else norm_layer - gnorm = nn.Identity if ratio_gout == 0 else norm_layer - global_channels = int(out_channels * ratio_gout) - self.bn_l = lnorm(out_channels - global_channels) - self.bn_g = gnorm(global_channels) - - lact = nn.Identity if ratio_gout == 1 else activation_layer - gact = nn.Identity if ratio_gout == 0 else activation_layer - self.act_l = lact(inplace=True) - self.act_g = gact(inplace=True) - - def forward(self, x): - x_l, x_g = self.ffc(x) - x_l = self.act_l(self.bn_l(x_l)) - x_g = self.act_g(self.bn_g(x_g)) - return x_l, x_g - - -class FFCResnetBlock(nn.Module): - def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1, - spatial_transform_kwargs=None, inline=False, **conv_kwargs): - super().__init__() - 
self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - **conv_kwargs) - self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - **conv_kwargs) - if spatial_transform_kwargs is not None: - self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs) - self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs) - self.inline = inline - - def forward(self, x): - if self.inline: - x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:] - else: - x_l, x_g = x if type(x) is tuple else (x, 0) - - id_l, id_g = x_l, x_g - - x_l, x_g = self.conv1((x_l, x_g)) - x_l, x_g = self.conv2((x_l, x_g)) - - x_l, x_g = id_l + x_l, id_g + x_g - out = x_l, x_g - if self.inline: - out = torch.cat(out, dim=1) - return out - - -class ConcatTupleLayer(nn.Module): - def forward(self, x): - assert isinstance(x, tuple) - x_l, x_g = x - assert torch.is_tensor(x_l) or torch.is_tensor(x_g) - if not torch.is_tensor(x_g): - return x_l - return torch.cat(x, dim=1) - - -class FFCResNetGenerator(nn.Module): - def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - padding_type='reflect', activation_layer=nn.ReLU, - up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), - init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={}, - spatial_transform_layers=None, spatial_transform_kwargs={}, - add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}): - assert (n_blocks >= 0) - super().__init__() - - model = [nn.ReflectionPad2d(3), - FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer, - activation_layer=activation_layer, **init_conv_kwargs)] - - ### downsample - for i in range(n_downsampling): - mult = 2 ** i - if i == n_downsampling - 1: - cur_conv_kwargs = dict(downsample_conv_kwargs) - cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0) - else: - cur_conv_kwargs = downsample_conv_kwargs - model += [FFC_BN_ACT(min(max_features, ngf * mult), - min(max_features, ngf * mult * 2), - kernel_size=3, stride=2, padding=1, - norm_layer=norm_layer, - activation_layer=activation_layer, - **cur_conv_kwargs)] - - mult = 2 ** n_downsampling - feats_num_bottleneck = min(max_features, ngf * mult) - - ### resnet blocks - for i in range(n_blocks): - cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, **resnet_conv_kwargs) - if spatial_transform_layers is not None and i in spatial_transform_layers: - cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs) - model += [cur_resblock] - - model += [ConcatTupleLayer()] - - ### upsample - for i in range(n_downsampling): - mult = 2 ** (n_downsampling - i) - model += [nn.ConvTranspose2d(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=2, padding=1, output_padding=1), - up_norm_layer(min(max_features, int(ngf * mult / 2))), - up_activation] - - if out_ffc: - model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, inline=True, **out_ffc_kwargs)] - - model += [nn.ReflectionPad2d(3), - nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] - 
if add_out_act: - model.append(get_activation('tanh' if add_out_act is True else add_out_act)) - self.model = nn.Sequential(*model) - - def forward(self, input): - return self.model(input) - - -class FFCNLayerDiscriminator(BaseDiscriminator): - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512, - init_conv_kwargs={}, conv_kwargs={}): - super().__init__() - self.n_layers = n_layers - - def _act_ctor(inplace=True): - return nn.LeakyReLU(negative_slope=0.2, inplace=inplace) - - kw = 3 - padw = int(np.ceil((kw-1.0)/2)) - sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer, - activation_layer=_act_ctor, **init_conv_kwargs)]] - - nf = ndf - for n in range(1, n_layers): - nf_prev = nf - nf = min(nf * 2, max_features) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=2, padding=padw, - norm_layer=norm_layer, - activation_layer=_act_ctor, - **conv_kwargs) - ] - sequence.append(cur_model) - - nf_prev = nf - nf = min(nf * 2, 512) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=1, padding=padw, - norm_layer=norm_layer, - activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs), - **conv_kwargs), - ConcatTupleLayer() - ] - sequence.append(cur_model) - - sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] - - for n in range(len(sequence)): - setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) - - def get_all_activations(self, x): - res = [x] - for n in range(self.n_layers + 2): - model = getattr(self, 'model' + str(n)) - res.append(model(res[-1])) - return res[1:] - - def forward(self, x): - act = self.get_all_activations(x) - feats = [] - for out in act[:-1]: - if isinstance(out, tuple): - if torch.is_tensor(out[1]): - out = torch.cat(out, dim=1) - else: - out = out[0] - feats.append(out) - return act[-1], feats diff --git a/spaces/nateraw/dockerplayground/start_server.sh b/spaces/nateraw/dockerplayground/start_server.sh deleted file mode 100644 index 01ae830fd64c84e2f82d64836d3ef8e42e712361..0000000000000000000000000000000000000000 --- a/spaces/nateraw/dockerplayground/start_server.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -echo "Starting Jupyter Lab with token $JUPYTER_TOKEN" - -jupyter-lab \ - --ip 0.0.0.0 \ - --port 7860 \ - --no-browser \ - --allow-root \ - --ServerApp.token="$JUPYTER_TOKEN" \ - --ServerApp.tornado_settings="{'headers': {'Content-Security-Policy': 'frame-ancestors *'}}" \ - --ServerApp.disable_check_xsrf=True \ - --ServerApp.cookie_options="{'SameSite': 'None', 'Secure': True}" \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Office 2013 Ita Preattivato Torrent.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Office 2013 Ita Preattivato Torrent.md deleted file mode 100644 index 26d48a17e4cb8cc24d5c2333082e2d7ff0a5370e..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Office 2013 Ita Preattivato Torrent.md +++ /dev/null @@ -1,20 +0,0 @@ -
How to Download and Install Office 2013 ITA Preattivato Torrent

Office 2013 is a popular productivity suite that includes applications such as Word, Excel, PowerPoint, Outlook, and more. If you want to use Office 2013 in Italian, you can download and install a pre-activated version from a torrent site. However, you should be aware of the risks and legal issues involved in using pirated software.

In this article, we will show you how to download and install Office 2013 ITA preattivato torrent safely and easily. Follow these steps:

office 2013 ita preattivato torrent

Download: https://urlcod.com/2uI9O3

1. Find a reliable torrent site that offers Office 2013 ITA preattivato torrent. You can use a search engine or a torrent aggregator to find one. Some examples are The Pirate Bay, Kickass Torrents, and RARBG.
2. Download a torrent client that can handle magnet links. A torrent client is software that allows you to download files from other users who are sharing them. Some examples are uTorrent, BitTorrent, and qBittorrent.
3. Open the torrent site and search for Office 2013 ITA preattivato torrent. You should see a list of results with different file sizes and seeders. Seeders are users who have the complete file and are uploading it to others. Choose the one with the most seeders and the smallest file size.
4. Click on the magnet link or the download button to start downloading the torrent file. A magnet link is a URL that contains information about the file, such as its name, size, and hash. A download button will download a small file that contains the same information.
5. Open the torrent file with your torrent client. It will automatically connect to other users who have the file and start downloading it. You can monitor the progress and speed of the download in your torrent client.
6. Once the download is complete, you will have a folder with the Office 2013 ITA preattivato files. You can open it and run the setup.exe file to install Office 2013 on your computer. You do not need to enter any product key or activation code as it is already pre-activated.

Congratulations! You have successfully downloaded and installed Office 2013 ITA preattivato torrent on your computer. You can now enjoy using Office 2013 in Italian for free.

However, you should also be aware of the potential risks and legal issues involved in using pirated software. Pirated software may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Pirated software may also not receive updates or support from Microsoft, which can affect its performance and security. Pirated software may also violate the intellectual property rights of Microsoft and other software developers, which can result in legal consequences or fines.

Therefore, we recommend that you use only genuine and licensed software from official sources. You can purchase Office 2013 from Microsoft's website or authorized resellers. You can also use alternative productivity suites that are free and legal, such as LibreOffice, OpenOffice, or Google Docs.

We hope this article was helpful and informative. Thank you for reading!
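Step 4 above notes that a magnet link is just a URI whose query parameters describe the shared file. As a purely illustrative aside (not part of the deleted file, and using a fabricated placeholder URI), two of the fields it mentions can be read with Python's standard library:

# Minimal sketch: inspecting the fields of a magnet URI with the standard library.
# The URI is a made-up placeholder; "dn" is the display name, "xt" carries the info hash.
from urllib.parse import urlparse, parse_qs

magnet = "magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567&dn=example-file"

parsed = urlparse(magnet)        # scheme is "magnet"; the parameters sit in the query string
params = parse_qs(parsed.query)  # e.g. {'xt': ['urn:btih:...'], 'dn': ['example-file']}

print(params["dn"][0])  # display name of the shared file
print(params["xt"][0])  # exact-topic field carrying the urn:btih info hash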
      \ No newline at end of file diff --git a/spaces/nikhedward/TL-DR_summarize_it/app.py b/spaces/nikhedward/TL-DR_summarize_it/app.py deleted file mode 100644 index 408752af3cfe79321083f7fd4f7bea8f23ff2ff8..0000000000000000000000000000000000000000 --- a/spaces/nikhedward/TL-DR_summarize_it/app.py +++ /dev/null @@ -1,49 +0,0 @@ -import gradio as gr -import transformers -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM - -title = " Text Summarizer 📝" - - -text_1 = """ -Miss Brill is the story of an old woman told brilliantly and realistically, balancing thoughts and emotions that sustain her late solitary life amidst all the bustle of modern life. Miss Brill is a regular visitor on Sundays to the Jardins Publiques (the Public Gardens) of a small French suburb where she sits and watches all sorts of people come and go. She listens to the band playing, loves to watch people and guess what keeps them going, and enjoys contemplating the world as a great stage upon which actors perform. She finds herself to be another actor among the so many she sees, or at least herself as 'part of the performance after all.' One Sunday Miss Brill puts on her fur and goes to the Public Gardens as usual. The evening ends with her sudden realization that she is old and lonely, a realization brought to her by a conversation she overhears between a boy and a girl, presumably lovers, who comment on her unwelcome presence in their vicinity. Miss Brill is sad and depressed as she returns home, not stopping by as usual to buy her Sunday delicacy, a slice of honey-cake. She retires to her dark room, puts the fur back into the box and imagines that she has heard something cry. -""" - -text_2 = """ -Senior British royals, including Prince William and his wife, Duchess Kate, went to church on Easter Sunday without the queen. Queen Elizabeth II, who has been experiencing mobility problems, did not attend the service at St. George's Chapel on the grounds of Windsor Castle, a fixture in the royals' calendar. William and Kate, known as the Duke and Duchess of Cambridge, were accompanied by two of their three children: Prince George, 8, and Princess Charlotte, 6. Also in attendance were the queen's youngest son, Prince Edward, with his wife Sophie and their children, and Princess Eugenie, the daughter of Prince Andrew. Last week, she had a visit from her grandson Prince Harry and his wife Meghan, a spokesperson for the couple confirmed to USA TODAY – the first time the couple has visited the U.K. together since they stepped down as working royals in 2020 and moved to California. - -""" -text_3 = """ -In the article “Bats,” by Debbie Dean, we learn that in contrast to some mistaken beliefs, bats have sight, are mammals, and are not especially likely to carry rabies. Bats are relatively misunderstood and unappreciated. Bats have some interesting physical features. They have similar bone structure and skeletons to that of humans, so they are not winged rodents. They are color blind, so they use echolocation if there is not sufficient light. Otherwise, their sight is enough. Species of bats total about a thousand. The species come in a variety of sizes and have unique diets. Most eat insects, but some eat plant products and small animals. However, vampire bats drink blood, which can be harmful to livestock. Farmers have accidentally killed many helpful bats while trying to rid themselves of vampire bats. Bats can actually be helpful to humans. They destroy unwanted bugs, spread fruit seeds, and pollinate plants. 
However, the survival of bats is not known because many are killed by human disruptions and predators. The bat population has dropped steadily and may continue to drop. Hopefully, we will realize that although bats look different than our favorite animals, we -can learn to accept and admire their uniqueness -""" - - - - -sample_texts = [[text_1], [text_2], [text_3]] - -desc = """ -

This is an abstractive text summarization app built on a fine-tuned bart-large-cnn model. The abstractive approach rephrases the document while preserving its complete meaning, which produces a more human-like summary. -

Note: for faster summaries, provide shorter inputs. The current model supports a context of 1024 tokens, and anything longer is truncated. -

      Sample text inputs are provided at the bottom!
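As a side note to the truncation caveat above, here is a minimal sketch of how the 1024-token limit plays out with the Hugging Face `transformers` API; it is illustrative only and simply mirrors the checkpoint and core generation settings used further down in this file:

```python
# Sketch of the truncation caveat: inputs beyond 1024 tokens are silently dropped.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "nikhedward/bart-large-cnn-finetuned-multi-news"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

long_text = "A very long article. " * 2000          # far beyond the model's window
inputs = tokenizer.encode(long_text, return_tensors="pt",
                          max_length=1024, truncation=True)
print(inputs.shape)                                  # torch.Size([1, 1024]) -- truncated

summary_ids = model.generate(inputs, num_beams=4, max_length=150, early_stopping=True)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```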

      -""" - - - -model_name = "nikhedward/bart-large-cnn-finetuned-multi-news" -tokenizer = AutoTokenizer.from_pretrained(model_name) -model = AutoModelForSeq2SeqLM.from_pretrained(model_name) - -def auto_summarize(inp): - inp = inp.replace('\n','') - inp = tokenizer.encode(inp, return_tensors='pt', max_length=1024, truncation=True) - summary_ids = model.generate(inp, num_beams=4, max_length=150, early_stopping=True, do_sample=True, top_k=50, top_p=0.95) - summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True) - return summary - -interface = gr.Interface(fn=auto_summarize, inputs=gr.inputs.Textbox(lines=10, label="Input Text"), description = desc, theme = "dark-peach", -examples = sample_texts, title = title, outputs="text", css=".footer{display:none !important}") - -interface.launch() - diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/README.md b/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/README.md deleted file mode 100644 index 912cc29927542bfe4258d3208cf52d73cb0ea477..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/README.md +++ /dev/null @@ -1,6 +0,0 @@ -This directory provides definitions for a few common models, dataloaders, scheduler, -and optimizers that are often used in training. -The definition of these objects are provided in the form of lazy instantiation: -their arguments can be edited by users before constructing the objects. - -They can be imported, or loaded by `model_zoo.get_config` API in users' own configs. diff --git a/spaces/nupurkmr9/concept-ablation/__init__.py b/spaces/nupurkmr9/concept-ablation/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/oliver2023/chatgpt-on-wechat/channel/wechat/wechaty_channel.py b/spaces/oliver2023/chatgpt-on-wechat/channel/wechat/wechaty_channel.py deleted file mode 100644 index 65348bacc96c46f0e9b9eee38dc062de3805bf56..0000000000000000000000000000000000000000 --- a/spaces/oliver2023/chatgpt-on-wechat/channel/wechat/wechaty_channel.py +++ /dev/null @@ -1,125 +0,0 @@ -# encoding:utf-8 - -""" -wechaty channel -Python Wechaty - https://github.com/wechaty/python-wechaty -""" -import base64 -import os -import time -import asyncio -from bridge.context import Context -from wechaty_puppet import FileBox -from wechaty import Wechaty, Contact -from wechaty.user import Message -from bridge.reply import * -from bridge.context import * -from channel.chat_channel import ChatChannel -from channel.wechat.wechaty_message import WechatyMessage -from common.log import logger -from common.singleton import singleton -from config import conf -try: - from voice.audio_convert import any_to_sil -except Exception as e: - pass - -@singleton -class WechatyChannel(ChatChannel): - NOT_SUPPORT_REPLYTYPE = [] - def __init__(self): - super().__init__() - - def startup(self): - config = conf() - token = config.get('wechaty_puppet_service_token') - os.environ['WECHATY_PUPPET_SERVICE_TOKEN'] = token - asyncio.run(self.main()) - - async def main(self): - - loop = asyncio.get_event_loop() - #将asyncio的loop传入处理线程 - self.handler_pool._initializer= lambda: asyncio.set_event_loop(loop) - self.bot = Wechaty() - self.bot.on('login', self.on_login) - self.bot.on('message', self.on_message) - await self.bot.start() - - async def on_login(self, contact: Contact): - self.user_id = contact.contact_id - self.name = contact.name - logger.info('[WX] login user={}'.format(contact)) - - 
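    # send() resolves the receiver on the bot's event loop (a Room for group chats,
    # otherwise a Contact) via asyncio.run_coroutine_threadsafe, then branches on
    # reply.type: TEXT / ERROR / INFO go out as plain text, VOICE is converted to a
    # .sil file (length capped at 60 s) and wrapped in a FileBox, IMAGE_URL is fetched
    # with FileBox.from_url, and IMAGE wraps in-memory bytes with FileBox.from_base64.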
# 统一的发送函数,每个Channel自行实现,根据reply的type字段发送不同类型的消息 - def send(self, reply: Reply, context: Context): - receiver_id = context['receiver'] - loop = asyncio.get_event_loop() - if context['isgroup']: - receiver = asyncio.run_coroutine_threadsafe(self.bot.Room.find(receiver_id),loop).result() - else: - receiver = asyncio.run_coroutine_threadsafe(self.bot.Contact.find(receiver_id),loop).result() - msg = None - if reply.type == ReplyType.TEXT: - msg = reply.content - asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result() - logger.info('[WX] sendMsg={}, receiver={}'.format(reply, receiver)) - elif reply.type == ReplyType.ERROR or reply.type == ReplyType.INFO: - msg = reply.content - asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result() - logger.info('[WX] sendMsg={}, receiver={}'.format(reply, receiver)) - elif reply.type == ReplyType.VOICE: - voiceLength = None - file_path = reply.content - sil_file = os.path.splitext(file_path)[0] + '.sil' - voiceLength = int(any_to_sil(file_path, sil_file)) - if voiceLength >= 60000: - voiceLength = 60000 - logger.info('[WX] voice too long, length={}, set to 60s'.format(voiceLength)) - # 发送语音 - t = int(time.time()) - msg = FileBox.from_file(sil_file, name=str(t) + '.sil') - if voiceLength is not None: - msg.metadata['voiceLength'] = voiceLength - asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result() - try: - os.remove(file_path) - if sil_file != file_path: - os.remove(sil_file) - except Exception as e: - pass - logger.info('[WX] sendVoice={}, receiver={}'.format(reply.content, receiver)) - elif reply.type == ReplyType.IMAGE_URL: # 从网络下载图片 - img_url = reply.content - t = int(time.time()) - msg = FileBox.from_url(url=img_url, name=str(t) + '.png') - asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result() - logger.info('[WX] sendImage url={}, receiver={}'.format(img_url,receiver)) - elif reply.type == ReplyType.IMAGE: # 从文件读取图片 - image_storage = reply.content - image_storage.seek(0) - t = int(time.time()) - msg = FileBox.from_base64(base64.b64encode(image_storage.read()), str(t) + '.png') - asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result() - logger.info('[WX] sendImage, receiver={}'.format(receiver)) - - async def on_message(self, msg: Message): - """ - listen for message event - """ - try: - cmsg = await WechatyMessage(msg) - except NotImplementedError as e: - logger.debug('[WX] {}'.format(e)) - return - except Exception as e: - logger.exception('[WX] {}'.format(e)) - return - logger.debug('[WX] message:{}'.format(cmsg)) - room = msg.room() # 获取消息来自的群聊. 如果消息不是来自群聊, 则返回None - isgroup = room is not None - ctype = cmsg.ctype - context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg) - if context: - logger.info('[WX] receiveMsg={}, context={}'.format(cmsg, context)) - self.produce(context) \ No newline at end of file diff --git a/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/README.md b/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/README.md deleted file mode 100644 index 0bf50653704091fd4cb182b46801624d8fd2b810..0000000000000000000000000000000000000000 --- a/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/README.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Manticore 13B Chat -emoji: 🏃 -colorFrom: blue -colorTo: gray -sdk: gradio -sdk_version: 3.29.0 -app_file: tabbed.py -pinned: false -duplicated_from: openaccess-ai-collective/ggml-ui ---- - -# GGML UI Inference w/ HuggingFace Spaces - -- Fork this space to use your own GGML models. 
Simply update the [./config.yml](./config.yml) -- Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui) - -Brought to you by [OpenAccess AI Collective](https://github.com/OpenAccess-AI-Collective) \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/installation.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/installation.md deleted file mode 100644 index 4a9146a22620699a7faabb45844809be581a4d7a..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/installation.md +++ /dev/null @@ -1,142 +0,0 @@ - - -# 설치 - -사용하시는 라이브러리에 맞는 🤗 Diffusers를 설치하세요. - -🤗 Diffusers는 Python 3.8+, PyTorch 1.7.0+ 및 flax에서 테스트되었습니다. 사용중인 딥러닝 라이브러리에 대한 아래의 설치 안내를 따르세요. - -- [PyTorch 설치 안내](https://pytorch.org/get-started/locally/) -- [Flax 설치 안내](https://flax.readthedocs.io/en/latest/) - -## pip를 이용한 설치 - -[가상 환경](https://docs.python.org/3/library/venv.html)에 🤗 Diffusers를 설치해야 합니다. -Python 가상 환경에 익숙하지 않은 경우 [가상환경 pip 설치 가이드](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)를 살펴보세요. -가상 환경을 사용하면 서로 다른 프로젝트를 더 쉽게 관리하고, 종속성간의 호환성 문제를 피할 수 있습니다. - -프로젝트 디렉토리에 가상 환경을 생성하는 것으로 시작하세요: - -```bash -python -m venv .env -``` - -그리고 가상 환경을 활성화합니다: - -```bash -source .env/bin/activate -``` - -이제 다음의 명령어로 🤗 Diffusers를 설치할 준비가 되었습니다: - -**PyTorch의 경우** - -```bash -pip install diffusers["torch"] -``` - -**Flax의 경우** - -```bash -pip install diffusers["flax"] -``` - -## 소스로부터 설치 - -소스에서 `diffusers`를 설치하기 전에, `torch` 및 `accelerate`이 설치되어 있는지 확인하세요. - -`torch` 설치에 대해서는 [torch docs](https://pytorch.org/get-started/locally/#start-locally)를 참고하세요. - -다음과 같이 `accelerate`을 설치하세요. - -```bash -pip install accelerate -``` - -다음 명령어를 사용하여 소스에서 🤗 Diffusers를 설치하세요: - -```bash -pip install git+https://github.com/huggingface/diffusers -``` - -이 명령어는 최신 `stable` 버전이 아닌 최첨단 `main` 버전을 설치합니다. -`main` 버전은 최신 개발 정보를 최신 상태로 유지하는 데 유용합니다. -예를 들어 마지막 공식 릴리즈 이후 버그가 수정되었지만, 새 릴리즈가 아직 출시되지 않은 경우입니다. -그러나 이는 `main` 버전이 항상 안정적이지 않을 수 있음을 의미합니다. -우리는 `main` 버전이 지속적으로 작동하도록 노력하고 있으며, 대부분의 문제는 보통 몇 시간 또는 하루 안에 해결됩니다. -문제가 발생하면 더 빨리 해결할 수 있도록 [Issue](https://github.com/huggingface/transformers/issues)를 열어주세요! - - -## 편집가능한 설치 - -다음을 수행하려면 편집가능한 설치가 필요합니다: - -* 소스 코드의 `main` 버전을 사용 -* 🤗 Diffusers에 기여 (코드의 변경 사항을 테스트하기 위해 필요) - -저장소를 복제하고 다음 명령어를 사용하여 🤗 Diffusers를 설치합니다: - -```bash -git clone https://github.com/huggingface/diffusers.git -cd diffusers -``` - -**PyTorch의 경우** - -``` -pip install -e ".[torch]" -``` - -**Flax의 경우** - -``` -pip install -e ".[flax]" -``` - -이러한 명령어들은 저장소를 복제한 폴더와 Python 라이브러리 경로를 연결합니다. -Python은 이제 일반 라이브러리 경로에 더하여 복제한 폴더 내부를 살펴봅니다. -예를들어 Python 패키지가 `~/anaconda3/envs/main/lib/python3.8/site-packages/`에 설치되어 있는 경우 Python은 복제한 폴더인 `~/diffusers/`도 검색합니다. - - - -라이브러리를 계속 사용하려면 `diffusers` 폴더를 유지해야 합니다. - - - -이제 다음 명령어를 사용하여 최신 버전의 🤗 Diffusers로 쉽게 업데이트할 수 있습니다: - -```bash -cd ~/diffusers/ -git pull -``` - -이렇게 하면, 다음에 실행할 때 Python 환경이 🤗 Diffusers의 `main` 버전을 찾게 됩니다. - -## 텔레메트리 로깅에 대한 알림 - -우리 라이브러리는 `from_pretrained()` 요청 중에 텔레메트리 정보를 원격으로 수집합니다. -이 데이터에는 Diffusers 및 PyTorch/Flax의 버전, 요청된 모델 또는 파이프라인 클래스, 그리고 허브에서 호스팅되는 경우 사전학습된 체크포인트에 대한 경로를 포함합니다. -이 사용 데이터는 문제를 디버깅하고 새로운 기능의 우선순위를 지정하는데 도움이 됩니다. -텔레메트리는 HuggingFace 허브에서 모델과 파이프라인을 불러올 때만 전송되며, 로컬 사용 중에는 수집되지 않습니다. - -우리는 추가 정보를 공유하지 않기를 원하는 사람이 있다는 것을 이해하고 개인 정보를 존중하므로, 터미널에서 `DISABLE_TELEMETRY` 환경 변수를 설정하여 텔레메트리 수집을 비활성화할 수 있습니다. 
- -Linux/MacOS에서: -```bash -export DISABLE_TELEMETRY=YES -``` - -Windows에서: -```bash -set DISABLE_TELEMETRY=YES -``` \ No newline at end of file diff --git a/spaces/pakooo/Text2Image/README.md b/spaces/pakooo/Text2Image/README.md deleted file mode 100644 index f326898bc24067a7c44b0e55bfa2ace562581751..0000000000000000000000000000000000000000 --- a/spaces/pakooo/Text2Image/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text To Image -emoji: 👀 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -duplicated_from: yizhangliu/Text-to-Image ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/parkyzh/bingo/src/lib/utils.ts b/spaces/parkyzh/bingo/src/lib/utils.ts deleted file mode 100644 index 07feedb34e356b1b3cf867872f32d47a96ae12fb..0000000000000000000000000000000000000000 --- a/spaces/parkyzh/bingo/src/lib/utils.ts +++ /dev/null @@ -1,138 +0,0 @@ -import { clsx, type ClassValue } from 'clsx' -import { customAlphabet } from 'nanoid' -import { twMerge } from 'tailwind-merge' - -export function cn(...inputs: ClassValue[]) { - return twMerge(clsx(inputs)) -} - -export const nanoid = customAlphabet( - '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', - 7 -) // 7-character random string - -export function createChunkDecoder() { - const decoder = new TextDecoder() - return function (chunk: Uint8Array | undefined): string { - if (!chunk) return '' - return decoder.decode(chunk, { stream: true }) - } -} - -export function random (start: number, end: number) { - return start + Math.ceil(Math.random() * (end - start)) -} - -export function randomIP() { - return `11.${random(104, 107)}.${random(1, 255)}.${random(1, 255)}` -} - -export function parseHeadersFromCurl(content: string) { - const re = /-H '([^:]+):\s*([^']+)/mg - const headers: HeadersInit = {} - content = content.replaceAll('-H "', '-H \'').replaceAll('" ^', '\'\\').replaceAll('^\\^"', '"') // 将 cmd curl 转成 bash curl - content.replace(re, (_: string, key: string, value: string) => { - headers[key] = value - return '' - }) - - return headers -} - -export const ChunkKeys = ['BING_HEADER', 'BING_HEADER1', 'BING_HEADER2'] -export function encodeHeadersToCookie(content: string) { - const base64Content = btoa(content) - const contentChunks = base64Content.match(/.{1,4000}/g) || [] - return ChunkKeys.map((key, index) => `${key}=${contentChunks[index] ?? ''}`) -} - -export function extraCurlFromCookie(cookies: Partial<{ [key: string]: string }>) { - let base64Content = '' - ChunkKeys.forEach((key) => { - base64Content += (cookies[key] || '') - }) - try { - return atob(base64Content) - } catch(e) { - return '' - } -} - -export function extraHeadersFromCookie(cookies: Partial<{ [key: string]: string }>) { - return parseHeadersFromCurl(extraCurlFromCookie(cookies)) -} - -export function formatDate(input: string | number | Date): string { - const date = new Date(input) - return date.toLocaleDateString('en-US', { - month: 'long', - day: 'numeric', - year: 'numeric' - }) -} - -export function parseCookie(cookie: string, cookieName: string) { - const targetCookie = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`).test(cookie) ? RegExp.$1 : cookie - return targetCookie ? decodeURIComponent(targetCookie).trim() : cookie.indexOf('=') === -1 ? 
cookie.trim() : '' -} - -export function parseCookies(cookie: string, cookieNames: string[]) { - const cookies: { [key: string]: string } = {} - cookieNames.forEach(cookieName => { - cookies[cookieName] = parseCookie(cookie, cookieName) - }) - return cookies -} - -export const DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0' -export const DEFAULT_IP = process.env.BING_IP || randomIP() - -export function parseUA(ua?: string, default_ua = DEFAULT_UA) { - return / EDGE?/i.test(decodeURIComponent(ua || '')) ? decodeURIComponent(ua!.trim()) : default_ua -} - -export function createHeaders(cookies: Partial<{ [key: string]: string }>, defaultHeaders?: Partial<{ [key: string]: string }>) { - let { - BING_COOKIE = process.env.BING_COOKIE, - BING_UA = process.env.BING_UA, - BING_IP = process.env.BING_IP, - BING_HEADER = process.env.BING_HEADER, - } = cookies - - if (BING_HEADER) { - return extraHeadersFromCookie({ - BING_HEADER, - ...cookies, - }) - } - - const ua = parseUA(BING_UA) - - if (!BING_COOKIE) { - BING_COOKIE = defaultHeaders?.IMAGE_BING_COOKIE || 'xxx' // hf 暂时不用 Cookie 也可以正常使用 - } - - const parsedCookie = parseCookie(BING_COOKIE, '_U') - if (!parsedCookie) { - throw new Error('Invalid Cookie') - } - return { - 'x-forwarded-for': BING_IP || DEFAULT_IP, - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', - 'User-Agent': ua!, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: `_U=${parsedCookie}` || '', - } -} - -export class WatchDog { - private tid = 0 - watch(fn: Function, timeout = 2000) { - clearTimeout(this.tid) - this.tid = setTimeout(fn, timeout + Math.random() * 1000) - } - reset() { - clearTimeout(this.tid) - } -} diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/oldresnet152.py b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/oldresnet152.py deleted file mode 100644 index f783c2c0c0b11a9bde635dac3090c38af448ab88..0000000000000000000000000000000000000000 --- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/oldresnet152.py +++ /dev/null @@ -1,922 +0,0 @@ - -import torch -import torch.nn as nn - -from functools import reduce -from torch.autograd import Variable - -def load_places_resnet152(weight_file): - model = OldResNet152() - state_dict = torch.load(weight_file) - model.load_state_dict(state_dict) - return model - -class LambdaBase(nn.Sequential): - def __init__(self, fn, *args): - super(LambdaBase, self).__init__(*args) - self.lambda_func = fn - - def forward_prepare(self, input): - output = [] - for module in self._modules.values(): - output.append(module(input)) - return output if output else input - -class Lambda(LambdaBase): - def forward(self, input): - return self.lambda_func(self.forward_prepare(input)) - -class LambdaMap(LambdaBase): - def forward(self, input): - return list(map(self.lambda_func,self.forward_prepare(input))) - -class LambdaReduce(LambdaBase): - def forward(self, input): - return reduce(self.lambda_func,self.forward_prepare(input)) - - -class OldResNet152(nn.Sequential): - def __init__(self): - children = [ -# resnet152_places365 = nn.Sequential( # Sequential, - nn.Conv2d(3,64,(7, 7),(2, 2),(3, 3),1,1,bias=False), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.MaxPool2d((3, 3),(2, 2),(1, 1)), - nn.Sequential( # Sequential, - nn.Sequential( # Sequential, - 
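        # Every residual unit in the Sequential blocks below follows the same Torch7-to-PyTorch
        # translation: LambdaMap plays the role of nn.ConcatTable (the same input is fed to a
        # 1x1 -> 3x3 -> 1x1 bottleneck branch and to a shortcut branch, either a 1x1 projection
        # convolution or a Lambda identity), LambdaReduce(lambda x, y: x + y) plays nn.CAddTable
        # (element-wise sum of the two branches), and the trailing nn.ReLU() is the usual
        # post-addition activation of a ResNet bottleneck.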
LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(64,64,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - ), - nn.Sequential( # Sequential, - nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - ), - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(256,64,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(256,64,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - ), - nn.Sequential( # Sequential, - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(256,128,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3),(2, 2),(1, 1),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - nn.Sequential( # Sequential, - nn.Conv2d(256,512,(1, 1),(2, 2),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - 
LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - ), - nn.Sequential( # Sequential, - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(512,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(2, 2),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - nn.Sequential( # Sequential, - nn.Conv2d(512,1024,(1, 1),(2, 2),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # 
Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - 
nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - 
LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - 
nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), 
- nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(1024), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - ), - nn.Sequential( # Sequential, - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - nn.ReLU(), - nn.Conv2d(512,512,(3, 3),(2, 2),(1, 1),1,1,bias=False), - nn.BatchNorm2d(512), - nn.ReLU(), - nn.Conv2d(512,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(2048), - ), - nn.Sequential( # Sequential, - nn.Conv2d(1024,2048,(1, 1),(2, 2),(0, 0),1,1,bias=False), - nn.BatchNorm2d(2048), - ), - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(2048,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - nn.ReLU(), - nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(512), - nn.ReLU(), - nn.Conv2d(512,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(2048), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(2048,512,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(512), - nn.ReLU(), - nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,1,bias=False), - nn.BatchNorm2d(512), - nn.ReLU(), - nn.Conv2d(512,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False), - nn.BatchNorm2d(2048), - ), - Lambda(lambda x: x), # Identity, - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - nn.ReLU(), - ), - ), - nn.AvgPool2d((7, 7),(1, 1)), - Lambda(lambda x: x.view(x.size(0),-1)), # View, - nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ) - ,nn.Linear(2048,365)), # Linear, - ] - - super(OldResNet152, self).__init__(*children) diff --git a/spaces/penguin2023/vncs/start.sh b/spaces/penguin2023/vncs/start.sh 
deleted file mode 100644 index d9b29a76d1e08e744c659939c871e8f69fad978d..0000000000000000000000000000000000000000 --- a/spaces/penguin2023/vncs/start.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# ls -# # umask 0077 # use safe default permissions -# mkdir -p "$HO/.vnc" # create config directory -# chmod go-rwx "$HO/.vnc" # enforce safe permissions -# ls -l -tigervnc_path="/tigervnc-${tigervnc_version}.x86_64/usr/bin" - -# Start TigerVNC -if [ ! -z $VNC_PASSWD ]; then - ${tigervnc_path}/vncpasswd -f <<< "$VNC_PASSWD" > ".vnc/passwd" - ${tigervnc_path}/vncserver -rfbport 5900 -geometry ${GEOMETRY} -depth ${DEPTH} -else - ${tigervnc_path}/vncpasswd -f <<< "" > ".vnc/passwd" - ${tigervnc_path}/vncserver -rfbport 5900 -geometry ${GEOMETRY} -depth ${DEPTH} -SecurityTypes None -fi - -cat /noVNC-${noVNC_version}/utils/launch.sh - -# Start noVNC -/noVNC-${noVNC_version}/utils/launch.sh \ No newline at end of file diff --git a/spaces/philsark/clip-guided-diffusion-identity/README.md b/spaces/philsark/clip-guided-diffusion-identity/README.md deleted file mode 100644 index c87d045351d4dd8f256b19497ce1ba28b9e36f8b..0000000000000000000000000000000000000000 --- a/spaces/philsark/clip-guided-diffusion-identity/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Clip-Guided-Diffusion-Identity -emoji: 📈 -colorFrom: purple -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/Liaobots.py b/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/Liaobots.py deleted file mode 100644 index 985bf53ddfd3877db3c60aedee86db11ec0e7243..0000000000000000000000000000000000000000 --- a/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/Liaobots.py +++ /dev/null @@ -1,47 +0,0 @@ -import os, uuid, requests -from ...typing import sha256, Dict, get_type_hints - -url = 'https://liaobots.com' -model = ['gpt-4-0613'] -supports_stream = True -needs_auth = True - -models = { - 'gpt-4-0613': { - "id":"gpt-4-0613", - "name":"GPT-4", - "maxLength":24000, - "tokenLimit":8000 - } -} - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - - print(kwargs) - - headers = { - 'authority': 'liaobots.com', - 'content-type': 'application/json', - 'origin': 'https://liaobots.com', - 'referer': 'https://liaobots.com/', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36', - 'x-auth-code': 'P6cPPK6Z8JDG3' - } - - json_data = { - 'conversationId': str(uuid.uuid4()), - 'model': models[model], - 'authcode':"jrzVZMJiwN0NU", - 'messages': messages, - 'key': '', - 'prompt': "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. 
Respond using markdown.", - } - - response = requests.post('https://liaobots.com/api/chat', - headers=headers, json=json_data, stream=True) - - for token in response.iter_content(chunk_size=2046): - yield (token.decode('cp1251')) - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/pixiou/bingo/README.md b/spaces/pixiou/bingo/README.md deleted file mode 100644 index d65eafbc8431818f738e8e086455fa6159f101bb..0000000000000000000000000000000000000000 --- a/spaces/pixiou/bingo/README.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: bingo -emoji: 📉 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
      - -# Bingo - -Bingo,一个让你呼吸顺畅 New Bing。 - -高度还原 New Bing 网页版的主要操作,国内可用,兼容绝大多数微软 Bing AI 的功能,可自行部署使用。 - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Gthub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -
      - -## 演示站点 - -https://bing.github1s.tk - - - -[![img](./docs/images/demo.png)](https://bing.github1s.tk) - -## 功能和特点 - -- 完全基于 Next.js 重写,高度还原 New Bing Web 版 UI,使用体验和 Bing AI 基本一致。 -- 支持 Docker 构建,方便快捷地部署和访问。 -- Cookie 可全局配置,全局共享。 -- 支持持续语音对话 - -## RoadMap - - - [x] 支持 wss 转发 - - [x] 支持一键部署 - - [x] 优化移动端展示 - - [x] 支持画图 - - [x] 支持语音输入(支持语音指令,目前仅支持 PC 版 Edge 及 Chrome 浏览器) - - [x] 支持语音输出(需要手动开启) - - [x] 支持图片输入 - - [x] 支持自定义域名 - - [ ] 支持历史记录 - - [ ] 适配深色模式 - - [ ] 支持内置提示词 - - [ ] 支持离线访问 - - [ ] 国际化翻译 - -## 一键部署 -你也可以一键部署自己的 New Bing AI 到 🤗 HuggingFace 。 - -### 部署到 Huggingface -1. 点击此图标 -[![Deploy to HuggingFace](https://img.shields.io/badge/%E7%82%B9%E5%87%BB%E9%83%A8%E7%BD%B2-%F0%9F%A4%97-fff)](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic),配置可以不改。 - -2. 部署署完成后,点击“设置” 》“站点域名”,点一下,复制一下 HF 域名信息,然后分享给别人即可。 - -> Huggingface 不支持绑定自己的域名,不过我们可以使用曲线救国的方式来达到这个目的 -> 1. 方式二,借助 Cloudflare Workers [部署Cloudflare Workers](#使用Cloudflare-Workers自定义域名) -> 2. 方式一,借助 Github Pages 及 iframe [如何绑定域名](https://github.com/weaigc/bingo/issues/4) - -### 使用Cloudflare Workers自定义域名 - -> 核心代码 [worker.js](./cloudflare/worker.js) - -- [注册 Cloudflare 账号](https://dash.cloudflare.com/sign-up) - -- 添加一个新的网站,需要你有自己的域名并且将域名`Name Server`托管给 Cloudflare 才行(更多信息可自行 Google) - -- 通过左侧菜单进入「Workers」,并点击「Create a Worker」。 - -- 创建 Worker 服务,复制 [worker.js](./cloudflare/worker.js) 全部代码,粘贴至创建的服务中,根据注释进行改动,保存并部署。 - -- 触发器 中自定义访问域名。 - -### 部署其它平台 -
      - -由于其他平台目前遭到 New Bing 封杀,会遇到很多问题,不再做推荐,有需要的可以自行查看 - - -#### 部署到 Netlify -[![Deploy to Netlify Button](https://www.netlify.com/img/deploy/button.svg)](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo) - -#### 部署到 Vercel -如果你是 Vercel 付费用户,可以点以下链接一键部署到 Vercel。免费版本有[接口超时限制](https://vercel.com/docs/concepts/limits/overview),不推荐使用 - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example) - -#### 部署到 Render - -[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/weaigc/bingo) -
      - -## 环境和依赖 - -- Node.js >= 18 -- Bing AI 的[身份信息](#如何获取-BING_HEADER)) - -## 安装和使用 - -> 由于目前微软封杀比较严重,推荐优先使用 [部署 Huggingface](#部署到-huggingface) 。 - -* 使用 Node 启动 - -```bash -git clone https://github.com/weaigc/bingo.git -npm i # 推荐使用 pnpm i -npm run build -npm run start -``` - -* 使用 Docker 启动 -```bash -docker pull weaigc/bingo -docker run --rm -it -p 7860:7860 weaigc/bingo -# 或者 -docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo -``` - -## 如何获取 BING_HEADER -> 配置了 BING_HEADER 意味着你将自己的账号共享给所有使用此服务的人,如果不需要免登录画图的功能,不建议设置此变量 - -打开 https://www.bing.com 并登录,然后访问 https://www.bing.com/turing/captcha/challenge ,通过人机校验,然后 - -![BING HEADER](./docs/images/curl.png) - -> 复制出来的内容应该如下所示。确认格式无误后,打开 https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 ,粘贴进去,点击“转成 BING_HEADER 并复制”,然后从剪切板粘贴即可得到。(你也可以先在网页上进行验证) - -以下是格式参考,需要注意的是,网页端保存的格式是以`curl`开头, 而服务端配置的 `BING_HEADER` 是 `base64` 格式,两者不能互通。 -
      -正常格式/网页端保存的格式(格式仅供参考) - -``` -curl 'https://www.bing.com/turing/captcha/challenge' \ - -H 'authority: www.bing.com' \ - -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \ - -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \ - -H 'cache-control: max-age=0' \ - -H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; _RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0uhgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; 
USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \ - -H 'dnt: 1' \ - -H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \ - -H 'sec-ch-ua-arch: "x86"' \ - -H 'sec-ch-ua-bitness: "64"' \ - -H 'sec-ch-ua-full-version: "116.0.1938.29"' \ - -H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \ - -H 'sec-ch-ua-mobile: ?0' \ - -H 'sec-ch-ua-model: ""' \ - -H 'sec-ch-ua-platform: "Windows"' \ - -H 'sec-ch-ua-platform-version: "15.0.0"' \ - -H 'sec-fetch-dest: document' \ - -H 'sec-fetch-mode: navigate' \ - -H 'sec-fetch-site: none' \ - -H 'sec-fetch-user: ?1' \ - -H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \ - -H 'sec-ms-gec-version: 1-116.0.1938.29' \ - -H 'upgrade-insecure-requests: 1' \ - -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \ - -H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \ - -H 'x-edge-shopping-flag: 1' \ - --compressed -``` -
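As an illustrative alternative to the converter page mentioned above (not an official step), the saved curl text can be base64-encoded locally; the filename `bing-header.txt` below is only an assumption for the sketch:

```python
# Assumes the curl command copied above was saved verbatim to bing-header.txt (hypothetical name).
import base64, pathlib

curl_text = pathlib.Path("bing-header.txt").read_text(encoding="utf-8")
print(base64.b64encode(curl_text.encode()).decode())  # paste the output as BING_HEADER
```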
      - -
      -转成base64之后的格式(BING_HEADER只能使用 base64 之后的格式) - -``` -Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptSTBmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3ltRFJ5ODdr
ZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NTd1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1VSURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRvY3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgICAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA== -``` -
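The Base64 text above is simply the single-line form of the saved `curl` command. As a minimal sketch (not taken from this project's code), a `BING_HEADER` value can be produced as follows, assuming the "Copy as cURL" command was saved to a local file whose name (`bing-curl.txt`) is only an example:

```python
import base64
from pathlib import Path

# Read the curl command saved from the browser's "Copy as cURL" action.
# The file name here is only an example.
raw = Path("bing-curl.txt").read_text(encoding="utf-8")

# Join the multi-line command into a single line.
one_line = " ".join(raw.split())

# The resulting string is the value to place in BING_HEADER.
print(base64.b64encode(one_line.encode("utf-8")).decode("ascii"))
```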
- - -## Acknowledgements - - Thanks to [EdgeGPT](https://github.com/acheong08/EdgeGPT) for the proxy API approach. - - Thanks to [Vercel AI](https://github.com/vercel-labs/ai-chatbot) for the basic scaffolding, and to [ChatHub](https://github.com/chathub-dev/chathub) and [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) for parts of the code. - - -## Q&A and Discussion - - - -## License - -MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE). - - diff --git a/spaces/pknez/face-swap-docker/Dockerfile b/spaces/pknez/face-swap-docker/Dockerfile deleted file mode 100644 index c3ee9b1e84a98313a8ddea96c99c573c9c795b8f..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM python:3.11 -WORKDIR /usr/src/app -RUN apt-get update && apt-get install -y libgl1-mesa-glx -COPY requirements.txt ./ -RUN pip install --no-cache-dir -r requirements.txt -COPY . . -CMD ["python", "run.py"] diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py deleted file mode 100644 index edc19627dba6835339768ccbaf726db21d8ac212..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py +++ /dev/null @@ -1,197 +0,0 @@ -""" - pygments.style - ~~~~~~~~~~~~~~ - - Basic style object. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pip._vendor.pygments.token import Token, STANDARD_TYPES - -# Default mapping of ansixxx to RGB colors. -_ansimap = { - # dark - 'ansiblack': '000000', - 'ansired': '7f0000', - 'ansigreen': '007f00', - 'ansiyellow': '7f7fe0', - 'ansiblue': '00007f', - 'ansimagenta': '7f007f', - 'ansicyan': '007f7f', - 'ansigray': 'e5e5e5', - # normal - 'ansibrightblack': '555555', - 'ansibrightred': 'ff0000', - 'ansibrightgreen': '00ff00', - 'ansibrightyellow': 'ffff00', - 'ansibrightblue': '0000ff', - 'ansibrightmagenta': 'ff00ff', - 'ansibrightcyan': '00ffff', - 'ansiwhite': 'ffffff', -} -# mapping of deprecated #ansixxx colors to new color names -_deprecated_ansicolors = { - # dark - '#ansiblack': 'ansiblack', - '#ansidarkred': 'ansired', - '#ansidarkgreen': 'ansigreen', - '#ansibrown': 'ansiyellow', - '#ansidarkblue': 'ansiblue', - '#ansipurple': 'ansimagenta', - '#ansiteal': 'ansicyan', - '#ansilightgray': 'ansigray', - # normal - '#ansidarkgray': 'ansibrightblack', - '#ansired': 'ansibrightred', - '#ansigreen': 'ansibrightgreen', - '#ansiyellow': 'ansibrightyellow', - '#ansiblue': 'ansibrightblue', - '#ansifuchsia': 'ansibrightmagenta', - '#ansiturquoise': 'ansibrightcyan', - '#ansiwhite': 'ansiwhite', -} -ansicolors = set(_ansimap) - - -class StyleMeta(type): - - def __new__(mcs, name, bases, dct): - obj = type.__new__(mcs, name, bases, dct) - for token in STANDARD_TYPES: - if token not in obj.styles: - obj.styles[token] = '' - - def colorformat(text): - if text in ansicolors: - return text - if text[0:1] == '#': - col = text[1:] - if len(col) == 6: - return col - elif len(col) == 3: - return col[0] * 2 + col[1] * 2 + col[2] * 2 - elif text == '': - return '' - elif text.startswith('var') or text.startswith('calc'): - return text - assert False, "wrong color format %r" % text - - _styles = obj._styles = {} - - for ttype in obj.styles: - for token in ttype.split(): - if token in _styles: - continue - ndef = _styles.get(token.parent, None) - styledefs = obj.styles.get(token, '').split() - if not ndef or
token is None: - ndef = ['', 0, 0, 0, '', '', 0, 0, 0] - elif 'noinherit' in styledefs and token is not Token: - ndef = _styles[Token][:] - else: - ndef = ndef[:] - _styles[token] = ndef - for styledef in obj.styles.get(token, '').split(): - if styledef == 'noinherit': - pass - elif styledef == 'bold': - ndef[1] = 1 - elif styledef == 'nobold': - ndef[1] = 0 - elif styledef == 'italic': - ndef[2] = 1 - elif styledef == 'noitalic': - ndef[2] = 0 - elif styledef == 'underline': - ndef[3] = 1 - elif styledef == 'nounderline': - ndef[3] = 0 - elif styledef[:3] == 'bg:': - ndef[4] = colorformat(styledef[3:]) - elif styledef[:7] == 'border:': - ndef[5] = colorformat(styledef[7:]) - elif styledef == 'roman': - ndef[6] = 1 - elif styledef == 'sans': - ndef[7] = 1 - elif styledef == 'mono': - ndef[8] = 1 - else: - ndef[0] = colorformat(styledef) - - return obj - - def style_for_token(cls, token): - t = cls._styles[token] - ansicolor = bgansicolor = None - color = t[0] - if color in _deprecated_ansicolors: - color = _deprecated_ansicolors[color] - if color in ansicolors: - ansicolor = color - color = _ansimap[color] - bgcolor = t[4] - if bgcolor in _deprecated_ansicolors: - bgcolor = _deprecated_ansicolors[bgcolor] - if bgcolor in ansicolors: - bgansicolor = bgcolor - bgcolor = _ansimap[bgcolor] - - return { - 'color': color or None, - 'bold': bool(t[1]), - 'italic': bool(t[2]), - 'underline': bool(t[3]), - 'bgcolor': bgcolor or None, - 'border': t[5] or None, - 'roman': bool(t[6]) or None, - 'sans': bool(t[7]) or None, - 'mono': bool(t[8]) or None, - 'ansicolor': ansicolor, - 'bgansicolor': bgansicolor, - } - - def list_styles(cls): - return list(cls) - - def styles_token(cls, ttype): - return ttype in cls._styles - - def __iter__(cls): - for token in cls._styles: - yield token, cls.style_for_token(token) - - def __len__(cls): - return len(cls._styles) - - -class Style(metaclass=StyleMeta): - - #: overall background color (``None`` means transparent) - background_color = '#ffffff' - - #: highlight background color - highlight_color = '#ffffcc' - - #: line number font color - line_number_color = 'inherit' - - #: line number background color - line_number_background_color = 'transparent' - - #: special line number font color - line_number_special_color = '#000000' - - #: special line number background color - line_number_special_background_color = '#ffffc0' - - #: Style definitions for individual token types. - styles = {} - - # Attribute for lexers defined within Pygments. If set - # to True, the style is not shown in the style gallery - # on the website. This is intended for language-specific - # styles. - web_style_gallery_exclude = False diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/cli/convert.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/cli/convert.py deleted file mode 100644 index 1ce9b5f3c16adcd07672d5dbddcff9f44f4b82a7..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/cli/convert.py +++ /dev/null @@ -1,273 +0,0 @@ -from __future__ import annotations - -import os.path -import re -import shutil -import tempfile -import zipfile -from glob import iglob - -from ..bdist_wheel import bdist_wheel -from ..wheelfile import WheelFile -from . import WheelError - -try: - from setuptools import Distribution -except ImportError: - from distutils.dist import Distribution - -egg_info_re = re.compile( - r""" - (?P<name>.+?)-(?P<ver>.+?) - (-(?P<pyver>py\d\.\d+) - (-(?P<arch>.+?))? 
- )?.egg$""", - re.VERBOSE, -) - - -class _bdist_wheel_tag(bdist_wheel): - # allow the client to override the default generated wheel tag - # The default bdist_wheel implementation uses python and abi tags - # of the running python process. This is not suitable for - # generating/repackaging prebuild binaries. - - full_tag_supplied = False - full_tag = None # None or a (pytag, soabitag, plattag) triple - - def get_tag(self): - if self.full_tag_supplied and self.full_tag is not None: - return self.full_tag - else: - return bdist_wheel.get_tag(self) - - -def egg2wheel(egg_path: str, dest_dir: str): - filename = os.path.basename(egg_path) - match = egg_info_re.match(filename) - if not match: - raise WheelError(f"Invalid egg file name: {filename}") - - egg_info = match.groupdict() - dir = tempfile.mkdtemp(suffix="_e2w") - if os.path.isfile(egg_path): - # assume we have a bdist_egg otherwise - with zipfile.ZipFile(egg_path) as egg: - egg.extractall(dir) - else: - # support buildout-style installed eggs directories - for pth in os.listdir(egg_path): - src = os.path.join(egg_path, pth) - if os.path.isfile(src): - shutil.copy2(src, dir) - else: - shutil.copytree(src, os.path.join(dir, pth)) - - pyver = egg_info["pyver"] - if pyver: - pyver = egg_info["pyver"] = pyver.replace(".", "") - - arch = (egg_info["arch"] or "any").replace(".", "_").replace("-", "_") - - # assume all binary eggs are for CPython - abi = "cp" + pyver[2:] if arch != "any" else "none" - - root_is_purelib = egg_info["arch"] is None - if root_is_purelib: - bw = bdist_wheel(Distribution()) - else: - bw = _bdist_wheel_tag(Distribution()) - - bw.root_is_pure = root_is_purelib - bw.python_tag = pyver - bw.plat_name_supplied = True - bw.plat_name = egg_info["arch"] or "any" - if not root_is_purelib: - bw.full_tag_supplied = True - bw.full_tag = (pyver, abi, arch) - - dist_info_dir = os.path.join(dir, "{name}-{ver}.dist-info".format(**egg_info)) - bw.egg2dist(os.path.join(dir, "EGG-INFO"), dist_info_dir) - bw.write_wheelfile(dist_info_dir, generator="egg2wheel") - wheel_name = "{name}-{ver}-{pyver}-{}-{}.whl".format(abi, arch, **egg_info) - with WheelFile(os.path.join(dest_dir, wheel_name), "w") as wf: - wf.write_files(dir) - - shutil.rmtree(dir) - - -def parse_wininst_info(wininfo_name, egginfo_name): - """Extract metadata from filenames. - - Extracts the 4 metadataitems needed (name, version, pyversion, arch) from - the installer filename and the name of the egg-info directory embedded in - the zipfile (if any). - - The egginfo filename has the format:: - - name-ver(-pyver)(-arch).egg-info - - The installer filename has the format:: - - name-ver.arch(-pyver).exe - - Some things to note: - - 1. The installer filename is not definitive. An installer can be renamed - and work perfectly well as an installer. So more reliable data should - be used whenever possible. - 2. The egg-info data should be preferred for the name and version, because - these come straight from the distutils metadata, and are mandatory. - 3. The pyver from the egg-info data should be ignored, as it is - constructed from the version of Python used to build the installer, - which is irrelevant - the installer filename is correct here (even to - the point that when it's not there, any version is implied). - 4. The architecture must be taken from the installer filename, as it is - not included in the egg-info data. - 5. Architecture-neutral installers still have an architecture because the - installer format itself (being executable) is architecture-specific. 
We - should therefore ignore the architecture if the content is pure-python. - """ - - egginfo = None - if egginfo_name: - egginfo = egg_info_re.search(egginfo_name) - if not egginfo: - raise ValueError(f"Egg info filename {egginfo_name} is not valid") - - # Parse the wininst filename - # 1. Distribution name (up to the first '-') - w_name, sep, rest = wininfo_name.partition("-") - if not sep: - raise ValueError(f"Installer filename {wininfo_name} is not valid") - - # Strip '.exe' - rest = rest[:-4] - # 2. Python version (from the last '-', must start with 'py') - rest2, sep, w_pyver = rest.rpartition("-") - if sep and w_pyver.startswith("py"): - rest = rest2 - w_pyver = w_pyver.replace(".", "") - else: - # Not version specific - use py2.py3. While it is possible that - # pure-Python code is not compatible with both Python 2 and 3, there - # is no way of knowing from the wininst format, so we assume the best - # here (the user can always manually rename the wheel to be more - # restrictive if needed). - w_pyver = "py2.py3" - # 3. Version and architecture - w_ver, sep, w_arch = rest.rpartition(".") - if not sep: - raise ValueError(f"Installer filename {wininfo_name} is not valid") - - if egginfo: - w_name = egginfo.group("name") - w_ver = egginfo.group("ver") - - return {"name": w_name, "ver": w_ver, "arch": w_arch, "pyver": w_pyver} - - -def wininst2wheel(path, dest_dir): - with zipfile.ZipFile(path) as bdw: - # Search for egg-info in the archive - egginfo_name = None - for filename in bdw.namelist(): - if ".egg-info" in filename: - egginfo_name = filename - break - - info = parse_wininst_info(os.path.basename(path), egginfo_name) - - root_is_purelib = True - for zipinfo in bdw.infolist(): - if zipinfo.filename.startswith("PLATLIB"): - root_is_purelib = False - break - if root_is_purelib: - paths = {"purelib": ""} - else: - paths = {"platlib": ""} - - dist_info = "{name}-{ver}".format(**info) - datadir = "%s.data/" % dist_info - - # rewrite paths to trick ZipFile into extracting an egg - # XXX grab wininst .ini - between .exe, padding, and first zip file. - members = [] - egginfo_name = "" - for zipinfo in bdw.infolist(): - key, basename = zipinfo.filename.split("/", 1) - key = key.lower() - basepath = paths.get(key, None) - if basepath is None: - basepath = datadir + key.lower() + "/" - oldname = zipinfo.filename - newname = basepath + basename - zipinfo.filename = newname - del bdw.NameToInfo[oldname] - bdw.NameToInfo[newname] = zipinfo - # Collect member names, but omit '' (from an entry like "PLATLIB/" - if newname: - members.append(newname) - # Remember egg-info name for the egg2dist call below - if not egginfo_name: - if newname.endswith(".egg-info"): - egginfo_name = newname - elif ".egg-info/" in newname: - egginfo_name, sep, _ = newname.rpartition("/") - dir = tempfile.mkdtemp(suffix="_b2w") - bdw.extractall(dir, members) - - # egg2wheel - abi = "none" - pyver = info["pyver"] - arch = (info["arch"] or "any").replace(".", "_").replace("-", "_") - # Wininst installers always have arch even if they are not - # architecture-specific (because the format itself is). - # So, assume the content is architecture-neutral if root is purelib. - if root_is_purelib: - arch = "any" - # If the installer is architecture-specific, it's almost certainly also - # CPython-specific. 
- if arch != "any": - pyver = pyver.replace("py", "cp") - wheel_name = "-".join((dist_info, pyver, abi, arch)) - if root_is_purelib: - bw = bdist_wheel(Distribution()) - else: - bw = _bdist_wheel_tag(Distribution()) - - bw.root_is_pure = root_is_purelib - bw.python_tag = pyver - bw.plat_name_supplied = True - bw.plat_name = info["arch"] or "any" - - if not root_is_purelib: - bw.full_tag_supplied = True - bw.full_tag = (pyver, abi, arch) - - dist_info_dir = os.path.join(dir, "%s.dist-info" % dist_info) - bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir) - bw.write_wheelfile(dist_info_dir, generator="wininst2wheel") - - wheel_path = os.path.join(dest_dir, wheel_name) - with WheelFile(wheel_path, "w") as wf: - wf.write_files(dir) - - shutil.rmtree(dir) - - -def convert(files, dest_dir, verbose): - for pat in files: - for installer in iglob(pat): - if os.path.splitext(installer)[1] == ".egg": - conv = egg2wheel - else: - conv = wininst2wheel - - if verbose: - print(f"{installer}... ", flush=True) - - conv(installer, dest_dir) - if verbose: - print("OK") diff --git a/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/structuredquery.h b/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/structuredquery.h deleted file mode 100644 index bca20a9adac790f1f46ca915c121beb01b07c0f6..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/structuredquery.h +++ /dev/null @@ -1,2478 +0,0 @@ - - -/* this ALWAYS GENERATED file contains the definitions for the interfaces */ - - - /* File created by MIDL compiler version 7.00.0499 */ -/* Compiler settings for structuredquery.idl: - Oicf, W1, Zp8, env=Win32 (32b run) - protocol : dce , ms_ext, c_ext, robust - error checks: allocation ref bounds_check enum stub_data - VC __declspec() decoration level: - __declspec(uuid()), __declspec(selectany), __declspec(novtable) - DECLSPEC_UUID(), MIDL_INTERFACE() -*/ -//@@MIDL_FILE_HEADING( ) - -#pragma warning( disable: 4049 ) /* more than 64k source lines */ - - -/* verify that the version is high enough to compile this file*/ -#ifndef __REQUIRED_RPCNDR_H_VERSION__ -#define __REQUIRED_RPCNDR_H_VERSION__ 475 -#endif - -/* verify that the version is high enough to compile this file*/ -#ifndef __REQUIRED_RPCSAL_H_VERSION__ -#define __REQUIRED_RPCSAL_H_VERSION__ 100 -#endif - -#include "rpc.h" -#include "rpcndr.h" - -#ifndef __RPCNDR_H_VERSION__ -#error this stub requires an updated version of -#endif // __RPCNDR_H_VERSION__ - -#ifndef COM_NO_WINDOWS_H -#include "windows.h" -#include "ole2.h" -#endif /*COM_NO_WINDOWS_H*/ - -#ifndef __structuredquery_h__ -#define __structuredquery_h__ - -#if defined(_MSC_VER) && (_MSC_VER >= 1020) -#pragma once -#endif - -/* Forward Declarations */ - -#ifndef __IQueryParser_FWD_DEFINED__ -#define __IQueryParser_FWD_DEFINED__ -typedef interface IQueryParser IQueryParser; -#endif /* __IQueryParser_FWD_DEFINED__ */ - - -#ifndef __IConditionFactory_FWD_DEFINED__ -#define __IConditionFactory_FWD_DEFINED__ -typedef interface IConditionFactory IConditionFactory; -#endif /* __IConditionFactory_FWD_DEFINED__ */ - - -#ifndef __IQuerySolution_FWD_DEFINED__ -#define __IQuerySolution_FWD_DEFINED__ -typedef interface IQuerySolution IQuerySolution; -#endif /* __IQuerySolution_FWD_DEFINED__ */ - - -#ifndef __ICondition_FWD_DEFINED__ -#define __ICondition_FWD_DEFINED__ -typedef interface ICondition ICondition; -#endif /* __ICondition_FWD_DEFINED__ */ - - -#ifndef __IConditionGenerator_FWD_DEFINED__ -#define 
__IConditionGenerator_FWD_DEFINED__ -typedef interface IConditionGenerator IConditionGenerator; -#endif /* __IConditionGenerator_FWD_DEFINED__ */ - - -#ifndef __IRichChunk_FWD_DEFINED__ -#define __IRichChunk_FWD_DEFINED__ -typedef interface IRichChunk IRichChunk; -#endif /* __IRichChunk_FWD_DEFINED__ */ - - -#ifndef __IInterval_FWD_DEFINED__ -#define __IInterval_FWD_DEFINED__ -typedef interface IInterval IInterval; -#endif /* __IInterval_FWD_DEFINED__ */ - - -#ifndef __IMetaData_FWD_DEFINED__ -#define __IMetaData_FWD_DEFINED__ -typedef interface IMetaData IMetaData; -#endif /* __IMetaData_FWD_DEFINED__ */ - - -#ifndef __IEntity_FWD_DEFINED__ -#define __IEntity_FWD_DEFINED__ -typedef interface IEntity IEntity; -#endif /* __IEntity_FWD_DEFINED__ */ - - -#ifndef __IRelationship_FWD_DEFINED__ -#define __IRelationship_FWD_DEFINED__ -typedef interface IRelationship IRelationship; -#endif /* __IRelationship_FWD_DEFINED__ */ - - -#ifndef __INamedEntity_FWD_DEFINED__ -#define __INamedEntity_FWD_DEFINED__ -typedef interface INamedEntity INamedEntity; -#endif /* __INamedEntity_FWD_DEFINED__ */ - - -#ifndef __ISchemaProvider_FWD_DEFINED__ -#define __ISchemaProvider_FWD_DEFINED__ -typedef interface ISchemaProvider ISchemaProvider; -#endif /* __ISchemaProvider_FWD_DEFINED__ */ - - -#ifndef __ITokenCollection_FWD_DEFINED__ -#define __ITokenCollection_FWD_DEFINED__ -typedef interface ITokenCollection ITokenCollection; -#endif /* __ITokenCollection_FWD_DEFINED__ */ - - -#ifndef __INamedEntityCollector_FWD_DEFINED__ -#define __INamedEntityCollector_FWD_DEFINED__ -typedef interface INamedEntityCollector INamedEntityCollector; -#endif /* __INamedEntityCollector_FWD_DEFINED__ */ - - -#ifndef __ISchemaLocalizerSupport_FWD_DEFINED__ -#define __ISchemaLocalizerSupport_FWD_DEFINED__ -typedef interface ISchemaLocalizerSupport ISchemaLocalizerSupport; -#endif /* __ISchemaLocalizerSupport_FWD_DEFINED__ */ - - -#ifndef __IQueryParserManager_FWD_DEFINED__ -#define __IQueryParserManager_FWD_DEFINED__ -typedef interface IQueryParserManager IQueryParserManager; -#endif /* __IQueryParserManager_FWD_DEFINED__ */ - - -#ifndef __QueryParser_FWD_DEFINED__ -#define __QueryParser_FWD_DEFINED__ - -#ifdef __cplusplus -typedef class QueryParser QueryParser; -#else -typedef struct QueryParser QueryParser; -#endif /* __cplusplus */ - -#endif /* __QueryParser_FWD_DEFINED__ */ - - -#ifndef __NegationCondition_FWD_DEFINED__ -#define __NegationCondition_FWD_DEFINED__ - -#ifdef __cplusplus -typedef class NegationCondition NegationCondition; -#else -typedef struct NegationCondition NegationCondition; -#endif /* __cplusplus */ - -#endif /* __NegationCondition_FWD_DEFINED__ */ - - -#ifndef __CompoundCondition_FWD_DEFINED__ -#define __CompoundCondition_FWD_DEFINED__ - -#ifdef __cplusplus -typedef class CompoundCondition CompoundCondition; -#else -typedef struct CompoundCondition CompoundCondition; -#endif /* __cplusplus */ - -#endif /* __CompoundCondition_FWD_DEFINED__ */ - - -#ifndef __LeafCondition_FWD_DEFINED__ -#define __LeafCondition_FWD_DEFINED__ - -#ifdef __cplusplus -typedef class LeafCondition LeafCondition; -#else -typedef struct LeafCondition LeafCondition; -#endif /* __cplusplus */ - -#endif /* __LeafCondition_FWD_DEFINED__ */ - - -#ifndef __ConditionFactory_FWD_DEFINED__ -#define __ConditionFactory_FWD_DEFINED__ - -#ifdef __cplusplus -typedef class ConditionFactory ConditionFactory; -#else -typedef struct ConditionFactory ConditionFactory; -#endif /* __cplusplus */ - -#endif /* __ConditionFactory_FWD_DEFINED__ */ - - -#ifndef 
__Interval_FWD_DEFINED__ -#define __Interval_FWD_DEFINED__ - -#ifdef __cplusplus -typedef class Interval Interval; -#else -typedef struct Interval Interval; -#endif /* __cplusplus */ - -#endif /* __Interval_FWD_DEFINED__ */ - - -#ifndef __QueryParserManager_FWD_DEFINED__ -#define __QueryParserManager_FWD_DEFINED__ - -#ifdef __cplusplus -typedef class QueryParserManager QueryParserManager; -#else -typedef struct QueryParserManager QueryParserManager; -#endif /* __cplusplus */ - -#endif /* __QueryParserManager_FWD_DEFINED__ */ - - -/* header files for imported files */ -#include "oaidl.h" -#include "ocidl.h" -#include "propidl.h" - -#ifdef __cplusplus -extern "C"{ -#endif - - -/* interface __MIDL_itf_structuredquery_0000_0000 */ -/* [local] */ - - - - - - - - - - - -typedef /* [v1_enum] */ -enum tagCONDITION_TYPE - { CT_AND_CONDITION = 0, - CT_OR_CONDITION = ( CT_AND_CONDITION + 1 ) , - CT_NOT_CONDITION = ( CT_OR_CONDITION + 1 ) , - CT_LEAF_CONDITION = ( CT_NOT_CONDITION + 1 ) - } CONDITION_TYPE; - -typedef /* [v1_enum] */ -enum tagCONDITION_OPERATION - { COP_IMPLICIT = 0, - COP_EQUAL = ( COP_IMPLICIT + 1 ) , - COP_NOTEQUAL = ( COP_EQUAL + 1 ) , - COP_LESSTHAN = ( COP_NOTEQUAL + 1 ) , - COP_GREATERTHAN = ( COP_LESSTHAN + 1 ) , - COP_LESSTHANOREQUAL = ( COP_GREATERTHAN + 1 ) , - COP_GREATERTHANOREQUAL = ( COP_LESSTHANOREQUAL + 1 ) , - COP_VALUE_STARTSWITH = ( COP_GREATERTHANOREQUAL + 1 ) , - COP_VALUE_ENDSWITH = ( COP_VALUE_STARTSWITH + 1 ) , - COP_VALUE_CONTAINS = ( COP_VALUE_ENDSWITH + 1 ) , - COP_VALUE_NOTCONTAINS = ( COP_VALUE_CONTAINS + 1 ) , - COP_DOSWILDCARDS = ( COP_VALUE_NOTCONTAINS + 1 ) , - COP_WORD_EQUAL = ( COP_DOSWILDCARDS + 1 ) , - COP_WORD_STARTSWITH = ( COP_WORD_EQUAL + 1 ) , - COP_APPLICATION_SPECIFIC = ( COP_WORD_STARTSWITH + 1 ) - } CONDITION_OPERATION; - -typedef /* [v1_enum] */ -enum tagSTRUCTURED_QUERY_SINGLE_OPTION - { SQSO_SCHEMA = 0, - SQSO_LOCALE_WORD_BREAKING = ( SQSO_SCHEMA + 1 ) , - SQSO_WORD_BREAKER = ( SQSO_LOCALE_WORD_BREAKING + 1 ) , - SQSO_NATURAL_SYNTAX = ( SQSO_WORD_BREAKER + 1 ) , - SQSO_AUTOMATIC_WILDCARD = ( SQSO_NATURAL_SYNTAX + 1 ) , - SQSO_TRACE_LEVEL = ( SQSO_AUTOMATIC_WILDCARD + 1 ) , - SQSO_LANGUAGE_KEYWORDS = ( SQSO_TRACE_LEVEL + 1 ) - } STRUCTURED_QUERY_SINGLE_OPTION; - -typedef /* [v1_enum] */ -enum tagSTRUCTURED_QUERY_MULTIOPTION - { SQMO_VIRTUAL_PROPERTY = 0, - SQMO_DEFAULT_PROPERTY = ( SQMO_VIRTUAL_PROPERTY + 1 ) , - SQMO_GENERATOR_FOR_TYPE = ( SQMO_DEFAULT_PROPERTY + 1 ) - } STRUCTURED_QUERY_MULTIOPTION; - -typedef /* [v1_enum] */ -enum tagSTRUCTURED_QUERY_PARSE_ERROR - { SQPE_NONE = 0, - SQPE_EXTRA_OPENING_PARENTHESIS = ( SQPE_NONE + 1 ) , - SQPE_EXTRA_CLOSING_PARENTHESIS = ( SQPE_EXTRA_OPENING_PARENTHESIS + 1 ) , - SQPE_IGNORED_MODIFIER = ( SQPE_EXTRA_CLOSING_PARENTHESIS + 1 ) , - SQPE_IGNORED_CONNECTOR = ( SQPE_IGNORED_MODIFIER + 1 ) , - SQPE_IGNORED_KEYWORD = ( SQPE_IGNORED_CONNECTOR + 1 ) , - SQPE_UNHANDLED = ( SQPE_IGNORED_KEYWORD + 1 ) - } STRUCTURED_QUERY_PARSE_ERROR; - -/* [v1_enum] */ -enum tagSTRUCTURED_QUERY_RESOLVE_OPTION - { SQRO_DONT_RESOLVE_DATETIME = 0x1, - SQRO_ALWAYS_ONE_INTERVAL = 0x2, - SQRO_DONT_SIMPLIFY_CONDITION_TREES = 0x4, - SQRO_DONT_MAP_RELATIONS = 0x8, - SQRO_DONT_RESOLVE_RANGES = 0x10, - SQRO_DONT_REMOVE_UNRESTRICTED_KEYWORDS = 0x20, - SQRO_DONT_SPLIT_WORDS = 0x40, - SQRO_IGNORE_PHRASE_ORDER = 0x80 - } ; -typedef int STRUCTURED_QUERY_RESOLVE_OPTION; - -typedef /* [v1_enum] */ -enum tagINTERVAL_LIMIT_KIND - { ILK_EXPLICIT_INCLUDED = 0, - ILK_EXPLICIT_EXCLUDED = ( ILK_EXPLICIT_INCLUDED + 1 ) , - 
ILK_NEGATIVE_INFINITY = ( ILK_EXPLICIT_EXCLUDED + 1 ) , - ILK_POSITIVE_INFINITY = ( ILK_NEGATIVE_INFINITY + 1 ) - } INTERVAL_LIMIT_KIND; - -typedef /* [v1_enum] */ -enum tagQUERY_PARSER_MANAGER_OPTION - { QPMO_SCHEMA_BINARY_NAME = 0, - QPMO_PRELOCALIZED_SCHEMA_BINARY_PATH = ( QPMO_SCHEMA_BINARY_NAME + 1 ) , - QPMO_UNLOCALIZED_SCHEMA_BINARY_PATH = ( QPMO_PRELOCALIZED_SCHEMA_BINARY_PATH + 1 ) , - QPMO_LOCALIZED_SCHEMA_BINARY_PATH = ( QPMO_UNLOCALIZED_SCHEMA_BINARY_PATH + 1 ) , - QPMO_APPEND_LCID_TO_LOCALIZED_PATH = ( QPMO_LOCALIZED_SCHEMA_BINARY_PATH + 1 ) , - QPMO_LOCALIZER_SUPPORT = ( QPMO_APPEND_LCID_TO_LOCALIZED_PATH + 1 ) - } QUERY_PARSER_MANAGER_OPTION; - - - -extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0000_v0_0_c_ifspec; -extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0000_v0_0_s_ifspec; - -#ifndef __IQueryParser_INTERFACE_DEFINED__ -#define __IQueryParser_INTERFACE_DEFINED__ - -/* interface IQueryParser */ -/* [unique][uuid][object] */ - - -EXTERN_C const IID IID_IQueryParser; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("2EBDEE67-3505-43f8-9946-EA44ABC8E5B0") - IQueryParser : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE Parse( - /* [in] */ __RPC__in LPCWSTR pszInputString, - /* [in] */ __RPC__in_opt IEnumUnknown *pCustomProperties, - /* [retval][out] */ __RPC__deref_out_opt IQuerySolution **ppSolution) = 0; - - virtual HRESULT STDMETHODCALLTYPE SetOption( - /* [in] */ STRUCTURED_QUERY_SINGLE_OPTION option, - /* [in] */ __RPC__in const PROPVARIANT *pOptionValue) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetOption( - /* [in] */ STRUCTURED_QUERY_SINGLE_OPTION option, - /* [retval][out] */ __RPC__out PROPVARIANT *pOptionValue) = 0; - - virtual HRESULT STDMETHODCALLTYPE SetMultiOption( - /* [in] */ STRUCTURED_QUERY_MULTIOPTION option, - /* [in] */ __RPC__in LPCWSTR pszOptionKey, - /* [in] */ __RPC__in const PROPVARIANT *pOptionValue) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetSchemaProvider( - /* [retval][out] */ __RPC__deref_out_opt ISchemaProvider **ppSchemaProvider) = 0; - - virtual HRESULT STDMETHODCALLTYPE RestateToString( - /* [in] */ __RPC__in_opt ICondition *pCondition, - /* [in] */ BOOL fUseEnglish, - /* [out] */ __RPC__deref_out_opt LPWSTR *ppszQueryString) = 0; - - virtual HRESULT STDMETHODCALLTYPE ParsePropertyValue( - /* [in] */ __RPC__in LPCWSTR pszPropertyName, - /* [in] */ __RPC__in LPCWSTR pszInputString, - /* [retval][out] */ __RPC__deref_out_opt IQuerySolution **ppSolution) = 0; - - virtual HRESULT STDMETHODCALLTYPE RestatePropertyValueToString( - /* [in] */ __RPC__in_opt ICondition *pCondition, - /* [in] */ BOOL fUseEnglish, - /* [out] */ __RPC__deref_out_opt LPWSTR *ppszPropertyName, - /* [out] */ __RPC__deref_out_opt LPWSTR *ppszQueryString) = 0; - - }; - -#else /* C style interface */ - - typedef struct IQueryParserVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IQueryParser * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IQueryParser * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IQueryParser * This); - - HRESULT ( STDMETHODCALLTYPE *Parse )( - IQueryParser * This, - /* [in] */ __RPC__in LPCWSTR pszInputString, - /* [in] */ __RPC__in_opt IEnumUnknown *pCustomProperties, - /* [retval][out] */ __RPC__deref_out_opt IQuerySolution **ppSolution); - - HRESULT ( STDMETHODCALLTYPE *SetOption )( - IQueryParser * This, - /* [in] */ STRUCTURED_QUERY_SINGLE_OPTION option, - 
/* [in] */ __RPC__in const PROPVARIANT *pOptionValue); - - HRESULT ( STDMETHODCALLTYPE *GetOption )( - IQueryParser * This, - /* [in] */ STRUCTURED_QUERY_SINGLE_OPTION option, - /* [retval][out] */ __RPC__out PROPVARIANT *pOptionValue); - - HRESULT ( STDMETHODCALLTYPE *SetMultiOption )( - IQueryParser * This, - /* [in] */ STRUCTURED_QUERY_MULTIOPTION option, - /* [in] */ __RPC__in LPCWSTR pszOptionKey, - /* [in] */ __RPC__in const PROPVARIANT *pOptionValue); - - HRESULT ( STDMETHODCALLTYPE *GetSchemaProvider )( - IQueryParser * This, - /* [retval][out] */ __RPC__deref_out_opt ISchemaProvider **ppSchemaProvider); - - HRESULT ( STDMETHODCALLTYPE *RestateToString )( - IQueryParser * This, - /* [in] */ __RPC__in_opt ICondition *pCondition, - /* [in] */ BOOL fUseEnglish, - /* [out] */ __RPC__deref_out_opt LPWSTR *ppszQueryString); - - HRESULT ( STDMETHODCALLTYPE *ParsePropertyValue )( - IQueryParser * This, - /* [in] */ __RPC__in LPCWSTR pszPropertyName, - /* [in] */ __RPC__in LPCWSTR pszInputString, - /* [retval][out] */ __RPC__deref_out_opt IQuerySolution **ppSolution); - - HRESULT ( STDMETHODCALLTYPE *RestatePropertyValueToString )( - IQueryParser * This, - /* [in] */ __RPC__in_opt ICondition *pCondition, - /* [in] */ BOOL fUseEnglish, - /* [out] */ __RPC__deref_out_opt LPWSTR *ppszPropertyName, - /* [out] */ __RPC__deref_out_opt LPWSTR *ppszQueryString); - - END_INTERFACE - } IQueryParserVtbl; - - interface IQueryParser - { - CONST_VTBL struct IQueryParserVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IQueryParser_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IQueryParser_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IQueryParser_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IQueryParser_Parse(This,pszInputString,pCustomProperties,ppSolution) \ - ( (This)->lpVtbl -> Parse(This,pszInputString,pCustomProperties,ppSolution) ) - -#define IQueryParser_SetOption(This,option,pOptionValue) \ - ( (This)->lpVtbl -> SetOption(This,option,pOptionValue) ) - -#define IQueryParser_GetOption(This,option,pOptionValue) \ - ( (This)->lpVtbl -> GetOption(This,option,pOptionValue) ) - -#define IQueryParser_SetMultiOption(This,option,pszOptionKey,pOptionValue) \ - ( (This)->lpVtbl -> SetMultiOption(This,option,pszOptionKey,pOptionValue) ) - -#define IQueryParser_GetSchemaProvider(This,ppSchemaProvider) \ - ( (This)->lpVtbl -> GetSchemaProvider(This,ppSchemaProvider) ) - -#define IQueryParser_RestateToString(This,pCondition,fUseEnglish,ppszQueryString) \ - ( (This)->lpVtbl -> RestateToString(This,pCondition,fUseEnglish,ppszQueryString) ) - -#define IQueryParser_ParsePropertyValue(This,pszPropertyName,pszInputString,ppSolution) \ - ( (This)->lpVtbl -> ParsePropertyValue(This,pszPropertyName,pszInputString,ppSolution) ) - -#define IQueryParser_RestatePropertyValueToString(This,pCondition,fUseEnglish,ppszPropertyName,ppszQueryString) \ - ( (This)->lpVtbl -> RestatePropertyValueToString(This,pCondition,fUseEnglish,ppszPropertyName,ppszQueryString) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IQueryParser_INTERFACE_DEFINED__ */ - - -#ifndef __IConditionFactory_INTERFACE_DEFINED__ -#define __IConditionFactory_INTERFACE_DEFINED__ - -/* interface IConditionFactory */ -/* [unique][uuid][object] */ - - -EXTERN_C const IID IID_IConditionFactory; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("A5EFE073-B16F-474f-9F3E-9F8B497A3E08") - IConditionFactory 
: public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE MakeNot( - /* [in] */ __RPC__in_opt ICondition *pSubCondition, - /* [in] */ BOOL simplify, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery) = 0; - - virtual HRESULT STDMETHODCALLTYPE MakeAndOr( - /* [in] */ CONDITION_TYPE nodeType, - /* [in] */ __RPC__in_opt IEnumUnknown *pSubConditions, - /* [in] */ BOOL simplify, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery) = 0; - - virtual HRESULT STDMETHODCALLTYPE MakeLeaf( - /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName, - /* [in] */ CONDITION_OPERATION op, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType, - /* [in] */ __RPC__in const PROPVARIANT *pValue, - /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm, - /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm, - /* [in] */ __RPC__in_opt IRichChunk *pValueTerm, - /* [in] */ BOOL expand, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE Resolve( - /* [in] */ - __in ICondition *pConditionTree, - /* [in] */ - __in STRUCTURED_QUERY_RESOLVE_OPTION sqro, - /* [ref][in] */ - __in_opt const SYSTEMTIME *pstReferenceTime, - /* [retval][out] */ - __out ICondition **ppResolvedConditionTree) = 0; - - }; - -#else /* C style interface */ - - typedef struct IConditionFactoryVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IConditionFactory * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IConditionFactory * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IConditionFactory * This); - - HRESULT ( STDMETHODCALLTYPE *MakeNot )( - IConditionFactory * This, - /* [in] */ __RPC__in_opt ICondition *pSubCondition, - /* [in] */ BOOL simplify, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery); - - HRESULT ( STDMETHODCALLTYPE *MakeAndOr )( - IConditionFactory * This, - /* [in] */ CONDITION_TYPE nodeType, - /* [in] */ __RPC__in_opt IEnumUnknown *pSubConditions, - /* [in] */ BOOL simplify, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery); - - HRESULT ( STDMETHODCALLTYPE *MakeLeaf )( - IConditionFactory * This, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName, - /* [in] */ CONDITION_OPERATION op, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType, - /* [in] */ __RPC__in const PROPVARIANT *pValue, - /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm, - /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm, - /* [in] */ __RPC__in_opt IRichChunk *pValueTerm, - /* [in] */ BOOL expand, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *Resolve )( - IConditionFactory * This, - /* [in] */ - __in ICondition *pConditionTree, - /* [in] */ - __in STRUCTURED_QUERY_RESOLVE_OPTION sqro, - /* [ref][in] */ - __in_opt const SYSTEMTIME *pstReferenceTime, - /* [retval][out] */ - __out ICondition **ppResolvedConditionTree); - - END_INTERFACE - } IConditionFactoryVtbl; - - interface IConditionFactory - { - CONST_VTBL struct IConditionFactoryVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IConditionFactory_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IConditionFactory_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IConditionFactory_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define 
IConditionFactory_MakeNot(This,pSubCondition,simplify,ppResultQuery) \ - ( (This)->lpVtbl -> MakeNot(This,pSubCondition,simplify,ppResultQuery) ) - -#define IConditionFactory_MakeAndOr(This,nodeType,pSubConditions,simplify,ppResultQuery) \ - ( (This)->lpVtbl -> MakeAndOr(This,nodeType,pSubConditions,simplify,ppResultQuery) ) - -#define IConditionFactory_MakeLeaf(This,pszPropertyName,op,pszValueType,pValue,pPropertyNameTerm,pOperationTerm,pValueTerm,expand,ppResultQuery) \ - ( (This)->lpVtbl -> MakeLeaf(This,pszPropertyName,op,pszValueType,pValue,pPropertyNameTerm,pOperationTerm,pValueTerm,expand,ppResultQuery) ) - -#define IConditionFactory_Resolve(This,pConditionTree,sqro,pstReferenceTime,ppResolvedConditionTree) \ - ( (This)->lpVtbl -> Resolve(This,pConditionTree,sqro,pstReferenceTime,ppResolvedConditionTree) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IConditionFactory_INTERFACE_DEFINED__ */ - - -#ifndef __IQuerySolution_INTERFACE_DEFINED__ -#define __IQuerySolution_INTERFACE_DEFINED__ - -/* interface IQuerySolution */ -/* [unique][uuid][object] */ - - -EXTERN_C const IID IID_IQuerySolution; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("D6EBC66B-8921-4193-AFDD-A1789FB7FF57") - IQuerySolution : public IConditionFactory - { - public: - virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetQuery( - /* [out] */ - __out_opt ICondition **ppQueryNode, - /* [out] */ - __out_opt IEntity **ppMainType) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetErrors( - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppParseErrors) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetLexicalData( - /* [out] */ - __deref_opt_out LPWSTR *ppszInputString, - /* [out] */ - __out_opt ITokenCollection **ppTokens, - /* [out] */ - __out_opt LCID *pLocale, - /* [out] */ - __out_opt IUnknown **ppWordBreaker) = 0; - - }; - -#else /* C style interface */ - - typedef struct IQuerySolutionVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IQuerySolution * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IQuerySolution * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IQuerySolution * This); - - HRESULT ( STDMETHODCALLTYPE *MakeNot )( - IQuerySolution * This, - /* [in] */ __RPC__in_opt ICondition *pSubCondition, - /* [in] */ BOOL simplify, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery); - - HRESULT ( STDMETHODCALLTYPE *MakeAndOr )( - IQuerySolution * This, - /* [in] */ CONDITION_TYPE nodeType, - /* [in] */ __RPC__in_opt IEnumUnknown *pSubConditions, - /* [in] */ BOOL simplify, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery); - - HRESULT ( STDMETHODCALLTYPE *MakeLeaf )( - IQuerySolution * This, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName, - /* [in] */ CONDITION_OPERATION op, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType, - /* [in] */ __RPC__in const PROPVARIANT *pValue, - /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm, - /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm, - /* [in] */ __RPC__in_opt IRichChunk *pValueTerm, - /* [in] */ BOOL expand, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *Resolve )( - IQuerySolution * This, - /* [in] */ - __in ICondition *pConditionTree, - /* [in] */ - __in STRUCTURED_QUERY_RESOLVE_OPTION sqro, - /* 
[ref][in] */ - __in_opt const SYSTEMTIME *pstReferenceTime, - /* [retval][out] */ - __out ICondition **ppResolvedConditionTree); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetQuery )( - IQuerySolution * This, - /* [out] */ - __out_opt ICondition **ppQueryNode, - /* [out] */ - __out_opt IEntity **ppMainType); - - HRESULT ( STDMETHODCALLTYPE *GetErrors )( - IQuerySolution * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppParseErrors); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetLexicalData )( - IQuerySolution * This, - /* [out] */ - __deref_opt_out LPWSTR *ppszInputString, - /* [out] */ - __out_opt ITokenCollection **ppTokens, - /* [out] */ - __out_opt LCID *pLocale, - /* [out] */ - __out_opt IUnknown **ppWordBreaker); - - END_INTERFACE - } IQuerySolutionVtbl; - - interface IQuerySolution - { - CONST_VTBL struct IQuerySolutionVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IQuerySolution_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IQuerySolution_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IQuerySolution_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IQuerySolution_MakeNot(This,pSubCondition,simplify,ppResultQuery) \ - ( (This)->lpVtbl -> MakeNot(This,pSubCondition,simplify,ppResultQuery) ) - -#define IQuerySolution_MakeAndOr(This,nodeType,pSubConditions,simplify,ppResultQuery) \ - ( (This)->lpVtbl -> MakeAndOr(This,nodeType,pSubConditions,simplify,ppResultQuery) ) - -#define IQuerySolution_MakeLeaf(This,pszPropertyName,op,pszValueType,pValue,pPropertyNameTerm,pOperationTerm,pValueTerm,expand,ppResultQuery) \ - ( (This)->lpVtbl -> MakeLeaf(This,pszPropertyName,op,pszValueType,pValue,pPropertyNameTerm,pOperationTerm,pValueTerm,expand,ppResultQuery) ) - -#define IQuerySolution_Resolve(This,pConditionTree,sqro,pstReferenceTime,ppResolvedConditionTree) \ - ( (This)->lpVtbl -> Resolve(This,pConditionTree,sqro,pstReferenceTime,ppResolvedConditionTree) ) - - -#define IQuerySolution_GetQuery(This,ppQueryNode,ppMainType) \ - ( (This)->lpVtbl -> GetQuery(This,ppQueryNode,ppMainType) ) - -#define IQuerySolution_GetErrors(This,riid,ppParseErrors) \ - ( (This)->lpVtbl -> GetErrors(This,riid,ppParseErrors) ) - -#define IQuerySolution_GetLexicalData(This,ppszInputString,ppTokens,pLocale,ppWordBreaker) \ - ( (This)->lpVtbl -> GetLexicalData(This,ppszInputString,ppTokens,pLocale,ppWordBreaker) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IQuerySolution_INTERFACE_DEFINED__ */ - - -#ifndef __ICondition_INTERFACE_DEFINED__ -#define __ICondition_INTERFACE_DEFINED__ - -/* interface ICondition */ -/* [unique][uuid][object] */ - - -EXTERN_C const IID IID_ICondition; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("0FC988D4-C935-4b97-A973-46282EA175C8") - ICondition : public IPersistStream - { - public: - virtual HRESULT STDMETHODCALLTYPE GetConditionType( - /* [retval][out] */ __RPC__out CONDITION_TYPE *pNodeType) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetSubConditions( - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppv) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetComparisonInfo( - /* [out] */ - __deref_opt_out LPWSTR *ppszPropertyName, - /* [out] */ - __out_opt CONDITION_OPERATION *pOperation, - /* [out] */ - __out_opt PROPVARIANT *pValue) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetValueType( - /* 
[retval][out] */ __RPC__deref_out_opt LPWSTR *ppszValueTypeName) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetValueNormalization( - /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszNormalization) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetInputTerms( - /* [out] */ - __out_opt IRichChunk **ppPropertyTerm, - /* [out] */ - __out_opt IRichChunk **ppOperationTerm, - /* [out] */ - __out_opt IRichChunk **ppValueTerm) = 0; - - virtual HRESULT STDMETHODCALLTYPE Clone( - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppc) = 0; - - }; - -#else /* C style interface */ - - typedef struct IConditionVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - ICondition * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - ICondition * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - ICondition * This); - - HRESULT ( STDMETHODCALLTYPE *GetClassID )( - ICondition * This, - /* [out] */ __RPC__out CLSID *pClassID); - - HRESULT ( STDMETHODCALLTYPE *IsDirty )( - ICondition * This); - - HRESULT ( STDMETHODCALLTYPE *Load )( - ICondition * This, - /* [unique][in] */ __RPC__in_opt IStream *pStm); - - HRESULT ( STDMETHODCALLTYPE *Save )( - ICondition * This, - /* [unique][in] */ __RPC__in_opt IStream *pStm, - /* [in] */ BOOL fClearDirty); - - HRESULT ( STDMETHODCALLTYPE *GetSizeMax )( - ICondition * This, - /* [out] */ __RPC__out ULARGE_INTEGER *pcbSize); - - HRESULT ( STDMETHODCALLTYPE *GetConditionType )( - ICondition * This, - /* [retval][out] */ __RPC__out CONDITION_TYPE *pNodeType); - - HRESULT ( STDMETHODCALLTYPE *GetSubConditions )( - ICondition * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppv); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetComparisonInfo )( - ICondition * This, - /* [out] */ - __deref_opt_out LPWSTR *ppszPropertyName, - /* [out] */ - __out_opt CONDITION_OPERATION *pOperation, - /* [out] */ - __out_opt PROPVARIANT *pValue); - - HRESULT ( STDMETHODCALLTYPE *GetValueType )( - ICondition * This, - /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszValueTypeName); - - HRESULT ( STDMETHODCALLTYPE *GetValueNormalization )( - ICondition * This, - /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszNormalization); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetInputTerms )( - ICondition * This, - /* [out] */ - __out_opt IRichChunk **ppPropertyTerm, - /* [out] */ - __out_opt IRichChunk **ppOperationTerm, - /* [out] */ - __out_opt IRichChunk **ppValueTerm); - - HRESULT ( STDMETHODCALLTYPE *Clone )( - ICondition * This, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppc); - - END_INTERFACE - } IConditionVtbl; - - interface ICondition - { - CONST_VTBL struct IConditionVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define ICondition_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define ICondition_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define ICondition_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define ICondition_GetClassID(This,pClassID) \ - ( (This)->lpVtbl -> GetClassID(This,pClassID) ) - - -#define ICondition_IsDirty(This) \ - ( (This)->lpVtbl -> IsDirty(This) ) - -#define ICondition_Load(This,pStm) \ - ( (This)->lpVtbl -> Load(This,pStm) ) - -#define ICondition_Save(This,pStm,fClearDirty) \ - ( (This)->lpVtbl -> Save(This,pStm,fClearDirty) ) - -#define ICondition_GetSizeMax(This,pcbSize) \ - ( 
(This)->lpVtbl -> GetSizeMax(This,pcbSize) ) - - -#define ICondition_GetConditionType(This,pNodeType) \ - ( (This)->lpVtbl -> GetConditionType(This,pNodeType) ) - -#define ICondition_GetSubConditions(This,riid,ppv) \ - ( (This)->lpVtbl -> GetSubConditions(This,riid,ppv) ) - -#define ICondition_GetComparisonInfo(This,ppszPropertyName,pOperation,pValue) \ - ( (This)->lpVtbl -> GetComparisonInfo(This,ppszPropertyName,pOperation,pValue) ) - -#define ICondition_GetValueType(This,ppszValueTypeName) \ - ( (This)->lpVtbl -> GetValueType(This,ppszValueTypeName) ) - -#define ICondition_GetValueNormalization(This,ppszNormalization) \ - ( (This)->lpVtbl -> GetValueNormalization(This,ppszNormalization) ) - -#define ICondition_GetInputTerms(This,ppPropertyTerm,ppOperationTerm,ppValueTerm) \ - ( (This)->lpVtbl -> GetInputTerms(This,ppPropertyTerm,ppOperationTerm,ppValueTerm) ) - -#define ICondition_Clone(This,ppc) \ - ( (This)->lpVtbl -> Clone(This,ppc) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __ICondition_INTERFACE_DEFINED__ */ - - -#ifndef __IConditionGenerator_INTERFACE_DEFINED__ -#define __IConditionGenerator_INTERFACE_DEFINED__ - -/* interface IConditionGenerator */ -/* [unique][uuid][object] */ - - -EXTERN_C const IID IID_IConditionGenerator; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("92D2CC58-4386-45a3-B98C-7E0CE64A4117") - IConditionGenerator : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE Initialize( - /* [in] */ __RPC__in_opt ISchemaProvider *pSchemaProvider) = 0; - - virtual HRESULT STDMETHODCALLTYPE RecognizeNamedEntities( - /* [in] */ __RPC__in LPCWSTR pszInputString, - /* [in] */ LCID lcid, - /* [in] */ __RPC__in_opt ITokenCollection *pTokenCollection, - /* [out][in] */ __RPC__inout_opt INamedEntityCollector *pNamedEntities) = 0; - - virtual HRESULT STDMETHODCALLTYPE GenerateForLeaf( - /* [in] */ __RPC__in_opt IConditionFactory *pConditionFactory, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName, - /* [in] */ CONDITION_OPERATION op, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType, - /* [in] */ __RPC__in LPCWSTR pszValue, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszValue2, - /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm, - /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm, - /* [in] */ __RPC__in_opt IRichChunk *pValueTerm, - /* [in] */ BOOL automaticWildcard, - /* [out] */ __RPC__out BOOL *pNoStringQuery, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppQueryExpression) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE DefaultPhrase( - /* [unique][in] */ LPCWSTR pszValueType, - /* [in] */ const PROPVARIANT *ppropvar, - /* [in] */ BOOL fUseEnglish, - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszPhrase) = 0; - - }; - -#else /* C style interface */ - - typedef struct IConditionGeneratorVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IConditionGenerator * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IConditionGenerator * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IConditionGenerator * This); - - HRESULT ( STDMETHODCALLTYPE *Initialize )( - IConditionGenerator * This, - /* [in] */ __RPC__in_opt ISchemaProvider *pSchemaProvider); - - HRESULT ( STDMETHODCALLTYPE *RecognizeNamedEntities )( - IConditionGenerator * This, - /* [in] */ __RPC__in LPCWSTR pszInputString, - /* [in] */ LCID lcid, - /* [in] */ __RPC__in_opt 
ITokenCollection *pTokenCollection, - /* [out][in] */ __RPC__inout_opt INamedEntityCollector *pNamedEntities); - - HRESULT ( STDMETHODCALLTYPE *GenerateForLeaf )( - IConditionGenerator * This, - /* [in] */ __RPC__in_opt IConditionFactory *pConditionFactory, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName, - /* [in] */ CONDITION_OPERATION op, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType, - /* [in] */ __RPC__in LPCWSTR pszValue, - /* [unique][in] */ __RPC__in_opt LPCWSTR pszValue2, - /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm, - /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm, - /* [in] */ __RPC__in_opt IRichChunk *pValueTerm, - /* [in] */ BOOL automaticWildcard, - /* [out] */ __RPC__out BOOL *pNoStringQuery, - /* [retval][out] */ __RPC__deref_out_opt ICondition **ppQueryExpression); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *DefaultPhrase )( - IConditionGenerator * This, - /* [unique][in] */ LPCWSTR pszValueType, - /* [in] */ const PROPVARIANT *ppropvar, - /* [in] */ BOOL fUseEnglish, - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszPhrase); - - END_INTERFACE - } IConditionGeneratorVtbl; - - interface IConditionGenerator - { - CONST_VTBL struct IConditionGeneratorVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IConditionGenerator_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IConditionGenerator_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IConditionGenerator_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IConditionGenerator_Initialize(This,pSchemaProvider) \ - ( (This)->lpVtbl -> Initialize(This,pSchemaProvider) ) - -#define IConditionGenerator_RecognizeNamedEntities(This,pszInputString,lcid,pTokenCollection,pNamedEntities) \ - ( (This)->lpVtbl -> RecognizeNamedEntities(This,pszInputString,lcid,pTokenCollection,pNamedEntities) ) - -#define IConditionGenerator_GenerateForLeaf(This,pConditionFactory,pszPropertyName,op,pszValueType,pszValue,pszValue2,pPropertyNameTerm,pOperationTerm,pValueTerm,automaticWildcard,pNoStringQuery,ppQueryExpression) \ - ( (This)->lpVtbl -> GenerateForLeaf(This,pConditionFactory,pszPropertyName,op,pszValueType,pszValue,pszValue2,pPropertyNameTerm,pOperationTerm,pValueTerm,automaticWildcard,pNoStringQuery,ppQueryExpression) ) - -#define IConditionGenerator_DefaultPhrase(This,pszValueType,ppropvar,fUseEnglish,ppszPhrase) \ - ( (This)->lpVtbl -> DefaultPhrase(This,pszValueType,ppropvar,fUseEnglish,ppszPhrase) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IConditionGenerator_INTERFACE_DEFINED__ */ - - -#ifndef __IRichChunk_INTERFACE_DEFINED__ -#define __IRichChunk_INTERFACE_DEFINED__ - -/* interface IRichChunk */ -/* [unique][uuid][object] */ - - -EXTERN_C const IID IID_IRichChunk; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("4FDEF69C-DBC9-454e-9910-B34F3C64B510") - IRichChunk : public IUnknown - { - public: - virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetData( - /* [out] */ - __out_opt ULONG *pFirstPos, - /* [out] */ - __out_opt ULONG *pLength, - /* [out] */ - __deref_opt_out LPWSTR *ppsz, - /* [out] */ - __out_opt PROPVARIANT *pValue) = 0; - - }; - -#else /* C style interface */ - - typedef struct IRichChunkVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IRichChunk * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - 
IRichChunk * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IRichChunk * This); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetData )( - IRichChunk * This, - /* [out] */ - __out_opt ULONG *pFirstPos, - /* [out] */ - __out_opt ULONG *pLength, - /* [out] */ - __deref_opt_out LPWSTR *ppsz, - /* [out] */ - __out_opt PROPVARIANT *pValue); - - END_INTERFACE - } IRichChunkVtbl; - - interface IRichChunk - { - CONST_VTBL struct IRichChunkVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IRichChunk_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IRichChunk_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IRichChunk_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IRichChunk_GetData(This,pFirstPos,pLength,ppsz,pValue) \ - ( (This)->lpVtbl -> GetData(This,pFirstPos,pLength,ppsz,pValue) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IRichChunk_INTERFACE_DEFINED__ */ - - -#ifndef __IInterval_INTERFACE_DEFINED__ -#define __IInterval_INTERFACE_DEFINED__ - -/* interface IInterval */ -/* [unique][uuid][object] */ - - -EXTERN_C const IID IID_IInterval; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("6BF0A714-3C18-430b-8B5D-83B1C234D3DB") - IInterval : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE GetLimits( - /* [out] */ __RPC__out INTERVAL_LIMIT_KIND *pilkLower, - /* [out] */ __RPC__out PROPVARIANT *ppropvarLower, - /* [out] */ __RPC__out INTERVAL_LIMIT_KIND *pilkUpper, - /* [out] */ __RPC__out PROPVARIANT *ppropvarUpper) = 0; - - }; - -#else /* C style interface */ - - typedef struct IIntervalVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IInterval * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IInterval * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IInterval * This); - - HRESULT ( STDMETHODCALLTYPE *GetLimits )( - IInterval * This, - /* [out] */ __RPC__out INTERVAL_LIMIT_KIND *pilkLower, - /* [out] */ __RPC__out PROPVARIANT *ppropvarLower, - /* [out] */ __RPC__out INTERVAL_LIMIT_KIND *pilkUpper, - /* [out] */ __RPC__out PROPVARIANT *ppropvarUpper); - - END_INTERFACE - } IIntervalVtbl; - - interface IInterval - { - CONST_VTBL struct IIntervalVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IInterval_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IInterval_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IInterval_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IInterval_GetLimits(This,pilkLower,ppropvarLower,pilkUpper,ppropvarUpper) \ - ( (This)->lpVtbl -> GetLimits(This,pilkLower,ppropvarLower,pilkUpper,ppropvarUpper) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IInterval_INTERFACE_DEFINED__ */ - - -#ifndef __IMetaData_INTERFACE_DEFINED__ -#define __IMetaData_INTERFACE_DEFINED__ - -/* interface IMetaData */ -/* [unique][uuid][object][helpstring] */ - - -EXTERN_C const IID IID_IMetaData; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("780102B0-C43B-4876-BC7B-5E9BA5C88794") - IMetaData : public IUnknown - { - public: - virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetData( - /* [out] */ - __deref_opt_out LPWSTR *ppszKey, - /* [out] */ - __deref_opt_out LPWSTR *ppszValue) = 0; - - }; - -#else /* C style interface */ - 
- typedef struct IMetaDataVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IMetaData * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IMetaData * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IMetaData * This); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetData )( - IMetaData * This, - /* [out] */ - __deref_opt_out LPWSTR *ppszKey, - /* [out] */ - __deref_opt_out LPWSTR *ppszValue); - - END_INTERFACE - } IMetaDataVtbl; - - interface IMetaData - { - CONST_VTBL struct IMetaDataVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IMetaData_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IMetaData_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IMetaData_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IMetaData_GetData(This,ppszKey,ppszValue) \ - ( (This)->lpVtbl -> GetData(This,ppszKey,ppszValue) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IMetaData_INTERFACE_DEFINED__ */ - - -/* interface __MIDL_itf_structuredquery_0000_0008 */ -/* [local] */ - - - - -extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0008_v0_0_c_ifspec; -extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0008_v0_0_s_ifspec; - -#ifndef __IEntity_INTERFACE_DEFINED__ -#define __IEntity_INTERFACE_DEFINED__ - -/* interface IEntity */ -/* [unique][object][uuid][helpstring] */ - - -EXTERN_C const IID IID_IEntity; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("24264891-E80B-4fd3-B7CE-4FF2FAE8931F") - IEntity : public IUnknown - { - public: - virtual /* [local] */ HRESULT STDMETHODCALLTYPE Name( - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszName) = 0; - - virtual HRESULT STDMETHODCALLTYPE Base( - /* [retval][out] */ __RPC__deref_out_opt IEntity **pBaseEntity) = 0; - - virtual HRESULT STDMETHODCALLTYPE Relationships( - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pRelationships) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetRelationship( - /* [in] */ __RPC__in LPCWSTR pszRelationName, - /* [retval][out] */ __RPC__deref_out_opt IRelationship **pRelationship) = 0; - - virtual HRESULT STDMETHODCALLTYPE MetaData( - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData) = 0; - - virtual HRESULT STDMETHODCALLTYPE NamedEntities( - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pNamedEntities) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetNamedEntity( - /* [in] */ __RPC__in LPCWSTR pszValue, - /* [retval][out] */ __RPC__deref_out_opt INamedEntity **ppNamedEntity) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE DefaultPhrase( - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszPhrase) = 0; - - }; - -#else /* C style interface */ - - typedef struct IEntityVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IEntity * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IEntity * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IEntity * This); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *Name )( - IEntity * This, - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszName); - - HRESULT ( STDMETHODCALLTYPE *Base )( - IEntity * This, - /* [retval][out] */ 
__RPC__deref_out_opt IEntity **pBaseEntity); - - HRESULT ( STDMETHODCALLTYPE *Relationships )( - IEntity * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pRelationships); - - HRESULT ( STDMETHODCALLTYPE *GetRelationship )( - IEntity * This, - /* [in] */ __RPC__in LPCWSTR pszRelationName, - /* [retval][out] */ __RPC__deref_out_opt IRelationship **pRelationship); - - HRESULT ( STDMETHODCALLTYPE *MetaData )( - IEntity * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData); - - HRESULT ( STDMETHODCALLTYPE *NamedEntities )( - IEntity * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pNamedEntities); - - HRESULT ( STDMETHODCALLTYPE *GetNamedEntity )( - IEntity * This, - /* [in] */ __RPC__in LPCWSTR pszValue, - /* [retval][out] */ __RPC__deref_out_opt INamedEntity **ppNamedEntity); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *DefaultPhrase )( - IEntity * This, - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszPhrase); - - END_INTERFACE - } IEntityVtbl; - - interface IEntity - { - CONST_VTBL struct IEntityVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IEntity_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IEntity_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IEntity_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IEntity_Name(This,ppszName) \ - ( (This)->lpVtbl -> Name(This,ppszName) ) - -#define IEntity_Base(This,pBaseEntity) \ - ( (This)->lpVtbl -> Base(This,pBaseEntity) ) - -#define IEntity_Relationships(This,riid,pRelationships) \ - ( (This)->lpVtbl -> Relationships(This,riid,pRelationships) ) - -#define IEntity_GetRelationship(This,pszRelationName,pRelationship) \ - ( (This)->lpVtbl -> GetRelationship(This,pszRelationName,pRelationship) ) - -#define IEntity_MetaData(This,riid,pMetaData) \ - ( (This)->lpVtbl -> MetaData(This,riid,pMetaData) ) - -#define IEntity_NamedEntities(This,riid,pNamedEntities) \ - ( (This)->lpVtbl -> NamedEntities(This,riid,pNamedEntities) ) - -#define IEntity_GetNamedEntity(This,pszValue,ppNamedEntity) \ - ( (This)->lpVtbl -> GetNamedEntity(This,pszValue,ppNamedEntity) ) - -#define IEntity_DefaultPhrase(This,ppszPhrase) \ - ( (This)->lpVtbl -> DefaultPhrase(This,ppszPhrase) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IEntity_INTERFACE_DEFINED__ */ - - -#ifndef __IRelationship_INTERFACE_DEFINED__ -#define __IRelationship_INTERFACE_DEFINED__ - -/* interface IRelationship */ -/* [unique][object][uuid][helpstring] */ - - -EXTERN_C const IID IID_IRelationship; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("2769280B-5108-498c-9C7F-A51239B63147") - IRelationship : public IUnknown - { - public: - virtual /* [local] */ HRESULT STDMETHODCALLTYPE Name( - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszName) = 0; - - virtual HRESULT STDMETHODCALLTYPE IsReal( - /* [retval][out] */ __RPC__out BOOL *pIsReal) = 0; - - virtual HRESULT STDMETHODCALLTYPE Destination( - /* [retval][out] */ __RPC__deref_out_opt IEntity **pDestinationEntity) = 0; - - virtual HRESULT STDMETHODCALLTYPE MetaData( - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE DefaultPhrase( - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszPhrase) = 0; - - }; - -#else /* C 
style interface */ - - typedef struct IRelationshipVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IRelationship * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IRelationship * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IRelationship * This); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *Name )( - IRelationship * This, - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszName); - - HRESULT ( STDMETHODCALLTYPE *IsReal )( - IRelationship * This, - /* [retval][out] */ __RPC__out BOOL *pIsReal); - - HRESULT ( STDMETHODCALLTYPE *Destination )( - IRelationship * This, - /* [retval][out] */ __RPC__deref_out_opt IEntity **pDestinationEntity); - - HRESULT ( STDMETHODCALLTYPE *MetaData )( - IRelationship * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *DefaultPhrase )( - IRelationship * This, - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszPhrase); - - END_INTERFACE - } IRelationshipVtbl; - - interface IRelationship - { - CONST_VTBL struct IRelationshipVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IRelationship_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IRelationship_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IRelationship_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IRelationship_Name(This,ppszName) \ - ( (This)->lpVtbl -> Name(This,ppszName) ) - -#define IRelationship_IsReal(This,pIsReal) \ - ( (This)->lpVtbl -> IsReal(This,pIsReal) ) - -#define IRelationship_Destination(This,pDestinationEntity) \ - ( (This)->lpVtbl -> Destination(This,pDestinationEntity) ) - -#define IRelationship_MetaData(This,riid,pMetaData) \ - ( (This)->lpVtbl -> MetaData(This,riid,pMetaData) ) - -#define IRelationship_DefaultPhrase(This,ppszPhrase) \ - ( (This)->lpVtbl -> DefaultPhrase(This,ppszPhrase) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IRelationship_INTERFACE_DEFINED__ */ - - -#ifndef __INamedEntity_INTERFACE_DEFINED__ -#define __INamedEntity_INTERFACE_DEFINED__ - -/* interface INamedEntity */ -/* [unique][uuid][object][helpstring] */ - - -EXTERN_C const IID IID_INamedEntity; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("ABDBD0B1-7D54-49fb-AB5C-BFF4130004CD") - INamedEntity : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE GetValue( - /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszValue) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE DefaultPhrase( - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszPhrase) = 0; - - }; - -#else /* C style interface */ - - typedef struct INamedEntityVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - INamedEntity * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - INamedEntity * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - INamedEntity * This); - - HRESULT ( STDMETHODCALLTYPE *GetValue )( - INamedEntity * This, - /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszValue); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *DefaultPhrase )( - INamedEntity * This, - /* [retval][out] */ - __deref_opt_out LPWSTR *ppszPhrase); - - END_INTERFACE - } INamedEntityVtbl; - - interface INamedEntity - { - 
CONST_VTBL struct INamedEntityVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define INamedEntity_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define INamedEntity_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define INamedEntity_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define INamedEntity_GetValue(This,ppszValue) \ - ( (This)->lpVtbl -> GetValue(This,ppszValue) ) - -#define INamedEntity_DefaultPhrase(This,ppszPhrase) \ - ( (This)->lpVtbl -> DefaultPhrase(This,ppszPhrase) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __INamedEntity_INTERFACE_DEFINED__ */ - - -#ifndef __ISchemaProvider_INTERFACE_DEFINED__ -#define __ISchemaProvider_INTERFACE_DEFINED__ - -/* interface ISchemaProvider */ -/* [unique][object][uuid][helpstring] */ - - -EXTERN_C const IID IID_ISchemaProvider; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("8CF89BCB-394C-49b2-AE28-A59DD4ED7F68") - ISchemaProvider : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE Entities( - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pEntities) = 0; - - virtual HRESULT STDMETHODCALLTYPE RootEntity( - /* [retval][out] */ __RPC__deref_out_opt IEntity **pRootEntity) = 0; - - virtual HRESULT STDMETHODCALLTYPE GetEntity( - /* [in] */ __RPC__in LPCWSTR pszEntityName, - /* [retval][out] */ __RPC__deref_out_opt IEntity **pEntity) = 0; - - virtual HRESULT STDMETHODCALLTYPE MetaData( - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData) = 0; - - virtual HRESULT STDMETHODCALLTYPE Localize( - /* [in] */ LCID lcid, - /* [in] */ __RPC__in_opt ISchemaLocalizerSupport *pSchemaLocalizerSupport) = 0; - - virtual HRESULT STDMETHODCALLTYPE SaveBinary( - /* [in] */ __RPC__in LPCWSTR pszSchemaBinaryPath) = 0; - - virtual HRESULT STDMETHODCALLTYPE LookupAuthoredNamedEntity( - /* [in] */ __RPC__in_opt IEntity *pEntity, - /* [in] */ __RPC__in LPCWSTR pszInputString, - /* [in] */ __RPC__in_opt ITokenCollection *pTokenCollection, - /* [in] */ ULONG cTokensBegin, - /* [out] */ __RPC__out ULONG *pcTokensLength, - /* [out] */ __RPC__deref_out_opt LPWSTR *ppszValue) = 0; - - }; - -#else /* C style interface */ - - typedef struct ISchemaProviderVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - ISchemaProvider * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - ISchemaProvider * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - ISchemaProvider * This); - - HRESULT ( STDMETHODCALLTYPE *Entities )( - ISchemaProvider * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pEntities); - - HRESULT ( STDMETHODCALLTYPE *RootEntity )( - ISchemaProvider * This, - /* [retval][out] */ __RPC__deref_out_opt IEntity **pRootEntity); - - HRESULT ( STDMETHODCALLTYPE *GetEntity )( - ISchemaProvider * This, - /* [in] */ __RPC__in LPCWSTR pszEntityName, - /* [retval][out] */ __RPC__deref_out_opt IEntity **pEntity); - - HRESULT ( STDMETHODCALLTYPE *MetaData )( - ISchemaProvider * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData); - - HRESULT ( STDMETHODCALLTYPE *Localize )( - ISchemaProvider * This, - /* [in] */ LCID lcid, - /* [in] */ __RPC__in_opt ISchemaLocalizerSupport *pSchemaLocalizerSupport); - - 
HRESULT ( STDMETHODCALLTYPE *SaveBinary )( - ISchemaProvider * This, - /* [in] */ __RPC__in LPCWSTR pszSchemaBinaryPath); - - HRESULT ( STDMETHODCALLTYPE *LookupAuthoredNamedEntity )( - ISchemaProvider * This, - /* [in] */ __RPC__in_opt IEntity *pEntity, - /* [in] */ __RPC__in LPCWSTR pszInputString, - /* [in] */ __RPC__in_opt ITokenCollection *pTokenCollection, - /* [in] */ ULONG cTokensBegin, - /* [out] */ __RPC__out ULONG *pcTokensLength, - /* [out] */ __RPC__deref_out_opt LPWSTR *ppszValue); - - END_INTERFACE - } ISchemaProviderVtbl; - - interface ISchemaProvider - { - CONST_VTBL struct ISchemaProviderVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define ISchemaProvider_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define ISchemaProvider_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define ISchemaProvider_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define ISchemaProvider_Entities(This,riid,pEntities) \ - ( (This)->lpVtbl -> Entities(This,riid,pEntities) ) - -#define ISchemaProvider_RootEntity(This,pRootEntity) \ - ( (This)->lpVtbl -> RootEntity(This,pRootEntity) ) - -#define ISchemaProvider_GetEntity(This,pszEntityName,pEntity) \ - ( (This)->lpVtbl -> GetEntity(This,pszEntityName,pEntity) ) - -#define ISchemaProvider_MetaData(This,riid,pMetaData) \ - ( (This)->lpVtbl -> MetaData(This,riid,pMetaData) ) - -#define ISchemaProvider_Localize(This,lcid,pSchemaLocalizerSupport) \ - ( (This)->lpVtbl -> Localize(This,lcid,pSchemaLocalizerSupport) ) - -#define ISchemaProvider_SaveBinary(This,pszSchemaBinaryPath) \ - ( (This)->lpVtbl -> SaveBinary(This,pszSchemaBinaryPath) ) - -#define ISchemaProvider_LookupAuthoredNamedEntity(This,pEntity,pszInputString,pTokenCollection,cTokensBegin,pcTokensLength,ppszValue) \ - ( (This)->lpVtbl -> LookupAuthoredNamedEntity(This,pEntity,pszInputString,pTokenCollection,cTokensBegin,pcTokensLength,ppszValue) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __ISchemaProvider_INTERFACE_DEFINED__ */ - - -#ifndef __ITokenCollection_INTERFACE_DEFINED__ -#define __ITokenCollection_INTERFACE_DEFINED__ - -/* interface ITokenCollection */ -/* [unique][object][uuid][helpstring] */ - - -EXTERN_C const IID IID_ITokenCollection; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("22D8B4F2-F577-4adb-A335-C2AE88416FAB") - ITokenCollection : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE NumberOfTokens( - __RPC__in ULONG *pCount) = 0; - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetToken( - /* [in] */ ULONG i, - /* [out] */ - __out_opt ULONG *pBegin, - /* [out] */ - __out_opt ULONG *pLength, - /* [out] */ - __deref_opt_out LPWSTR *ppsz) = 0; - - }; - -#else /* C style interface */ - - typedef struct ITokenCollectionVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - ITokenCollection * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - ITokenCollection * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - ITokenCollection * This); - - HRESULT ( STDMETHODCALLTYPE *NumberOfTokens )( - ITokenCollection * This, - __RPC__in ULONG *pCount); - - /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetToken )( - ITokenCollection * This, - /* [in] */ ULONG i, - /* [out] */ - __out_opt ULONG *pBegin, - /* [out] */ - __out_opt ULONG *pLength, - /* [out] */ - __deref_opt_out LPWSTR *ppsz); - - END_INTERFACE - } 
ITokenCollectionVtbl; - - interface ITokenCollection - { - CONST_VTBL struct ITokenCollectionVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define ITokenCollection_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define ITokenCollection_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define ITokenCollection_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define ITokenCollection_NumberOfTokens(This,pCount) \ - ( (This)->lpVtbl -> NumberOfTokens(This,pCount) ) - -#define ITokenCollection_GetToken(This,i,pBegin,pLength,ppsz) \ - ( (This)->lpVtbl -> GetToken(This,i,pBegin,pLength,ppsz) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __ITokenCollection_INTERFACE_DEFINED__ */ - - -/* interface __MIDL_itf_structuredquery_0000_0013 */ -/* [local] */ - -typedef /* [public][public][v1_enum] */ -enum __MIDL___MIDL_itf_structuredquery_0000_0013_0001 - { NEC_LOW = 0, - NEC_MEDIUM = ( NEC_LOW + 1 ) , - NEC_HIGH = ( NEC_MEDIUM + 1 ) - } NAMED_ENTITY_CERTAINTY; - - - -extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0013_v0_0_c_ifspec; -extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0013_v0_0_s_ifspec; - -#ifndef __INamedEntityCollector_INTERFACE_DEFINED__ -#define __INamedEntityCollector_INTERFACE_DEFINED__ - -/* interface INamedEntityCollector */ -/* [unique][object][uuid][helpstring] */ - - -EXTERN_C const IID IID_INamedEntityCollector; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("AF2440F6-8AFC-47d0-9A7F-396A0ACFB43D") - INamedEntityCollector : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE Add( - /* [in] */ ULONG beginSpan, - /* [in] */ ULONG endSpan, - /* [in] */ ULONG beginActual, - /* [in] */ ULONG endActual, - /* [in] */ __RPC__in_opt IEntity *pType, - /* [in] */ __RPC__in LPCWSTR pszValue, - /* [in] */ NAMED_ENTITY_CERTAINTY certainty) = 0; - - }; - -#else /* C style interface */ - - typedef struct INamedEntityCollectorVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - INamedEntityCollector * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - INamedEntityCollector * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - INamedEntityCollector * This); - - HRESULT ( STDMETHODCALLTYPE *Add )( - INamedEntityCollector * This, - /* [in] */ ULONG beginSpan, - /* [in] */ ULONG endSpan, - /* [in] */ ULONG beginActual, - /* [in] */ ULONG endActual, - /* [in] */ __RPC__in_opt IEntity *pType, - /* [in] */ __RPC__in LPCWSTR pszValue, - /* [in] */ NAMED_ENTITY_CERTAINTY certainty); - - END_INTERFACE - } INamedEntityCollectorVtbl; - - interface INamedEntityCollector - { - CONST_VTBL struct INamedEntityCollectorVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define INamedEntityCollector_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define INamedEntityCollector_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define INamedEntityCollector_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define INamedEntityCollector_Add(This,beginSpan,endSpan,beginActual,endActual,pType,pszValue,certainty) \ - ( (This)->lpVtbl -> Add(This,beginSpan,endSpan,beginActual,endActual,pType,pszValue,certainty) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __INamedEntityCollector_INTERFACE_DEFINED__ */ - - -#ifndef 
__ISchemaLocalizerSupport_INTERFACE_DEFINED__ -#define __ISchemaLocalizerSupport_INTERFACE_DEFINED__ - -/* interface ISchemaLocalizerSupport */ -/* [unique][object][uuid] */ - - -EXTERN_C const IID IID_ISchemaLocalizerSupport; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("CA3FDCA2-BFBE-4eed-90D7-0CAEF0A1BDA1") - ISchemaLocalizerSupport : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE Localize( - /* [in] */ __RPC__in LPCWSTR pszGlobalString, - /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszLocalString) = 0; - - }; - -#else /* C style interface */ - - typedef struct ISchemaLocalizerSupportVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - ISchemaLocalizerSupport * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - ISchemaLocalizerSupport * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - ISchemaLocalizerSupport * This); - - HRESULT ( STDMETHODCALLTYPE *Localize )( - ISchemaLocalizerSupport * This, - /* [in] */ __RPC__in LPCWSTR pszGlobalString, - /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszLocalString); - - END_INTERFACE - } ISchemaLocalizerSupportVtbl; - - interface ISchemaLocalizerSupport - { - CONST_VTBL struct ISchemaLocalizerSupportVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define ISchemaLocalizerSupport_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define ISchemaLocalizerSupport_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define ISchemaLocalizerSupport_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define ISchemaLocalizerSupport_Localize(This,pszGlobalString,ppszLocalString) \ - ( (This)->lpVtbl -> Localize(This,pszGlobalString,ppszLocalString) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __ISchemaLocalizerSupport_INTERFACE_DEFINED__ */ - - -#ifndef __IQueryParserManager_INTERFACE_DEFINED__ -#define __IQueryParserManager_INTERFACE_DEFINED__ - -/* interface IQueryParserManager */ -/* [unique][object][uuid] */ - - -EXTERN_C const IID IID_IQueryParserManager; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("A879E3C4-AF77-44fb-8F37-EBD1487CF920") - IQueryParserManager : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE CreateLoadedParser( - /* [in] */ __RPC__in LPCWSTR pszCatalog, - /* [in] */ LANGID langidForKeywords, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppQueryParser) = 0; - - virtual HRESULT STDMETHODCALLTYPE InitializeOptions( - /* [in] */ BOOL fUnderstandNQS, - /* [in] */ BOOL fAutoWildCard, - /* [in] */ __RPC__in_opt IQueryParser *pQueryParser) = 0; - - virtual HRESULT STDMETHODCALLTYPE SetOption( - /* [in] */ QUERY_PARSER_MANAGER_OPTION option, - /* [in] */ __RPC__in const PROPVARIANT *pOptionValue) = 0; - - }; - -#else /* C style interface */ - - typedef struct IQueryParserManagerVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IQueryParserManager * This, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IQueryParserManager * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IQueryParserManager * This); - - HRESULT ( STDMETHODCALLTYPE *CreateLoadedParser )( - IQueryParserManager * This, - /* [in] */ __RPC__in LPCWSTR pszCatalog, - /* [in] */ LANGID 
langidForKeywords, - /* [in] */ __RPC__in REFIID riid, - /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppQueryParser); - - HRESULT ( STDMETHODCALLTYPE *InitializeOptions )( - IQueryParserManager * This, - /* [in] */ BOOL fUnderstandNQS, - /* [in] */ BOOL fAutoWildCard, - /* [in] */ __RPC__in_opt IQueryParser *pQueryParser); - - HRESULT ( STDMETHODCALLTYPE *SetOption )( - IQueryParserManager * This, - /* [in] */ QUERY_PARSER_MANAGER_OPTION option, - /* [in] */ __RPC__in const PROPVARIANT *pOptionValue); - - END_INTERFACE - } IQueryParserManagerVtbl; - - interface IQueryParserManager - { - CONST_VTBL struct IQueryParserManagerVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IQueryParserManager_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IQueryParserManager_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IQueryParserManager_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IQueryParserManager_CreateLoadedParser(This,pszCatalog,langidForKeywords,riid,ppQueryParser) \ - ( (This)->lpVtbl -> CreateLoadedParser(This,pszCatalog,langidForKeywords,riid,ppQueryParser) ) - -#define IQueryParserManager_InitializeOptions(This,fUnderstandNQS,fAutoWildCard,pQueryParser) \ - ( (This)->lpVtbl -> InitializeOptions(This,fUnderstandNQS,fAutoWildCard,pQueryParser) ) - -#define IQueryParserManager_SetOption(This,option,pOptionValue) \ - ( (This)->lpVtbl -> SetOption(This,option,pOptionValue) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IQueryParserManager_INTERFACE_DEFINED__ */ - - - -#ifndef __StructuredQuery1_LIBRARY_DEFINED__ -#define __StructuredQuery1_LIBRARY_DEFINED__ - -/* library StructuredQuery1 */ -/* [version][uuid] */ - - -EXTERN_C const IID LIBID_StructuredQuery1; - -EXTERN_C const CLSID CLSID_QueryParser; - -#ifdef __cplusplus - -class DECLSPEC_UUID("B72F8FD8-0FAB-4dd9-BDBF-245A6CE1485B") -QueryParser; -#endif - -EXTERN_C const CLSID CLSID_NegationCondition; - -#ifdef __cplusplus - -class DECLSPEC_UUID("8DE9C74C-605A-4acd-BEE3-2B222AA2D23D") -NegationCondition; -#endif - -EXTERN_C const CLSID CLSID_CompoundCondition; - -#ifdef __cplusplus - -class DECLSPEC_UUID("116F8D13-101E-4fa5-84D4-FF8279381935") -CompoundCondition; -#endif - -EXTERN_C const CLSID CLSID_LeafCondition; - -#ifdef __cplusplus - -class DECLSPEC_UUID("52F15C89-5A17-48e1-BBCD-46A3F89C7CC2") -LeafCondition; -#endif - -EXTERN_C const CLSID CLSID_ConditionFactory; - -#ifdef __cplusplus - -class DECLSPEC_UUID("E03E85B0-7BE3-4000-BA98-6C13DE9FA486") -ConditionFactory; -#endif - -EXTERN_C const CLSID CLSID_Interval; - -#ifdef __cplusplus - -class DECLSPEC_UUID("D957171F-4BF9-4de2-BCD5-C70A7CA55836") -Interval; -#endif - -EXTERN_C const CLSID CLSID_QueryParserManager; - -#ifdef __cplusplus - -class DECLSPEC_UUID("5088B39A-29B4-4d9d-8245-4EE289222F66") -QueryParserManager; -#endif -#endif /* __StructuredQuery1_LIBRARY_DEFINED__ */ - -/* Additional Prototypes for ALL interfaces */ - -unsigned long __RPC_USER BSTR_UserSize( unsigned long *, unsigned long , BSTR * ); -unsigned char * __RPC_USER BSTR_UserMarshal( unsigned long *, unsigned char *, BSTR * ); -unsigned char * __RPC_USER BSTR_UserUnmarshal(unsigned long *, unsigned char *, BSTR * ); -void __RPC_USER BSTR_UserFree( unsigned long *, BSTR * ); - -unsigned long __RPC_USER LPSAFEARRAY_UserSize( unsigned long *, unsigned long , LPSAFEARRAY * ); -unsigned char * __RPC_USER LPSAFEARRAY_UserMarshal( unsigned long *, unsigned char *, 
LPSAFEARRAY * ); -unsigned char * __RPC_USER LPSAFEARRAY_UserUnmarshal(unsigned long *, unsigned char *, LPSAFEARRAY * ); -void __RPC_USER LPSAFEARRAY_UserFree( unsigned long *, LPSAFEARRAY * ); - -/* end of Additional Prototypes */ - -#ifdef __cplusplus -} -#endif - -#endif - - - diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py deleted file mode 100644 index 3d28c82dc77b8b8b764bcf76d401265903db1a64..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py +++ /dev/null @@ -1,850 +0,0 @@ -# -*- coding: utf-8 -*- - -"""T2CharString operator specializer and generalizer. - -PostScript glyph drawing operations can be expressed in multiple different -ways. For example, as well as the ``lineto`` operator, there is also a -``hlineto`` operator which draws a horizontal line, removing the need to -specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a -vertical line, removing the need to specify a ``dy`` coordinate. As well -as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects -into lists of operations, this module allows for conversion between general -and specific forms of the operation. - -""" - -from fontTools.cffLib import maxStackLimit - - -def stringToProgram(string): - if isinstance(string, str): - string = string.split() - program = [] - for token in string: - try: - token = int(token) - except ValueError: - try: - token = float(token) - except ValueError: - pass - program.append(token) - return program - - -def programToString(program): - return " ".join(str(x) for x in program) - - -def programToCommands(program, getNumRegions=None): - """Takes a T2CharString program list and returns list of commands. - Each command is a two-tuple of commandname,arg-list. The commandname might - be empty string if no commandname shall be emitted (used for glyph width, - hintmask/cntrmask argument, as well as stray arguments at the end of the - program (¯\_(ツ)_/¯). - 'getNumRegions' may be None, or a callable object. It must return the - number of regions. 'getNumRegions' takes a single argument, vsindex. If - the vsindex argument is None, getNumRegions returns the default number - of regions for the charstring, else it returns the numRegions for - the vsindex. - The Charstring may or may not start with a width value. If the first - non-blend operator has an odd number of arguments, then the first argument is - a width, and is popped off. This is complicated with blend operators, as - there may be more than one before the first hint or moveto operator, and each - one reduces several arguments to just one list argument. We have to sum the - number of arguments that are not part of the blend arguments, and all the - 'numBlends' values. We could instead have said that by definition, if there - is a blend operator, there is no width value, since CFF2 Charstrings don't - have width values. I discussed this with Behdad, and we are allowing for an - initial width value in this case because developers may assemble a CFF2 - charstring from CFF Charstrings, which could have width values. 
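    A minimal illustration (argument values invented for this note, not taken
    from any real font): 'rmoveto' normally takes two arguments, so a third,
    leading argument is read as the width and emitted as a command with an
    empty name:

        programToCommands([100, 10, 20, 'rmoveto', 5, 'hlineto'])
        ==> [('', [100]), ('rmoveto', [10, 20]), ('hlineto', [5])]

    commandsToProgram() applied to that list reproduces the original program.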
- """ - - seenWidthOp = False - vsIndex = None - lenBlendStack = 0 - lastBlendIndex = 0 - commands = [] - stack = [] - it = iter(program) - - for token in it: - if not isinstance(token, str): - stack.append(token) - continue - - if token == "blend": - assert getNumRegions is not None - numSourceFonts = 1 + getNumRegions(vsIndex) - # replace the blend op args on the stack with a single list - # containing all the blend op args. - numBlends = stack[-1] - numBlendArgs = numBlends * numSourceFonts + 1 - # replace first blend op by a list of the blend ops. - stack[-numBlendArgs:] = [stack[-numBlendArgs:]] - lenBlendStack += numBlends + len(stack) - 1 - lastBlendIndex = len(stack) - # if a blend op exists, this is or will be a CFF2 charstring. - continue - - elif token == "vsindex": - vsIndex = stack[-1] - assert type(vsIndex) is int - - elif (not seenWidthOp) and token in { - "hstem", - "hstemhm", - "vstem", - "vstemhm", - "cntrmask", - "hintmask", - "hmoveto", - "vmoveto", - "rmoveto", - "endchar", - }: - seenWidthOp = True - parity = token in {"hmoveto", "vmoveto"} - if lenBlendStack: - # lenBlendStack has the number of args represented by the last blend - # arg and all the preceding args. We need to now add the number of - # args following the last blend arg. - numArgs = lenBlendStack + len(stack[lastBlendIndex:]) - else: - numArgs = len(stack) - if numArgs and (numArgs % 2) ^ parity: - width = stack.pop(0) - commands.append(("", [width])) - - if token in {"hintmask", "cntrmask"}: - if stack: - commands.append(("", stack)) - commands.append((token, [])) - commands.append(("", [next(it)])) - else: - commands.append((token, stack)) - stack = [] - if stack: - commands.append(("", stack)) - return commands - - -def _flattenBlendArgs(args): - token_list = [] - for arg in args: - if isinstance(arg, list): - token_list.extend(arg) - token_list.append("blend") - else: - token_list.append(arg) - return token_list - - -def commandsToProgram(commands): - """Takes a commands list as returned by programToCommands() and converts - it back to a T2CharString program list.""" - program = [] - for op, args in commands: - if any(isinstance(arg, list) for arg in args): - args = _flattenBlendArgs(args) - program.extend(args) - if op: - program.append(op) - return program - - -def _everyN(el, n): - """Group the list el into groups of size n""" - if len(el) % n != 0: - raise ValueError(el) - for i in range(0, len(el), n): - yield el[i : i + n] - - -class _GeneralizerDecombinerCommandsMap(object): - @staticmethod - def rmoveto(args): - if len(args) != 2: - raise ValueError(args) - yield ("rmoveto", args) - - @staticmethod - def hmoveto(args): - if len(args) != 1: - raise ValueError(args) - yield ("rmoveto", [args[0], 0]) - - @staticmethod - def vmoveto(args): - if len(args) != 1: - raise ValueError(args) - yield ("rmoveto", [0, args[0]]) - - @staticmethod - def rlineto(args): - if not args: - raise ValueError(args) - for args in _everyN(args, 2): - yield ("rlineto", args) - - @staticmethod - def hlineto(args): - if not args: - raise ValueError(args) - it = iter(args) - try: - while True: - yield ("rlineto", [next(it), 0]) - yield ("rlineto", [0, next(it)]) - except StopIteration: - pass - - @staticmethod - def vlineto(args): - if not args: - raise ValueError(args) - it = iter(args) - try: - while True: - yield ("rlineto", [0, next(it)]) - yield ("rlineto", [next(it), 0]) - except StopIteration: - pass - - @staticmethod - def rrcurveto(args): - if not args: - raise ValueError(args) - for args in _everyN(args, 6): - 
yield ("rrcurveto", args) - - @staticmethod - def hhcurveto(args): - if len(args) < 4 or len(args) % 4 > 1: - raise ValueError(args) - if len(args) % 2 == 1: - yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0]) - args = args[5:] - for args in _everyN(args, 4): - yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0]) - - @staticmethod - def vvcurveto(args): - if len(args) < 4 or len(args) % 4 > 1: - raise ValueError(args) - if len(args) % 2 == 1: - yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]]) - args = args[5:] - for args in _everyN(args, 4): - yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]]) - - @staticmethod - def hvcurveto(args): - if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}: - raise ValueError(args) - last_args = None - if len(args) % 2 == 1: - lastStraight = len(args) % 8 == 5 - args, last_args = args[:-5], args[-5:] - it = _everyN(args, 4) - try: - while True: - args = next(it) - yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]]) - args = next(it) - yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0]) - except StopIteration: - pass - if last_args: - args = last_args - if lastStraight: - yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]]) - else: - yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]]) - - @staticmethod - def vhcurveto(args): - if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}: - raise ValueError(args) - last_args = None - if len(args) % 2 == 1: - lastStraight = len(args) % 8 == 5 - args, last_args = args[:-5], args[-5:] - it = _everyN(args, 4) - try: - while True: - args = next(it) - yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0]) - args = next(it) - yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]]) - except StopIteration: - pass - if last_args: - args = last_args - if lastStraight: - yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]]) - else: - yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]]) - - @staticmethod - def rcurveline(args): - if len(args) < 8 or len(args) % 6 != 2: - raise ValueError(args) - args, last_args = args[:-2], args[-2:] - for args in _everyN(args, 6): - yield ("rrcurveto", args) - yield ("rlineto", last_args) - - @staticmethod - def rlinecurve(args): - if len(args) < 8 or len(args) % 2 != 0: - raise ValueError(args) - args, last_args = args[:-6], args[-6:] - for args in _everyN(args, 2): - yield ("rlineto", args) - yield ("rrcurveto", last_args) - - -def _convertBlendOpToArgs(blendList): - # args is list of blend op args. Since we are supporting - # recursive blend op calls, some of these args may also - # be a list of blend op args, and need to be converted before - # we convert the current list. - if any([isinstance(arg, list) for arg in blendList]): - args = [ - i - for e in blendList - for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e]) - ] - else: - args = blendList - - # We now know that blendList contains a blend op argument list, even if - # some of the args are lists that each contain a blend op argument list. - # Convert from: - # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn] - # to: - # [ [x0] + [delta tuple for x0], - # ..., - # [xn] + [delta tuple for xn] ] - numBlends = args[-1] - # Can't use args.pop() when the args are being used in a nested list - # comprehension. 
See calling context - args = args[:-1] - - numRegions = len(args) // numBlends - 1 - if not (numBlends * (numRegions + 1) == len(args)): - raise ValueError(blendList) - - defaultArgs = [[arg] for arg in args[:numBlends]] - deltaArgs = args[numBlends:] - numDeltaValues = len(deltaArgs) - deltaList = [ - deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions) - ] - blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)] - return blend_args - - -def generalizeCommands(commands, ignoreErrors=False): - result = [] - mapping = _GeneralizerDecombinerCommandsMap - for op, args in commands: - # First, generalize any blend args in the arg list. - if any([isinstance(arg, list) for arg in args]): - try: - args = [ - n - for arg in args - for n in ( - _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg] - ) - ] - except ValueError: - if ignoreErrors: - # Store op as data, such that consumers of commands do not have to - # deal with incorrect number of arguments. - result.append(("", args)) - result.append(("", [op])) - else: - raise - - func = getattr(mapping, op, None) - if not func: - result.append((op, args)) - continue - try: - for command in func(args): - result.append(command) - except ValueError: - if ignoreErrors: - # Store op as data, such that consumers of commands do not have to - # deal with incorrect number of arguments. - result.append(("", args)) - result.append(("", [op])) - else: - raise - return result - - -def generalizeProgram(program, getNumRegions=None, **kwargs): - return commandsToProgram( - generalizeCommands(programToCommands(program, getNumRegions), **kwargs) - ) - - -def _categorizeVector(v): - """ - Takes X,Y vector v and returns one of r, h, v, or 0 depending on which - of X and/or Y are zero, plus tuple of nonzero ones. If both are zero, - it returns a single zero still. - - >>> _categorizeVector((0,0)) - ('0', (0,)) - >>> _categorizeVector((1,0)) - ('h', (1,)) - >>> _categorizeVector((0,2)) - ('v', (2,)) - >>> _categorizeVector((1,2)) - ('r', (1, 2)) - """ - if not v[0]: - if not v[1]: - return "0", v[:1] - else: - return "v", v[1:] - else: - if not v[1]: - return "h", v[:1] - else: - return "r", v - - -def _mergeCategories(a, b): - if a == "0": - return b - if b == "0": - return a - if a == b: - return a - return None - - -def _negateCategory(a): - if a == "h": - return "v" - if a == "v": - return "h" - assert a in "0r" - return a - - -def _convertToBlendCmds(args): - # return a list of blend commands, and - # the remaining non-blended args, if any. - num_args = len(args) - stack_use = 0 - new_args = [] - i = 0 - while i < num_args: - arg = args[i] - if not isinstance(arg, list): - new_args.append(arg) - i += 1 - stack_use += 1 - else: - prev_stack_use = stack_use - # The arg is a tuple of blend values. - # These are each (master 0,delta 1..delta n, 1) - # Combine as many successive tuples as we can, - # up to the max stack limit. - num_sources = len(arg) - 1 - blendlist = [arg] - i += 1 - stack_use += 1 + num_sources # 1 for the num_blends arg - while (i < num_args) and isinstance(args[i], list): - blendlist.append(args[i]) - i += 1 - stack_use += num_sources - if stack_use + num_sources > maxStackLimit: - # if we are here, max stack is the CFF2 max stack. - # I use the CFF2 max stack limit here rather than - # the 'maxstack' chosen by the client, as the default - # maxstack may have been used unintentionally. 
For all - # the other operators, this just produces a little less - # optimization, but here it puts a hard (and low) limit - # on the number of source fonts that can be used. - break - # blendList now contains as many single blend tuples as can be - # combined without exceeding the CFF2 stack limit. - num_blends = len(blendlist) - # append the 'num_blends' default font values - blend_args = [] - for arg in blendlist: - blend_args.append(arg[0]) - for arg in blendlist: - assert arg[-1] == 1 - blend_args.extend(arg[1:-1]) - blend_args.append(num_blends) - new_args.append(blend_args) - stack_use = prev_stack_use + num_blends - - return new_args - - -def _addArgs(a, b): - if isinstance(b, list): - if isinstance(a, list): - if len(a) != len(b) or a[-1] != b[-1]: - raise ValueError() - return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]] - else: - a, b = b, a - if isinstance(a, list): - assert a[-1] == 1 - return [_addArgs(a[0], b)] + a[1:] - return a + b - - -def specializeCommands( - commands, - ignoreErrors=False, - generalizeFirst=True, - preserveTopology=False, - maxstack=48, -): - - # We perform several rounds of optimizations. They are carefully ordered and are: - # - # 0. Generalize commands. - # This ensures that they are in our expected simple form, with each line/curve only - # having arguments for one segment, and using the generic form (rlineto/rrcurveto). - # If caller is sure the input is in this form, they can turn off generalization to - # save time. - # - # 1. Combine successive rmoveto operations. - # - # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. - # We specialize into some, made-up, variants as well, which simplifies following - # passes. - # - # 3. Merge or delete redundant operations, to the extent requested. - # OpenType spec declares point numbers in CFF undefined. As such, we happily - # change topology. If client relies on point numbers (in GPOS anchors, or for - # hinting purposes(what?)) they can turn this off. - # - # 4. Peephole optimization to revert back some of the h/v variants back into their - # original "relative" operator (rline/rrcurveto) if that saves a byte. - # - # 5. Combine adjacent operators when possible, minding not to go over max stack size. - # - # 6. Resolve any remaining made-up operators into real operators. - # - # I have convinced myself that this produces optimal bytecode (except for, possibly - # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-) - # A dynamic-programming approach can do the same but would be significantly slower. - # - # 7. For any args which are blend lists, convert them to a blend command. - - # 0. Generalize commands. - if generalizeFirst: - commands = generalizeCommands(commands, ignoreErrors=ignoreErrors) - else: - commands = list(commands) # Make copy since we modify in-place later. - - # 1. Combine successive rmoveto operations. - for i in range(len(commands) - 1, 0, -1): - if "rmoveto" == commands[i][0] == commands[i - 1][0]: - v1, v2 = commands[i - 1][1], commands[i][1] - commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]]) - del commands[i] - - # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. - # - # We, in fact, specialize into more, made-up, variants that special-case when both - # X and Y components are zero. This simplifies the following optimization passes. - # This case is rare, but OCD does not let me skip it. 
- # - # After this round, we will have four variants that use the following mnemonics: - # - # - 'r' for relative, ie. non-zero X and non-zero Y, - # - 'h' for horizontal, ie. zero X and non-zero Y, - # - 'v' for vertical, ie. non-zero X and zero Y, - # - '0' for zeros, ie. zero X and zero Y. - # - # The '0' pseudo-operators are not part of the spec, but help simplify the following - # optimization rounds. We resolve them at the end. So, after this, we will have four - # moveto and four lineto variants: - # - # - 0moveto, 0lineto - # - hmoveto, hlineto - # - vmoveto, vlineto - # - rmoveto, rlineto - # - # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve - # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3. - # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3. - # - # There are nine different variants of curves without the '0'. Those nine map exactly - # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto, - # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of - # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of - # arguments) is in fact an rhcurveto. The operators in the spec are designed such that - # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve. - # - # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest - # of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be - # thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always - # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute - # the '0' with either 'h' or 'v' and it works. - # - # When we get to curve splines however, things become more complicated... XXX finish this. - # There's one more complexity with splines. If one side of the spline is not horizontal or - # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode. - # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and - # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'. - # This limits our merge opportunities later. - # - for i in range(len(commands)): - op, args = commands[i] - - if op in {"rmoveto", "rlineto"}: - c, args = _categorizeVector(args) - commands[i] = c + op[1:], args - continue - - if op == "rrcurveto": - c1, args1 = _categorizeVector(args[:2]) - c2, args2 = _categorizeVector(args[-2:]) - commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2 - continue - - # 3. Merge or delete redundant operations, to the extent requested. - # - # TODO - # A 0moveto that comes before all other path operations can be removed. - # though I find conflicting evidence for this. - # - # TODO - # "If hstem and vstem hints are both declared at the beginning of a - # CharString, and this sequence is followed directly by the hintmask or - # cntrmask operators, then the vstem hint operator (or, if applicable, - # the vstemhm operator) need not be included." - # - # "The sequence and form of a CFF2 CharString program may be represented as: - # {hs* vs* cm* hm* mt subpath}? {mt subpath}*" - # - # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1 - # - # For Type2 CharStrings the sequence is: - # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar" - - # Some other redundancies change topology (point numbers). 
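    # As a concrete illustration (values invented for this note) of what rounds
    # 2-3 do: after specialization,
    #   [('rmoveto', [0, 5]), ('rlineto', [3, 0]), ('rlineto', [0, 0])]
    # has become
    #   [('vmoveto', [5]), ('hlineto', [3]), ('0lineto', [0])],
    # and, when topology changes are allowed, the pass below simply deletes the
    # redundant '0lineto'.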
- if not preserveTopology: - for i in range(len(commands) - 1, -1, -1): - op, args = commands[i] - - # A 00curveto is demoted to a (specialized) lineto. - if op == "00curveto": - assert len(args) == 4 - c, args = _categorizeVector(args[1:3]) - op = c + "lineto" - commands[i] = op, args - # and then... - - # A 0lineto can be deleted. - if op == "0lineto": - del commands[i] - continue - - # Merge adjacent hlineto's and vlineto's. - # In CFF2 charstrings from variable fonts, each - # arg item may be a list of blendable values, one from - # each source font. - if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]): - _, other_args = commands[i - 1] - assert len(args) == 1 and len(other_args) == 1 - try: - new_args = [_addArgs(args[0], other_args[0])] - except ValueError: - continue - commands[i - 1] = (op, new_args) - del commands[i] - continue - - # 4. Peephole optimization to revert back some of the h/v variants back into their - # original "relative" operator (rline/rrcurveto) if that saves a byte. - for i in range(1, len(commands) - 1): - op, args = commands[i] - prv, nxt = commands[i - 1][0], commands[i + 1][0] - - if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto": - assert len(args) == 1 - args = [0, args[0]] if op[0] == "v" else [args[0], 0] - commands[i] = ("rlineto", args) - continue - - if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto": - assert (op[0] == "r") ^ (op[1] == "r") - if op[0] == "v": - pos = 0 - elif op[0] != "r": - pos = 1 - elif op[1] == "v": - pos = 4 - else: - pos = 5 - # Insert, while maintaining the type of args (can be tuple or list). - args = args[:pos] + type(args)((0,)) + args[pos:] - commands[i] = ("rrcurveto", args) - continue - - # 5. Combine adjacent operators when possible, minding not to go over max stack size. - for i in range(len(commands) - 1, 0, -1): - op1, args1 = commands[i - 1] - op2, args2 = commands[i] - new_op = None - - # Merge logic... - if {op1, op2} <= {"rlineto", "rrcurveto"}: - if op1 == op2: - new_op = op1 - else: - if op2 == "rrcurveto" and len(args2) == 6: - new_op = "rlinecurve" - elif len(args2) == 2: - new_op = "rcurveline" - - elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}: - new_op = op2 - - elif {op1, op2} == {"vlineto", "hlineto"}: - new_op = op1 - - elif "curveto" == op1[2:] == op2[2:]: - d0, d1 = op1[:2] - d2, d3 = op2[:2] - - if d1 == "r" or d2 == "r" or d0 == d3 == "r": - continue - - d = _mergeCategories(d1, d2) - if d is None: - continue - if d0 == "r": - d = _mergeCategories(d, d3) - if d is None: - continue - new_op = "r" + d + "curveto" - elif d3 == "r": - d0 = _mergeCategories(d0, _negateCategory(d)) - if d0 is None: - continue - new_op = d0 + "r" + "curveto" - else: - d0 = _mergeCategories(d0, d3) - if d0 is None: - continue - new_op = d0 + d + "curveto" - - # Make sure the stack depth does not exceed (maxstack - 1), so - # that subroutinizer can insert subroutine calls at any point. - if new_op and len(args1) + len(args2) < maxstack: - commands[i - 1] = (new_op, args1 + args2) - del commands[i] - - # 6. Resolve any remaining made-up operators into real operators. 
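    # For instance (illustrative values only): a leftover ('0moveto', [0]) is
    # rewritten as ('hmoveto', [0]), and a made-up mixed variant such as
    # ('0hcurveto', [0, 1, 2, 3]) is emitted as the real ('hhcurveto', [0, 1, 2, 3]),
    # since a '0' component may stand in for either 'h' or 'v'.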
- for i in range(len(commands)): - op, args = commands[i] - - if op in {"0moveto", "0lineto"}: - commands[i] = "h" + op[1:], args - continue - - if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}: - op0, op1 = op[:2] - if (op0 == "r") ^ (op1 == "r"): - assert len(args) % 2 == 1 - if op0 == "0": - op0 = "h" - if op1 == "0": - op1 = "h" - if op0 == "r": - op0 = op1 - if op1 == "r": - op1 = _negateCategory(op0) - assert {op0, op1} <= {"h", "v"}, (op0, op1) - - if len(args) % 2: - if op0 != op1: # vhcurveto / hvcurveto - if (op0 == "h") ^ (len(args) % 8 == 1): - # Swap last two args order - args = args[:-2] + args[-1:] + args[-2:-1] - else: # hhcurveto / vvcurveto - if op0 == "h": # hhcurveto - # Swap first two args order - args = args[1:2] + args[:1] + args[2:] - - commands[i] = op0 + op1 + "curveto", args - continue - - # 7. For any series of args which are blend lists, convert the series to a single blend arg. - for i in range(len(commands)): - op, args = commands[i] - if any(isinstance(arg, list) for arg in args): - commands[i] = op, _convertToBlendCmds(args) - - return commands - - -def specializeProgram(program, getNumRegions=None, **kwargs): - return commandsToProgram( - specializeCommands(programToCommands(program, getNumRegions), **kwargs) - ) - - -if __name__ == "__main__": - import sys - - if len(sys.argv) == 1: - import doctest - - sys.exit(doctest.testmod().failed) - - import argparse - - parser = argparse.ArgumentParser( - "fonttools cffLib.specialer", - description="CFF CharString generalizer/specializer", - ) - parser.add_argument("program", metavar="command", nargs="*", help="Commands.") - parser.add_argument( - "--num-regions", - metavar="NumRegions", - nargs="*", - default=None, - help="Number of variable-font regions for blend opertaions.", - ) - - options = parser.parse_args(sys.argv[1:]) - - getNumRegions = ( - None - if options.num_regions is None - else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex]) - ) - - program = stringToProgram(options.program) - print("Program:") - print(programToString(program)) - commands = programToCommands(program, getNumRegions) - print("Commands:") - print(commands) - program2 = commandsToProgram(commands) - print("Program from commands:") - print(programToString(program2)) - assert program == program2 - print("Generalized program:") - print(programToString(generalizeProgram(program, getNumRegions))) - print("Specialized program:") - print(programToString(specializeProgram(program, getNumRegions))) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-fef9d5f8.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-fef9d5f8.js deleted file mode 100644 index 0672f2fb4a6a2f22671672ffbd003d906fd75ea0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-fef9d5f8.js +++ /dev/null @@ -1,104 +0,0 @@ -import{B as Tn}from"./Button-89057c03.js";import{u as si}from"./utils-c3e3db58.js";import{B as Wn}from"./BlockLabel-e3b0d1c3.js";import{I as ri}from"./IconButton-16e5dbea.js";import{E as zn}from"./Empty-937365d8.js";import{S as ai}from"./ShareButton-d3fa81fa.js";import{D as li}from"./Download-696bd40c.js";import{S as Bn}from"./Index-37584f50.js";import{a as ui,P as On,T as di}from"./Trim-78ec077e.js";import{U as ci,M as qt}from"./ModifyUpload-87a26b2d.js";import{r as hi}from"./file-url-f4206b44.js";import{_ as 
tn,p as fi,u as mi,n as _i}from"./index-0526d562.js";import{U as pi}from"./Upload-a4034e93.js";import{a as gi,U as vi}from"./UploadText-232a3213.js";import{default as Hr}from"./Example-1fe376d1.js";import"./Clear-2c7bae91.js";/* empty css */import"./svelte/svelte.js";const{SvelteComponent:bi,append:wi,attr:x,detach:yi,init:ki,insert:Ci,noop:Rt,safe_not_equal:Ei,svg_element:nn}=window.__gradio__svelte__internal;function Ri(o){let e,t;return{c(){e=nn("svg"),t=nn("path"),x(t,"stroke","currentColor"),x(t,"stroke-width","1.5"),x(t,"stroke-linecap","round"),x(t,"stroke-linejoin","round"),x(t,"d","M21.044 5.704a.6.6 0 0 1 .956.483v11.626a.6.6 0 0 1-.956.483l-7.889-5.813a.6.6 0 0 1 0-.966l7.89-5.813ZM10.044 5.704a.6.6 0 0 1 .956.483v11.626a.6.6 0 0 1-.956.483l-7.888-5.813a.6.6 0 0 1 0-.966l7.888-5.813Z"),x(e,"xmlns","http://www.w3.org/2000/svg"),x(e,"width","24px"),x(e,"height","24px"),x(e,"fill","currentColor"),x(e,"stroke-width","1.5"),x(e,"viewBox","0 0 24 24"),x(e,"color","currentColor")},m(n,i){Ci(n,e,i),wi(e,t)},p:Rt,i:Rt,o:Rt,d(n){n&&yi(e)}}}class Si extends bi{constructor(e){super(),ki(this,e,null,Ri,Ei,{})}}const{SvelteComponent:Di,append:Mi,attr:ee,detach:Li,init:Ai,insert:Pi,noop:St,safe_not_equal:Ti,svg_element:on}=window.__gradio__svelte__internal;function Wi(o){let e,t;return{c(){e=on("svg"),t=on("path"),ee(t,"stroke","currentColor"),ee(t,"stroke-width","1.5"),ee(t,"stroke-linecap","round"),ee(t,"stroke-linejoin","round"),ee(t,"d","M2.956 5.704A.6.6 0 0 0 2 6.187v11.626a.6.6 0 0 0 .956.483l7.889-5.813a.6.6 0 0 0 0-.966l-7.89-5.813ZM13.956 5.704a.6.6 0 0 0-.956.483v11.626a.6.6 0 0 0 .956.483l7.889-5.813a.6.6 0 0 0 0-.966l-7.89-5.813Z"),ee(e,"xmlns","http://www.w3.org/2000/svg"),ee(e,"width","24px"),ee(e,"height","24px"),ee(e,"fill","currentColor"),ee(e,"stroke-width","1.5"),ee(e,"viewBox","0 0 24 24"),ee(e,"color","currentColor")},m(n,i){Pi(n,e,i),Mi(e,t)},p:St,i:St,o:St,d(n){n&&Li(e)}}}class zi extends Di{constructor(e){super(),Ai(this,e,null,Wi,Ti,{})}}const{SvelteComponent:Bi,append:rt,attr:H,detach:Oi,init:Ii,insert:Ni,noop:Dt,safe_not_equal:Hi,svg_element:Je}=window.__gradio__svelte__internal;function qi(o){let e,t,n,i,s;return{c(){e=Je("svg"),t=Je("path"),n=Je("path"),i=Je("line"),s=Je("line"),H(t,"d","M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"),H(n,"d","M19 10v2a7 7 0 0 1-14 0v-2"),H(i,"x1","12"),H(i,"y1","19"),H(i,"x2","12"),H(i,"y2","23"),H(s,"x1","8"),H(s,"y1","23"),H(s,"x2","16"),H(s,"y2","23"),H(e,"xmlns","http://www.w3.org/2000/svg"),H(e,"width","100%"),H(e,"height","100%"),H(e,"viewBox","0 0 24 24"),H(e,"fill","none"),H(e,"stroke","currentColor"),H(e,"stroke-width","2"),H(e,"stroke-linecap","round"),H(e,"stroke-linejoin","round"),H(e,"class","feather feather-mic")},m(a,u){Ni(a,e,u),rt(e,t),rt(e,n),rt(e,i),rt(e,s)},p:Dt,i:Dt,o:Dt,d(a){a&&Oi(e)}}}class Ui extends Bi{constructor(e){super(),Ii(this,e,null,qi,Hi,{})}}const{SvelteComponent:ji,append:Mt,attr:V,detach:Fi,init:Vi,insert:Xi,noop:Lt,safe_not_equal:Gi,svg_element:at}=window.__gradio__svelte__internal;function Yi(o){let e,t,n,i;return{c(){e=at("svg"),t=at("path"),n=at("circle"),i=at("circle"),V(t,"d","M9 18V5l12-2v13"),V(n,"cx","6"),V(n,"cy","18"),V(n,"r","3"),V(i,"cx","18"),V(i,"cy","16"),V(i,"r","3"),V(e,"xmlns","http://www.w3.org/2000/svg"),V(e,"width","100%"),V(e,"height","100%"),V(e,"viewBox","0 0 24 24"),V(e,"fill","none"),V(e,"stroke","currentColor"),V(e,"stroke-width","1.5"),V(e,"stroke-linecap","round"),V(e,"stroke-linejoin","round"),V(e,"class","feather 
feather-music")},m(s,a){Xi(s,e,a),Mt(e,t),Mt(e,n),Mt(e,i)},p:Lt,i:Lt,o:Lt,d(s){s&&Fi(e)}}}class gt extends ji{constructor(e){super(),Vi(this,e,null,Yi,Gi,{})}}var Zi=globalThis&&globalThis.__awaiter||function(o,e,t,n){function i(s){return s instanceof t?s:new t(function(a){a(s)})}return new(t||(t=Promise))(function(s,a){function u(r){try{d(n.next(r))}catch(c){a(c)}}function l(r){try{d(n.throw(r))}catch(c){a(c)}}function d(r){r.done?s(r.value):i(r.value).then(u,l)}d((n=n.apply(o,e||[])).next())})};function Ji(o,e){return Zi(this,void 0,void 0,function*(){const t=new AudioContext({sampleRate:e});return t.decodeAudioData(o).finally(()=>t.close())})}function Ki(o){const e=o[0];if(e.some(t=>t>1||t<-1)){const t=e.length;let n=0;for(let i=0;in&&(n=s)}for(const i of o)for(let s=0;so?.[t],copyFromChannel:AudioBuffer.prototype.copyFromChannel,copyToChannel:AudioBuffer.prototype.copyToChannel}}const At={decode:Ji,createBuffer:Qi};var sn=globalThis&&globalThis.__awaiter||function(o,e,t,n){function i(s){return s instanceof t?s:new t(function(a){a(s)})}return new(t||(t=Promise))(function(s,a){function u(r){try{d(n.next(r))}catch(c){a(c)}}function l(r){try{d(n.throw(r))}catch(c){a(c)}}function d(r){r.done?s(r.value):i(r.value).then(u,l)}d((n=n.apply(o,e||[])).next())})};function $i(o,e,t){var n,i;return sn(this,void 0,void 0,function*(){const s=yield fetch(o,t);{const a=(n=s.clone().body)===null||n===void 0?void 0:n.getReader(),u=Number((i=s.headers)===null||i===void 0?void 0:i.get("Content-Length"));let l=0;const d=(r,c)=>sn(this,void 0,void 0,function*(){if(r)return;l+=c?.length||0;const h=Math.round(l/u*100);return e(h),a?.read().then(({done:f,value:m})=>d(f,m))});a?.read().then(({done:r,value:c})=>d(r,c))}return s.blob()})}const xi={fetchBlob:$i};class vt{constructor(){this.listeners={},this.on=this.addEventListener,this.un=this.removeEventListener}addEventListener(e,t,n){if(this.listeners[e]||(this.listeners[e]=new Set),this.listeners[e].add(t),n?.once){const i=()=>{this.removeEventListener(e,i),this.removeEventListener(e,t)};return this.addEventListener(e,i),i}return()=>this.removeEventListener(e,t)}removeEventListener(e,t){var n;(n=this.listeners[e])===null||n===void 0||n.delete(t)}once(e,t){return this.on(e,t,{once:!0})}unAll(){this.listeners={}}emit(e,...t){this.listeners[e]&&this.listeners[e].forEach(n=>n(...t))}}class eo extends vt{constructor(e){super(),this.isExternalMedia=!1,e.media?(this.media=e.media,this.isExternalMedia=!0):this.media=document.createElement("audio"),e.mediaControls&&(this.media.controls=!0),e.autoplay&&(this.media.autoplay=!0),e.playbackRate!=null&&this.onceMediaEvent("canplay",()=>{e.playbackRate!=null&&(this.media.playbackRate=e.playbackRate)})}onMediaEvent(e,t,n){return this.media.addEventListener(e,t,n),()=>this.media.removeEventListener(e,t)}onceMediaEvent(e,t){return this.onMediaEvent(e,t,{once:!0})}getSrc(){return this.media.currentSrc||this.media.src||""}revokeSrc(){const e=this.getSrc();e.startsWith("blob:")&&URL.revokeObjectURL(e)}setSrc(e,t){if(this.getSrc()===e)return;this.revokeSrc();const i=t instanceof Blob?URL.createObjectURL(t):e;this.media.src=i,this.media.load()}destroy(){this.media.pause(),!this.isExternalMedia&&(this.media.remove(),this.revokeSrc(),this.media.src="",this.media.load())}setMediaElement(e){this.media=e}play(){return this.media.play()}pause(){this.media.pause()}isPlaying(){return!this.media.paused&&!this.media.ended}setTime(e){this.media.currentTime=e}getDuration(){return this.media.duration}getCurrentTime(){return 
this.media.currentTime}getVolume(){return this.media.volume}setVolume(e){this.media.volume=e}getMuted(){return this.media.muted}setMuted(e){this.media.muted=e}getPlaybackRate(){return this.media.playbackRate}setPlaybackRate(e,t){t!=null&&(this.media.preservesPitch=t),this.media.playbackRate=e}getMediaElement(){return this.media}setSinkId(e){return this.media.setSinkId(e)}}function to(o,e,t,n,i=5){let s=()=>{};if(!o)return s;const a=u=>{if(u.button===2)return;u.preventDefault(),u.stopPropagation(),o.style.touchAction="none";let l=u.clientX,d=u.clientY,r=!1;const c=m=>{m.preventDefault(),m.stopPropagation();const _=m.clientX,g=m.clientY;if(r||Math.abs(_-l)>=i||Math.abs(g-d)>=i){const{left:v,top:y}=o.getBoundingClientRect();r||(r=!0,t?.(l-v,d-y)),e(_-l,g-d,_-v,g-y),l=_,d=g}},h=m=>{r&&(m.preventDefault(),m.stopPropagation())},f=()=>{o.style.touchAction="",r&&n?.(),s()};document.addEventListener("pointermove",c),document.addEventListener("pointerup",f),document.addEventListener("pointerleave",f),document.addEventListener("click",h,!0),s=()=>{document.removeEventListener("pointermove",c),document.removeEventListener("pointerup",f),document.removeEventListener("pointerleave",f),setTimeout(()=>{document.removeEventListener("click",h,!0)},10)}};return o.addEventListener("pointerdown",a),()=>{s(),o.removeEventListener("pointerdown",a)}}class bt extends vt{constructor(e,t){super(),this.timeouts=[],this.isScrolling=!1,this.audioData=null,this.resizeObserver=null,this.isDragging=!1,this.options=e;const n=this.parentFromOptionsContainer(e.container);this.parent=n;const[i,s]=this.initHtml();n.appendChild(i),this.container=i,this.scrollContainer=s.querySelector(".scroll"),this.wrapper=s.querySelector(".wrapper"),this.canvasWrapper=s.querySelector(".canvases"),this.progressWrapper=s.querySelector(".progress"),this.cursor=s.querySelector(".cursor"),t&&s.appendChild(t),this.initEvents()}parentFromOptionsContainer(e){let t;if(typeof e=="string"?t=document.querySelector(e):e instanceof HTMLElement&&(t=e),!t)throw new Error("Container not found");return t}initEvents(){const e=n=>{const i=this.wrapper.getBoundingClientRect(),s=n.clientX-i.left,a=n.clientX-i.left,u=s/i.width,l=a/i.height;return[u,l]};this.wrapper.addEventListener("click",n=>{const[i,s]=e(n);this.emit("click",i,s)}),this.wrapper.addEventListener("dblclick",n=>{const[i,s]=e(n);this.emit("dblclick",i,s)}),this.options.dragToSeek&&this.initDrag(),this.scrollContainer.addEventListener("scroll",()=>{const{scrollLeft:n,scrollWidth:i,clientWidth:s}=this.scrollContainer,a=n/i,u=(n+s)/i;this.emit("scroll",a,u)});const t=this.createDelay(100);this.resizeObserver=new ResizeObserver(()=>{t(()=>this.reRender())}),this.resizeObserver.observe(this.scrollContainer)}initDrag(){to(this.wrapper,(e,t,n)=>{this.emit("drag",Math.max(0,Math.min(1,n/this.wrapper.getBoundingClientRect().width)))},()=>this.isDragging=!0,()=>this.isDragging=!1)}getHeight(){return this.options.height==null?128:isNaN(Number(this.options.height))?this.options.height==="auto"&&this.parent.clientHeight||128:Number(this.options.height)}initHtml(){const e=document.createElement("div"),t=e.attachShadow({mode:"open"});return t.innerHTML=` - - -
      -
      -
      -
      -
      -
      -
      - `,[e,t]}setOptions(e){if(this.options.container!==e.container){const t=this.parentFromOptionsContainer(e.container);t.appendChild(this.container),this.parent=t}e.dragToSeek&&!this.options.dragToSeek&&this.initDrag(),this.options=e,this.reRender()}getWrapper(){return this.wrapper}getScroll(){return this.scrollContainer.scrollLeft}destroy(){var e;this.container.remove(),(e=this.resizeObserver)===null||e===void 0||e.disconnect()}createDelay(e=10){const t={};return this.timeouts.push(t),n=>{t.timeout&&clearTimeout(t.timeout),t.timeout=setTimeout(n,e)}}convertColorValues(e){if(!Array.isArray(e))return e||"";if(e.length<2)return e[0]||"";const t=document.createElement("canvas"),i=t.getContext("2d").createLinearGradient(0,0,0,t.height),s=1/(e.length-1);return e.forEach((a,u)=>{const l=u*s;i.addColorStop(l,a)}),i}renderBarWaveform(e,t,n,i){const s=e[0],a=e[1]||e[0],u=s.length,{width:l,height:d}=n.canvas,r=d/2,c=window.devicePixelRatio||1,h=t.barWidth?t.barWidth*c:1,f=t.barGap?t.barGap*c:t.barWidth?h/2:0,m=t.barRadius||0,_=l/(h+f)/u,g=m&&"roundRect"in n?"roundRect":"rect";n.beginPath();let v=0,y=0,k=0;for(let D=0;D<=u;D++){const M=Math.round(D*_);if(M>v){const P=Math.round(y*r*i),T=Math.round(k*r*i),I=P+T||1;let B=r-P;t.barAlign==="top"?B=0:t.barAlign==="bottom"&&(B=d-I),n[g](v*(h+f),B,h,I,m),v=M,y=0,k=0}const A=Math.abs(s[D]||0),E=Math.abs(a[D]||0);A>y&&(y=A),E>k&&(k=E)}n.fill(),n.closePath()}renderLineWaveform(e,t,n,i){const s=a=>{const u=e[a]||e[0],l=u.length,{height:d}=n.canvas,r=d/2,c=n.canvas.width/l;n.moveTo(0,r);let h=0,f=0;for(let m=0;m<=l;m++){const _=Math.round(m*c);if(_>h){const v=Math.round(f*r*i)||1,y=r+v*(a===0?-1:1);n.lineTo(h,y),h=_,f=0}const g=Math.abs(u[m]||0);g>f&&(f=g)}n.lineTo(h,r)};n.beginPath(),s(0),s(1),n.fill(),n.closePath()}renderWaveform(e,t,n){if(n.fillStyle=this.convertColorValues(t.waveColor),t.renderFunction){t.renderFunction(e,n);return}let i=t.barHeight||1;if(t.normalize){const s=Array.from(e[0]).reduce((a,u)=>Math.max(a,Math.abs(u)),0);i=s?1/s:1}if(t.barWidth||t.barGap||t.barAlign){this.renderBarWaveform(e,t,n,i);return}this.renderLineWaveform(e,t,n,i)}renderSingleCanvas(e,t,n,i,s,a,u,l){const d=window.devicePixelRatio||1,r=document.createElement("canvas"),c=e[0].length;r.width=Math.round(n*(a-s)/c),r.height=i*d,r.style.width=`${Math.floor(r.width/d)}px`,r.style.height=`${i}px`,r.style.left=`${Math.floor(s*n/d/c)}px`,u.appendChild(r);const h=r.getContext("2d");if(this.renderWaveform(e.map(f=>f.slice(s,a)),t,h),r.width>0&&r.height>0){const f=r.cloneNode(),m=f.getContext("2d");m.drawImage(r,0,0),m.globalCompositeOperation="source-in",m.fillStyle=this.convertColorValues(t.progressColor),m.fillRect(0,0,r.width,r.height),l.appendChild(f)}}renderChannel(e,t,n){const i=document.createElement("div"),s=this.getHeight();i.style.height=`${s}px`,this.canvasWrapper.style.minHeight=`${s}px`,this.canvasWrapper.appendChild(i);const a=i.cloneNode();this.progressWrapper.appendChild(a);const{scrollLeft:u,scrollWidth:l,clientWidth:d}=this.scrollContainer,r=e[0].length,c=r/l;let h=Math.min(bt.MAX_CANVAS_WIDTH,d);if(t.barWidth||t.barGap){const M=t.barWidth||.5,A=t.barGap||M/2,E=M+A;h%E!==0&&(h=Math.floor(h/E)*E)}const 
f=Math.floor(Math.abs(u)*c),m=Math.floor(f+h*c),_=m-f,g=(M,A)=>{this.renderSingleCanvas(e,t,n,s,Math.max(0,M),Math.min(A,r),i,a)},v=this.createDelay(),y=this.createDelay(),k=(M,A)=>{g(M,A),M>0&&v(()=>{k(M-_,A-_)})},D=(M,A)=>{g(M,A),A{D(M+_,A+_)})};k(f,m),mu.timeout&&clearTimeout(u.timeout)),this.timeouts=[],this.canvasWrapper.innerHTML="",this.progressWrapper.innerHTML="",this.wrapper.style.width="",this.options.width!=null&&(this.scrollContainer.style.width=typeof this.options.width=="number"?`${this.options.width}px`:this.options.width);const t=window.devicePixelRatio||1,n=this.scrollContainer.clientWidth,i=Math.ceil(e.duration*(this.options.minPxPerSec||0));this.isScrolling=i>n;const s=this.options.fillParent&&!this.isScrolling,a=(s?n:i)*t;if(this.wrapper.style.width=s?"100%":`${i}px`,this.scrollContainer.style.overflowX=this.isScrolling?"auto":"hidden",this.scrollContainer.classList.toggle("noScrollbar",!!this.options.hideScrollbar),this.cursor.style.backgroundColor=`${this.options.cursorColor||this.options.progressColor}`,this.cursor.style.width=`${this.options.cursorWidth}px`,this.options.splitChannels)for(let u=0;u1&&u.push(e.getChannelData(1)),this.renderChannel(u,this.options,a)}this.audioData=e,this.emit("render")}reRender(){if(!this.audioData)return;const e=this.progressWrapper.clientWidth;this.render(this.audioData);const t=this.progressWrapper.clientWidth;this.scrollContainer.scrollLeft+=t-e}zoom(e){this.options.minPxPerSec=e,this.reRender()}scrollIntoView(e,t=!1){const{clientWidth:n,scrollLeft:i,scrollWidth:s}=this.scrollContainer,a=s*e,u=n/2,l=t&&this.options.autoCenter&&!this.isDragging?u:n;if(a>i+l||a=d&&a{}}start(){this.unsubscribe=this.on("tick",()=>{requestAnimationFrame(()=>{this.emit("tick")})}),this.emit("tick")}stop(){this.unsubscribe()}destroy(){this.unsubscribe()}}var Pt=globalThis&&globalThis.__awaiter||function(o,e,t,n){function i(s){return s instanceof t?s:new t(function(a){a(s)})}return new(t||(t=Promise))(function(s,a){function u(r){try{d(n.next(r))}catch(c){a(c)}}function l(r){try{d(n.throw(r))}catch(c){a(c)}}function d(r){r.done?s(r.value):i(r.value).then(u,l)}d((n=n.apply(o,e||[])).next())})};class io extends vt{constructor(e=new AudioContext){super(),this.bufferNode=null,this.autoplay=!1,this.playStartTime=0,this.playedDuration=0,this._muted=!1,this.buffer=null,this.currentSrc="",this.paused=!0,this.crossOrigin=null,this.audioContext=e,this.gainNode=this.audioContext.createGain(),this.gainNode.connect(this.audioContext.destination)}load(){return Pt(this,void 0,void 0,function*(){})}get src(){return this.currentSrc}set src(e){this.currentSrc=e,fetch(e).then(t=>t.arrayBuffer()).then(t=>this.audioContext.decodeAudioData(t)).then(t=>{this.buffer=t,this.emit("loadedmetadata"),this.emit("canplay"),this.autoplay&&this.play()})}_play(){var e;this.paused&&(this.paused=!1,(e=this.bufferNode)===null||e===void 0||e.disconnect(),this.bufferNode=this.audioContext.createBufferSource(),this.bufferNode.buffer=this.buffer,this.bufferNode.connect(this.gainNode),this.playedDuration>=this.duration&&(this.playedDuration=0),this.bufferNode.start(this.audioContext.currentTime,this.playedDuration),this.playStartTime=this.audioContext.currentTime,this.bufferNode.onended=()=>{this.currentTime>=this.duration&&(this.pause(),this.emit("ended"))})}_pause(){var e;this.paused||(this.paused=!0,(e=this.bufferNode)===null||e===void 0||e.stop(),this.playedDuration+=this.audioContext.currentTime-this.playStartTime)}play(){return Pt(this,void 0,void 
0,function*(){this._play(),this.emit("play")})}pause(){this._pause(),this.emit("pause")}setSinkId(e){return Pt(this,void 0,void 0,function*(){return this.audioContext.setSinkId(e)})}get playbackRate(){var e,t;return(t=(e=this.bufferNode)===null||e===void 0?void 0:e.playbackRate.value)!==null&&t!==void 0?t:1}set playbackRate(e){this.bufferNode&&(this.bufferNode.playbackRate.value=e)}get currentTime(){return this.paused?this.playedDuration:this.playedDuration+this.audioContext.currentTime-this.playStartTime}set currentTime(e){this.emit("seeking"),this.paused?this.playedDuration=e:(this._pause(),this.playedDuration=e,this._play()),this.emit("timeupdate")}get duration(){var e;return((e=this.buffer)===null||e===void 0?void 0:e.duration)||0}get volume(){return this.gainNode.gain.value}set volume(e){this.gainNode.gain.value=e,this.emit("volumechange")}get muted(){return this._muted}set muted(e){this._muted!==e&&(this._muted=e,this._muted?this.gainNode.disconnect():this.gainNode.connect(this.audioContext.destination))}getGainNode(){return this.gainNode}}var Pe=globalThis&&globalThis.__awaiter||function(o,e,t,n){function i(s){return s instanceof t?s:new t(function(a){a(s)})}return new(t||(t=Promise))(function(s,a){function u(r){try{d(n.next(r))}catch(c){a(c)}}function l(r){try{d(n.throw(r))}catch(c){a(c)}}function d(r){r.done?s(r.value):i(r.value).then(u,l)}d((n=n.apply(o,e||[])).next())})};const oo={waveColor:"#999",progressColor:"#555",cursorWidth:1,minPxPerSec:0,fillParent:!0,interact:!0,dragToSeek:!1,autoScroll:!0,autoCenter:!0,sampleRate:8e3};class He extends eo{static create(e){return new He(e)}constructor(e){const t=e.media||(e.backend==="WebAudio"?new io:void 0);super({media:t,mediaControls:e.mediaControls,autoplay:e.autoplay,playbackRate:e.audioRate}),this.plugins=[],this.decodedData=null,this.subscriptions=[],this.mediaSubscriptions=[],this.options=Object.assign({},oo,e),this.timer=new no;const n=t?void 0:this.getMediaElement();this.renderer=new bt(this.options,n),this.initPlayerEvents(),this.initRendererEvents(),this.initTimerEvents(),this.initPlugins();const i=this.options.url||this.getSrc();i?this.load(i,this.options.peaks,this.options.duration):this.options.peaks&&this.options.duration&&this.loadPredecoded()}initTimerEvents(){this.subscriptions.push(this.timer.on("tick",()=>{const e=this.getCurrentTime();this.renderer.renderProgress(e/this.getDuration(),!0),this.emit("timeupdate",e),this.emit("audioprocess",e)}))}initPlayerEvents(){this.mediaSubscriptions.push(this.onMediaEvent("timeupdate",()=>{const e=this.getCurrentTime();this.renderer.renderProgress(e/this.getDuration(),this.isPlaying()),this.emit("timeupdate",e)}),this.onMediaEvent("play",()=>{this.emit("play"),this.timer.start()}),this.onMediaEvent("pause",()=>{this.emit("pause"),this.timer.stop()}),this.onMediaEvent("emptied",()=>{this.timer.stop()}),this.onMediaEvent("ended",()=>{this.emit("finish")}),this.onMediaEvent("seeking",()=>{this.emit("seeking",this.getCurrentTime())}))}initRendererEvents(){this.subscriptions.push(this.renderer.on("click",(e,t)=>{this.options.interact&&(this.seekTo(e),this.emit("interaction",e*this.getDuration()),this.emit("click",e,t))}),this.renderer.on("dblclick",(e,t)=>{this.emit("dblclick",e,t)}),this.renderer.on("scroll",(e,t)=>{const n=this.getDuration();this.emit("scroll",e*n,t*n)}),this.renderer.on("render",()=>{this.emit("redraw")}));{let 
e;this.subscriptions.push(this.renderer.on("drag",t=>{this.options.interact&&(this.renderer.renderProgress(t),clearTimeout(e),e=setTimeout(()=>{this.seekTo(t)},this.isPlaying()?0:200),this.emit("interaction",t*this.getDuration()),this.emit("drag",t))}))}}initPlugins(){var e;!((e=this.options.plugins)===null||e===void 0)&&e.length&&this.options.plugins.forEach(t=>{this.registerPlugin(t)})}unsubscribePlayerEvents(){this.mediaSubscriptions.forEach(e=>e()),this.mediaSubscriptions=[]}setOptions(e){this.options=Object.assign({},this.options,e),this.renderer.setOptions(this.options),e.audioRate&&this.setPlaybackRate(e.audioRate),e.mediaControls!=null&&(this.getMediaElement().controls=e.mediaControls)}registerPlugin(e){return e.init(this),this.plugins.push(e),this.subscriptions.push(e.once("destroy",()=>{this.plugins=this.plugins.filter(t=>t!==e)})),e}getWrapper(){return this.renderer.getWrapper()}getScroll(){return this.renderer.getScroll()}getActivePlugins(){return this.plugins}loadPredecoded(){return Pe(this,void 0,void 0,function*(){this.options.peaks&&this.options.duration&&(this.decodedData=At.createBuffer(this.options.peaks,this.options.duration),yield Promise.resolve(),this.renderDecoded())})}renderDecoded(){return Pe(this,void 0,void 0,function*(){this.decodedData&&(this.emit("decode",this.getDuration()),this.renderer.render(this.decodedData))})}loadAudio(e,t,n,i){return Pe(this,void 0,void 0,function*(){if(this.emit("load",e),!this.options.media&&this.isPlaying()&&this.pause(),this.decodedData=null,!t&&!n){const s=a=>this.emit("loading",a);t=yield xi.fetchBlob(e,s,this.options.fetchParams)}if(this.setSrc(e,t),i=(yield Promise.resolve(i||this.getDuration()))||(yield new Promise(s=>{this.onceMediaEvent("loadedmetadata",()=>s(this.getDuration()))}))||(yield Promise.resolve(0)),n)this.decodedData=At.createBuffer(n,i);else if(t){const s=yield t.arrayBuffer();this.decodedData=yield At.decode(s,this.options.sampleRate)}this.renderDecoded(),this.emit("ready",this.getDuration())})}load(e,t,n){return Pe(this,void 0,void 0,function*(){yield this.loadAudio(e,void 0,t,n)})}loadBlob(e,t,n){return Pe(this,void 0,void 0,function*(){yield this.loadAudio("blob",e,t,n)})}zoom(e){if(!this.decodedData)throw new Error("No audio loaded");this.renderer.zoom(e),this.emit("zoom",e)}getDecodedData(){return this.decodedData}exportPeaks({channels:e=2,maxLength:t=8e3,precision:n=1e4}={}){if(!this.decodedData)throw new Error("The audio has not been decoded yet");const i=Math.min(e,this.decodedData.numberOfChannels),s=[];for(let a=0;ae.destroy()),this.subscriptions.forEach(e=>e()),this.unsubscribePlayerEvents(),this.timer.destroy(),this.renderer.destroy(),super.destroy()}}let In=class{constructor(){this.listeners={},this.on=this.addEventListener,this.un=this.removeEventListener}addEventListener(e,t,n){if(this.listeners[e]||(this.listeners[e]=new Set),this.listeners[e].add(t),n?.once){const i=()=>{this.removeEventListener(e,i),this.removeEventListener(e,t)};return this.addEventListener(e,i),i}return()=>this.removeEventListener(e,t)}removeEventListener(e,t){var n;(n=this.listeners[e])===null||n===void 0||n.delete(t)}once(e,t){return this.on(e,t,{once:!0})}unAll(){this.listeners={}}emit(e,...t){this.listeners[e]&&this.listeners[e].forEach(n=>n(...t))}},so=class extends In{constructor(e){super(),this.subscriptions=[],this.options=e}onInit(){}init(e){this.wavesurfer=e,this.onInit()}destroy(){this.emit("destroy"),this.subscriptions.forEach(e=>e())}};function ut(o,e,t,n,i=5){let s=()=>{};if(!o)return s;const 
a=u=>{if(u.button===2)return;u.preventDefault(),u.stopPropagation(),o.style.touchAction="none";let l=u.clientX,d=u.clientY,r=!1;const c=m=>{m.preventDefault(),m.stopPropagation();const _=m.clientX,g=m.clientY;if(r||Math.abs(_-l)>=i||Math.abs(g-d)>=i){const{left:v,top:y}=o.getBoundingClientRect();r||(r=!0,t?.(l-v,d-y)),e(_-l,g-d,_-v,g-y),l=_,d=g}},h=m=>{r&&(m.preventDefault(),m.stopPropagation())},f=()=>{o.style.touchAction="",r&&n?.(),s()};document.addEventListener("pointermove",c),document.addEventListener("pointerup",f),document.addEventListener("pointerleave",f),document.addEventListener("click",h,!0),s=()=>{document.removeEventListener("pointermove",c),document.removeEventListener("pointerup",f),document.removeEventListener("pointerleave",f),setTimeout(()=>{document.removeEventListener("click",h,!0)},10)}};return o.addEventListener("pointerdown",a),()=>{s(),o.removeEventListener("pointerdown",a)}}class rn extends In{constructor(e,t,n=0){var i,s,a,u,l,d,r;super(),this.totalDuration=t,this.numberOfChannels=n,this.minLength=0,this.maxLength=1/0,this.id=e.id||`region-${Math.random().toString(32).slice(2)}`,this.start=this.clampPosition(e.start),this.end=this.clampPosition((i=e.end)!==null&&i!==void 0?i:e.start),this.drag=(s=e.drag)===null||s===void 0||s,this.resize=(a=e.resize)===null||a===void 0||a,this.color=(u=e.color)!==null&&u!==void 0?u:"rgba(0, 0, 0, 0.1)",this.minLength=(l=e.minLength)!==null&&l!==void 0?l:this.minLength,this.maxLength=(d=e.maxLength)!==null&&d!==void 0?d:this.maxLength,this.channelIdx=(r=e.channelIdx)!==null&&r!==void 0?r:-1,this.element=this.initElement(),this.setContent(e.content),this.setPart(),this.renderPosition(),this.initMouseEvents()}clampPosition(e){return Math.max(0,Math.min(this.totalDuration,e))}setPart(){const e=this.start===this.end;this.element.setAttribute("part",`${e?"marker":"region"} ${this.id}`)}addResizeHandles(e){const t=document.createElement("div");t.setAttribute("data-resize","left"),t.setAttribute("style",` - position: absolute; - z-index: 2; - width: 6px; - height: 100%; - top: 0; - left: 0; - border-left: 2px solid rgba(0, 0, 0, 0.5); - border-radius: 2px 0 0 2px; - cursor: ew-resize; - word-break: keep-all; - `),t.setAttribute("part","region-handle region-handle-left");const n=t.cloneNode();n.setAttribute("data-resize","right"),n.style.left="",n.style.right="0",n.style.borderRight=n.style.borderLeft,n.style.borderLeft="",n.style.borderRadius="0 2px 2px 0",n.setAttribute("part","region-handle region-handle-right"),e.appendChild(t),e.appendChild(n),ut(t,i=>this.onResize(i,"start"),()=>null,()=>this.onEndResizing(),1),ut(n,i=>this.onResize(i,"end"),()=>null,()=>this.onEndResizing(),1)}removeResizeHandles(e){const t=e.querySelector('[data-resize="left"]'),n=e.querySelector('[data-resize="right"]');t&&e.removeChild(t),n&&e.removeChild(n)}initElement(){const e=document.createElement("div"),t=this.start===this.end;let n=0,i=100;return this.channelIdx>=0&&this.channelIdxthis.emit("click",t)),e.addEventListener("mouseenter",t=>this.emit("over",t)),e.addEventListener("mouseleave",t=>this.emit("leave",t)),e.addEventListener("dblclick",t=>this.emit("dblclick",t)),ut(e,t=>this.onMove(t),()=>this.onStartMoving(),()=>this.onEndMoving()))}onStartMoving(){this.drag&&(this.element.style.cursor="grabbing")}onEndMoving(){this.drag&&(this.element.style.cursor="grab",this.emit("update-end"))}_onUpdate(e,t){if(!this.element.parentElement)return;const 
n=e/this.element.parentElement.clientWidth*this.totalDuration,i=t&&t!=="start"?this.start:this.start+n,s=t&&t!=="end"?this.end:this.end+n,a=s-i;i>=0&&s<=this.totalDuration&&i<=s&&a>=this.minLength&&a<=this.maxLength&&(this.start=i,this.end=s,this.renderPosition(),this.emit("update"))}onMove(e){this.drag&&this._onUpdate(e)}onResize(e,t){this.resize&&this._onUpdate(e,t)}onEndResizing(){this.resize&&this.emit("update-end")}_setTotalDuration(e){this.totalDuration=e,this.renderPosition()}play(){this.emit("play")}setContent(e){var t;if((t=this.content)===null||t===void 0||t.remove(),e){if(typeof e=="string"){this.content=document.createElement("div");const n=this.start===this.end;this.content.style.padding=`0.2em ${n?.2:.4}em`,this.content.textContent=e}else this.content=e;this.content.setAttribute("part","region-content"),this.element.appendChild(this.content)}else this.content=void 0}setOptions(e){var t,n;if(e.color&&(this.color=e.color,this.element.style.backgroundColor=this.color),e.drag!==void 0&&(this.drag=e.drag,this.element.style.cursor=this.drag?"grab":"default"),e.start!==void 0||e.end!==void 0){const i=this.start===this.end;this.start=this.clampPosition((t=e.start)!==null&&t!==void 0?t:this.start),this.end=this.clampPosition((n=e.end)!==null&&n!==void 0?n:i?this.start:this.end),this.renderPosition(),this.setPart()}if(e.content&&this.setContent(e.content),e.id&&(this.id=e.id,this.setPart()),e.resize!==void 0&&e.resize!==this.resize){const i=this.start===this.end;this.resize=e.resize,this.resize&&!i?this.addResizeHandles(this.element):this.removeResizeHandles(this.element)}}remove(){this.emit("remove"),this.element.remove(),this.element=null}}let ro=class Nn extends so{constructor(e){super(e),this.regions=[],this.regionsContainer=this.initRegionsContainer()}static create(e){return new Nn(e)}onInit(){if(!this.wavesurfer)throw Error("WaveSurfer is not initialized");this.wavesurfer.getWrapper().appendChild(this.regionsContainer);let e=[];this.subscriptions.push(this.wavesurfer.on("timeupdate",t=>{const n=this.regions.filter(i=>i.start<=t&&i.end>=t);n.forEach(i=>{e.includes(i)||this.emit("region-in",i)}),e.forEach(i=>{n.includes(i)||this.emit("region-out",i)}),e=n}))}initRegionsContainer(){const e=document.createElement("div");return e.setAttribute("style",` - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - z-index: 3; - pointer-events: none; - `),e}getRegions(){return this.regions}avoidOverlapping(e){if(!e.content)return;const t=e.content,n=t.getBoundingClientRect().left,i=e.element.scrollWidth,s=this.regions.filter(a=>{if(a===e||!a.content)return!1;const u=a.content.getBoundingClientRect().left,l=a.element.scrollWidth;return n{var u;return((u=a.content)===null||u===void 0?void 0:u.getBoundingClientRect().height)||0}).reduce((a,u)=>a+u,0);t.style.marginTop=`${s}px`}saveRegion(e){this.regionsContainer.appendChild(e.element),this.avoidOverlapping(e),this.regions.push(e);const t=[e.on("update-end",()=>{this.avoidOverlapping(e),this.emit("region-updated",e)}),e.on("play",()=>{var n,i;(n=this.wavesurfer)===null||n===void 0||n.play(),(i=this.wavesurfer)===null||i===void 0||i.setTime(e.start)}),e.on("click",n=>{this.emit("region-clicked",e,n)}),e.on("dblclick",n=>{this.emit("region-double-clicked",e,n)}),e.once("remove",()=>{t.forEach(n=>n()),this.regions=this.regions.filter(n=>n!==e)})];this.subscriptions.push(...t),this.emit("region-created",e)}addRegion(e){var t,n;if(!this.wavesurfer)throw Error("WaveSurfer is not initialized");const 
i=this.wavesurfer.getDuration(),s=(n=(t=this.wavesurfer)===null||t===void 0?void 0:t.getDecodedData())===null||n===void 0?void 0:n.numberOfChannels,a=new rn(e,i,s);return i?this.saveRegion(a):this.subscriptions.push(this.wavesurfer.once("ready",u=>{a._setTotalDuration(u),this.saveRegion(a)})),a}enableDragSelection(e){var t,n;const i=(n=(t=this.wavesurfer)===null||t===void 0?void 0:t.getWrapper())===null||n===void 0?void 0:n.querySelector("div");if(!i)return()=>{};let s=null,a=0;return ut(i,(u,l,d)=>{s&&s._onUpdate(u,d>a?"end":"start")},u=>{var l,d;if(a=u,!this.wavesurfer)return;const r=this.wavesurfer.getDuration(),c=(d=(l=this.wavesurfer)===null||l===void 0?void 0:l.getDecodedData())===null||d===void 0?void 0:d.numberOfChannels,h=this.wavesurfer.getWrapper().clientWidth,f=u/h*r,m=(u+5)/h*r;s=new rn(Object.assign(Object.assign({},e),{start:f,end:m}),r,c),this.regionsContainer.appendChild(s.element)},()=>{s&&(this.saveRegion(s),s=null)})}clearRegions(){this.regions.forEach(e=>e.remove())}destroy(){this.clearRegions(),super.destroy()}};function ao(o){const e=o.numberOfChannels,t=o.length*e*2+44,n=new ArrayBuffer(t),i=new DataView(n);let s=0;const a=function(u,l,d){for(let r=0;r{const n=new AudioContext,i=o.numberOfChannels,s=o.sampleRate;let a=o.length,u=0;e&&t&&(u=Math.round(e*s),a=Math.round(t*s)-u);const l=n.createBuffer(i,a,s);for(let d=0;d{o&&o.skip(e)},ze=(o,e)=>o/100*(e||5);const{SvelteComponent:lo,append:J,attr:O,check_outros:dt,create_component:qe,destroy_component:Ue,detach:Ce,element:te,empty:uo,group_outros:ct,init:co,insert:Ee,listen:be,mount_component:je,noop:xe,run_all:Hn,safe_not_equal:ho,set_data:fo,space:Te,text:an,transition_in:Y,transition_out:K}=window.__gradio__svelte__internal;function mo(o){let e,t;return e=new ui({}),{c(){qe(e.$$.fragment)},m(n,i){je(e,n,i),t=!0},i(n){t||(Y(e.$$.fragment,n),t=!0)},o(n){K(e.$$.fragment,n),t=!1},d(n){Ue(e,n)}}}function _o(o){let e,t;return e=new On({}),{c(){qe(e.$$.fragment)},m(n,i){je(e,n,i),t=!0},i(n){t||(Y(e.$$.fragment,n),t=!0)},o(n){K(e.$$.fragment,n),t=!1},d(n){Ue(e,n)}}}function ln(o){let e,t,n,i,s;return t=new ci({}),{c(){e=te("button"),qe(t.$$.fragment),O(e,"class","action icon svelte-k0z87h"),O(e,"aria-label","Reset audio")},m(a,u){Ee(a,e,u),je(t,e,null),n=!0,i||(s=be(e,"click",o[26]),i=!0)},p:xe,i(a){n||(Y(t.$$.fragment,a),n=!0)},o(a){K(t.$$.fragment,a),n=!1},d(a){a&&Ce(e),Ue(t),i=!1,s()}}}function un(o){let e,t,n,i;const s=[go,po],a=[];function u(l,d){return l[0]===""?0:1}return e=u(o),t=a[e]=s[e](o),{c(){t.c(),n=uo()},m(l,d){a[e].m(l,d),Ee(l,n,d),i=!0},p(l,d){let r=e;e=u(l),e===r?a[e].p(l,d):(ct(),K(a[r],1,1,()=>{a[r]=null}),dt(),t=a[e],t?t.p(l,d):(t=a[e]=s[e](l),t.c()),Y(t,1),t.m(n.parentNode,n))},i(l){i||(Y(t),i=!0)},o(l){K(t),i=!1},d(l){l&&Ce(n),a[e].d(l)}}}function po(o){let e,t,n,i,s;return{c(){e=te("button"),e.textContent="Trim",t=Te(),n=te("button"),n.textContent="Cancel",O(e,"class","text-button svelte-k0z87h"),O(n,"class","text-button svelte-k0z87h")},m(a,u){Ee(a,e,u),Ee(a,t,u),Ee(a,n,u),i||(s=[be(e,"click",o[11]),be(n,"click",o[13])],i=!0)},p:xe,i:xe,o:xe,d(a){a&&(Ce(e),Ce(t),Ce(n)),i=!1,Hn(s)}}}function go(o){let e,t,n,i,s;return t=new di({}),{c(){e=te("button"),qe(t.$$.fragment),O(e,"class","action icon svelte-k0z87h"),O(e,"aria-label","Trim audio to selection")},m(a,u){Ee(a,e,u),je(t,e,null),n=!0,i||(s=be(e,"click",o[13]),i=!0)},p:xe,i(a){n||(Y(t.$$.fragment,a),n=!0)},o(a){K(t.$$.fragment,a),n=!1},d(a){a&&Ce(e),Ue(t),i=!1,s()}}}function vo(o){let e,t,n,i,s,a,u,l,d,r,c,h,f,m,_,g,v,y,k,D,M,A,E,P,T,I;r=new 
Si({});const B=[_o,mo],W=[];function b(p,z){return p[4]?0:1}m=b(o),_=W[m]=B[m](o),k=new zi({});let L=o[5]&&o[0]===""&&ln(o),S=o[6]&&un(o);return{c(){e=te("div"),t=te("button"),n=te("span"),i=an(o[9]),s=an("x"),u=Te(),l=te("div"),d=te("button"),qe(r.$$.fragment),h=Te(),f=te("button"),_.c(),v=Te(),y=te("button"),qe(k.$$.fragment),M=Te(),A=te("div"),L&&L.c(),E=Te(),S&&S.c(),O(n,"class","svelte-k0z87h"),O(t,"class","playback icon svelte-k0z87h"),O(t,"aria-label",a=`Adjust playback speed to ${o[10][(o[10].indexOf(o[9])+1)%o[10].length]}x`),O(d,"class","rewind icon svelte-k0z87h"),O(d,"aria-label",c=`Skip backwards by ${ze(o[2],o[8].skip_length)} seconds`),O(f,"class","play-pause-button icon svelte-k0z87h"),O(f,"aria-label",g=o[4]?o[3]("common.play"):o[3]("common.pause")),O(y,"class","skip icon svelte-k0z87h"),O(y,"aria-label",D="Skip forward by "+ze(o[2],o[8].skip_length)+" seconds"),O(l,"class","play-pause-wrapper svelte-k0z87h"),O(A,"class","settings-wrapper svelte-k0z87h"),O(e,"class","controls svelte-k0z87h"),O(e,"data-testid","waveform-controls")},m(p,z){Ee(p,e,z),J(e,t),J(t,n),J(n,i),J(n,s),J(e,u),J(e,l),J(l,d),je(r,d,null),J(l,h),J(l,f),W[m].m(f,null),J(l,v),J(l,y),je(k,y,null),J(e,M),J(e,A),L&&L.m(A,null),J(A,E),S&&S.m(A,null),P=!0,T||(I=[be(t,"click",o[22]),be(d,"click",o[23]),be(f,"click",o[24]),be(y,"click",o[25])],T=!0)},p(p,[z]){(!P||z&512)&&fo(i,p[9]),(!P||z&512&&a!==(a=`Adjust playback speed to ${p[10][(p[10].indexOf(p[9])+1)%p[10].length]}x`))&&O(t,"aria-label",a),(!P||z&260&&c!==(c=`Skip backwards by ${ze(p[2],p[8].skip_length)} seconds`))&&O(d,"aria-label",c);let w=m;m=b(p),m!==w&&(ct(),K(W[w],1,1,()=>{W[w]=null}),dt(),_=W[m],_||(_=W[m]=B[m](p),_.c()),Y(_,1),_.m(f,null)),(!P||z&24&&g!==(g=p[4]?p[3]("common.play"):p[3]("common.pause")))&&O(f,"aria-label",g),(!P||z&260&&D!==(D="Skip forward by "+ze(p[2],p[8].skip_length)+" seconds"))&&O(y,"aria-label",D),p[5]&&p[0]===""?L?(L.p(p,z),z&33&&Y(L,1)):(L=ln(p),L.c(),Y(L,1),L.m(A,E)):L&&(ct(),K(L,1,1,()=>{L=null}),dt()),p[6]?S?(S.p(p,z),z&64&&Y(S,1)):(S=un(p),S.c(),Y(S,1),S.m(A,null)):S&&(ct(),K(S,1,1,()=>{S=null}),dt())},i(p){P||(Y(r.$$.fragment,p),Y(_),Y(k.$$.fragment,p),Y(L),Y(S),P=!0)},o(p){K(r.$$.fragment,p),K(_),K(k.$$.fragment,p),K(L),K(S),P=!1},d(p){p&&Ce(e),Ue(r),W[m].d(),Ue(k),L&&L.d(),S&&S.d(),T=!1,Hn(I)}}}function bo(o,e,t){let{waveform:n}=e,{audioDuration:i}=e,{i18n:s}=e,{playing:a}=e,{showRedo:u=!1}=e,{interactive:l=!1}=e,{handle_trim_audio:d}=e,{mode:r=""}=e,{container:c}=e,{handle_reset_value:h}=e,{waveform_settings:f={}}=e,{trimDuration:m=0}=e,_=[.5,1,1.5,2],g=_[1],v,y=null,k,D,M="";const A=()=>{t(18,y=v.addRegion({start:i/4,end:i/2,color:"hsla(15, 85%, 40%, 0.4)",drag:!0,resize:!0})),t(14,m=y.end-y.start)},E=()=>{if(n&&v&&y){const p=y.start,z=y.end;d(p,z),t(0,r=""),t(18,y=null)}},P=()=>{v?.getRegions().forEach(p=>{p.remove()}),v?.clearRegions()},T=()=>{P(),r==="edit"?t(0,r=""):(t(0,r="edit"),A())},I=(p,z)=>{let w,j;y&&(p==="left"?z==="ArrowLeft"?(w=y.start-.05,j=y.end):(w=y.start+.05,j=y.end):z==="ArrowLeft"?(w=y.start,j=y.end-.05):(w=y.start,j=y.end+.05),y.setOptions({start:w,end:j}),t(14,m=y.end-y.start))},B=()=>{t(9,g=_[(_.indexOf(g)+1)%_.length]),n.setPlaybackRate(g)},W=()=>n.skip(ze(i,f.skip_length)*-1),b=()=>n.playPause(),L=()=>n.skip(ze(i,f.skip_length)),S=()=>{h(),P(),t(0,r="")};return o.$$set=p=>{"waveform"in p&&t(1,n=p.waveform),"audioDuration"in p&&t(2,i=p.audioDuration),"i18n"in p&&t(3,s=p.i18n),"playing"in p&&t(4,a=p.playing),"showRedo"in p&&t(5,u=p.showRedo),"interactive"in 
p&&t(6,l=p.interactive),"handle_trim_audio"in p&&t(15,d=p.handle_trim_audio),"mode"in p&&t(0,r=p.mode),"container"in p&&t(16,c=p.container),"handle_reset_value"in p&&t(7,h=p.handle_reset_value),"waveform_settings"in p&&t(8,f=p.waveform_settings),"trimDuration"in p&&t(14,m=p.trimDuration)},o.$$.update=()=>{if(o.$$.dirty&2&&t(17,v=n.registerPlugin(ro.create())),o.$$.dirty&131072&&v?.on("region-out",p=>{p.play()}),o.$$.dirty&131072&&v?.on("region-updated",p=>{t(14,m=p.end-p.start)}),o.$$.dirty&131072&&v?.on("region-clicked",(p,z)=>{z.stopPropagation(),t(18,y=p),p.play()}),o.$$.dirty&2031616&&y){const p=c.children[0].shadowRoot;t(20,D=p.querySelector('[data-resize="right"]')),t(19,k=p.querySelector('[data-resize="left"]')),k&&D&&(k.setAttribute("role","button"),D.setAttribute("role","button"),k?.setAttribute("aria-label","Drag to adjust start time"),D?.setAttribute("aria-label","Drag to adjust end time"),k?.setAttribute("tabindex","0"),D?.setAttribute("tabindex","0"),k.addEventListener("focus",()=>{v&&t(21,M="left")}),D.addEventListener("focus",()=>{v&&t(21,M="right")}))}o.$$.dirty&2228224&&v&&window.addEventListener("keydown",p=>{p.key==="ArrowLeft"?I(M,"ArrowLeft"):p.key==="ArrowRight"&&I(M,"ArrowRight")})},[r,n,i,s,a,u,l,h,f,g,_,E,P,T,m,d,c,v,y,k,D,M,B,W,b,L,S]}class qn extends lo{constructor(e){super(),co(this,e,bo,vo,ho,{waveform:1,audioDuration:2,i18n:3,playing:4,showRedo:5,interactive:6,handle_trim_audio:15,mode:0,container:16,handle_reset_value:7,waveform_settings:8,trimDuration:14})}}const{SvelteComponent:wo,add_flush_callback:dn,append:se,attr:$,bind:cn,binding_callbacks:et,check_outros:Un,create_component:Ut,destroy_component:jt,detach:Ft,element:ge,empty:yo,group_outros:jn,init:ko,insert:Vt,mount_component:Xt,safe_not_equal:Co,set_data:Eo,space:lt,text:Ro,transition_in:we,transition_out:De}=window.__gradio__svelte__internal,{onMount:So}=window.__gradio__svelte__internal;function Do(o){let e,t,n,i,s,a,u,l,d,r,c,h,f,m=o[0]==="edit"&&o[13]>0&&hn(o),_=o[8]&&fn(o);return{c(){e=ge("div"),t=ge("div"),n=ge("div"),i=lt(),s=ge("div"),a=ge("time"),a.textContent="0:00",u=lt(),l=ge("div"),m&&m.c(),d=lt(),r=ge("time"),r.textContent="0:00",c=lt(),_&&_.c(),$(n,"id","waveform"),$(n,"class","svelte-15pl8d9"),$(t,"class","waveform-container svelte-15pl8d9"),$(a,"id","time"),$(a,"class","svelte-15pl8d9"),$(r,"id","duration"),$(r,"class","svelte-15pl8d9"),$(s,"class","timestamps svelte-15pl8d9"),$(e,"class","component-wrapper svelte-15pl8d9"),$(e,"data-testid",h=o[2]?"waveform-"+o[2]:"unlabelled-audio")},m(g,v){Vt(g,e,v),se(e,t),se(t,n),o[19](n),se(e,i),se(e,s),se(s,a),o[20](a),se(s,u),se(s,l),m&&m.m(l,null),se(l,d),se(l,r),o[21](r),se(e,c),_&&_.m(e,null),f=!0},p(g,v){g[0]==="edit"&&g[13]>0?m?m.p(g,v):(m=hn(g),m.c(),m.m(l,d)):m&&(m.d(1),m=null),g[8]?_?(_.p(g,v),v&256&&we(_,1)):(_=fn(g),_.c(),we(_,1),_.m(e,null)):_&&(jn(),De(_,1,1,()=>{_=null}),Un()),(!f||v&4&&h!==(h=g[2]?"waveform-"+g[2]:"unlabelled-audio"))&&$(e,"data-testid",h)},i(g){f||(we(_),f=!0)},o(g){De(_),f=!1},d(g){g&&Ft(e),o[19](null),o[20](null),m&&m.d(),o[21](null),_&&_.d()}}}function Mo(o){let e,t;return e=new zn({props:{size:"small",$$slots:{default:[Lo]},$$scope:{ctx:o}}}),{c(){Ut(e.$$.fragment)},m(n,i){Xt(e,n,i),t=!0},p(n,i){const s={};i&67108864&&(s.$$scope={dirty:i,ctx:n}),e.$set(s)},i(n){t||(we(e.$$.fragment,n),t=!0)},o(n){De(e.$$.fragment,n),t=!1},d(n){jt(e,n)}}}function hn(o){let 
e,t=o[14](o[13])+"",n;return{c(){e=ge("time"),n=Ro(t),$(e,"id","trim-duration"),$(e,"class","svelte-15pl8d9")},m(i,s){Vt(i,e,s),se(e,n)},p(i,s){s&8192&&t!==(t=i[14](i[13])+"")&&Eo(n,t)},d(i){i&&Ft(e)}}}function fn(o){let e,t,n,i;function s(l){o[22](l)}function a(l){o[23](l)}let u={container:o[7],waveform:o[8],playing:o[11],audioDuration:o[12],i18n:o[3],interactive:o[4],handle_trim_audio:o[15],showRedo:o[4],handle_reset_value:o[6],waveform_settings:o[5]};return o[0]!==void 0&&(u.mode=o[0]),o[13]!==void 0&&(u.trimDuration=o[13]),e=new qn({props:u}),et.push(()=>cn(e,"mode",s)),et.push(()=>cn(e,"trimDuration",a)),{c(){Ut(e.$$.fragment)},m(l,d){Xt(e,l,d),i=!0},p(l,d){const r={};d&128&&(r.container=l[7]),d&256&&(r.waveform=l[8]),d&2048&&(r.playing=l[11]),d&4096&&(r.audioDuration=l[12]),d&8&&(r.i18n=l[3]),d&16&&(r.interactive=l[4]),d&16&&(r.showRedo=l[4]),d&64&&(r.handle_reset_value=l[6]),d&32&&(r.waveform_settings=l[5]),!t&&d&1&&(t=!0,r.mode=l[0],dn(()=>t=!1)),!n&&d&8192&&(n=!0,r.trimDuration=l[13],dn(()=>n=!1)),e.$set(r)},i(l){i||(we(e.$$.fragment,l),i=!0)},o(l){De(e.$$.fragment,l),i=!1},d(l){jt(e,l)}}}function Lo(o){let e,t;return e=new gt({}),{c(){Ut(e.$$.fragment)},m(n,i){Xt(e,n,i),t=!0},i(n){t||(we(e.$$.fragment,n),t=!0)},o(n){De(e.$$.fragment,n),t=!1},d(n){jt(e,n)}}}function Ao(o){let e,t,n,i;const s=[Mo,Do],a=[];function u(l,d){return l[1]===null?0:1}return e=u(o),t=a[e]=s[e](o),{c(){t.c(),n=yo()},m(l,d){a[e].m(l,d),Vt(l,n,d),i=!0},p(l,[d]){let r=e;e=u(l),e===r?a[e].p(l,d):(jn(),De(a[r],1,1,()=>{a[r]=null}),Un(),t=a[e],t?t.p(l,d):(t=a[e]=s[e](l),t.c()),we(t,1),t.m(n.parentNode,n))},i(l){i||(we(t),i=!0)},o(l){De(t),i=!1},d(l){l&&Ft(n),a[e].d(l)}}}function Po(o,e,t){let{value:n=null}=e,{label:i}=e,{autoplay:s}=e,{i18n:a}=e,{dispatch:u}=e,{dispatch_blob:l=()=>Promise.resolve()}=e,{interactive:d=!1}=e,{waveform_settings:r={}}=e,{mode:c=""}=e,{handle_reset_value:h=()=>{}}=e,f,m,_=!1,g,v,y,k=0;const D=b=>{const L=Math.floor(b/60),p=`0${Math.round(b)%60}`.slice(-2);return`${L}:${p}`},M=()=>{t(8,m=He.create({container:f,url:n?.url,...r}))},A=async(b,L)=>{t(0,c="");const S=m.getDecodedData();S&&await Bt(S,b,L).then(async p=>{await l([p],"change"),m.destroy(),M()}),u("edit")};async function E(b){await hi(b).then(L=>{if(L)return m?.load(L)})}So(()=>{window.addEventListener("keydown",b=>{b.key==="ArrowRight"&&c!=="edit"?mt(m,.1):b.key==="ArrowLeft"&&c!=="edit"&&mt(m,-.1)})});function P(b){et[b?"unshift":"push"](()=>{f=b,t(7,f),t(8,m)})}function T(b){et[b?"unshift":"push"](()=>{g=b,t(9,g),t(8,m)})}function I(b){et[b?"unshift":"push"](()=>{v=b,t(10,v),t(8,m)})}function B(b){c=b,t(0,c)}function W(b){k=b,t(13,k)}return o.$$set=b=>{"value"in b&&t(1,n=b.value),"label"in b&&t(2,i=b.label),"autoplay"in b&&t(16,s=b.autoplay),"i18n"in b&&t(3,a=b.i18n),"dispatch"in b&&t(17,u=b.dispatch),"dispatch_blob"in b&&t(18,l=b.dispatch_blob),"interactive"in b&&t(4,d=b.interactive),"waveform_settings"in b&&t(5,r=b.waveform_settings),"mode"in b&&t(0,c=b.mode),"handle_reset_value"in b&&t(6,h=b.handle_reset_value)},o.$$.update=()=>{o.$$.dirty&384&&f!==void 0&&(m!==void 
0&&m.destroy(),t(7,f.innerHTML="",f),M(),t(11,_=!1)),o.$$.dirty&65792&&s&&(m?.play(),t(11,_=!0)),o.$$.dirty&1280&&m?.on("decode",b=>{t(12,y=b),v&&t(10,v.textContent=D(b),v)}),o.$$.dirty&768&&m?.on("timeupdate",b=>g&&t(9,g.textContent=D(b),g)),o.$$.dirty&131328&&m?.on("finish",()=>{t(11,_=!1),u("stop"),u("end")}),o.$$.dirty&131328&&m?.on("pause",()=>{t(11,_=!1),u("pause")}),o.$$.dirty&131328&&m?.on("play",()=>{t(11,_=!0),u("play")}),o.$$.dirty&2&&n?.url&&E(n.url)},[c,n,i,a,d,r,h,f,m,g,v,_,y,k,D,A,s,u,l,P,T,I,B,W]}class To extends wo{constructor(e){super(),ko(this,e,Po,Ao,Co,{value:1,label:2,autoplay:16,i18n:3,dispatch:17,dispatch_blob:18,interactive:4,waveform_settings:5,mode:0,handle_reset_value:6})}}const Fn=To;const{SvelteComponent:Wo,append:zo,attr:We,bubble:mn,check_outros:Ot,create_component:Xe,destroy_component:Ge,detach:tt,element:Vn,empty:Bo,group_outros:It,init:Oo,insert:nt,mount_component:Ye,safe_not_equal:Io,space:Nt,transition_in:Q,transition_out:ie}=window.__gradio__svelte__internal,{createEventDispatcher:No}=window.__gradio__svelte__internal;function Ho(o){let e,t;return e=new zn({props:{size:"small",$$slots:{default:[Uo]},$$scope:{ctx:o}}}),{c(){Xe(e.$$.fragment)},m(n,i){Ye(e,n,i),t=!0},p(n,i){const s={};i&4096&&(s.$$scope={dirty:i,ctx:n}),e.$set(s)},i(n){t||(Q(e.$$.fragment,n),t=!0)},o(n){ie(e.$$.fragment,n),t=!1},d(n){Ge(e,n)}}}function qo(o){let e,t,n,i,s,a=o[4]&&_n(o),u=o[5]&&pn(o);return i=new Fn({props:{value:o[0],label:o[1],autoplay:o[3],i18n:o[6],dispatch:o[8],waveform_settings:o[7]}}),{c(){e=Vn("div"),a&&a.c(),t=Nt(),u&&u.c(),n=Nt(),Xe(i.$$.fragment),We(e,"class","icon-buttons svelte-rvdo70")},m(l,d){nt(l,e,d),a&&a.m(e,null),zo(e,t),u&&u.m(e,null),nt(l,n,d),Ye(i,l,d),s=!0},p(l,d){l[4]?a?(a.p(l,d),d&16&&Q(a,1)):(a=_n(l),a.c(),Q(a,1),a.m(e,t)):a&&(It(),ie(a,1,1,()=>{a=null}),Ot()),l[5]?u?(u.p(l,d),d&32&&Q(u,1)):(u=pn(l),u.c(),Q(u,1),u.m(e,null)):u&&(It(),ie(u,1,1,()=>{u=null}),Ot());const r={};d&1&&(r.value=l[0]),d&2&&(r.label=l[1]),d&8&&(r.autoplay=l[3]),d&64&&(r.i18n=l[6]),d&128&&(r.waveform_settings=l[7]),i.$set(r)},i(l){s||(Q(a),Q(u),Q(i.$$.fragment,l),s=!0)},o(l){ie(a),ie(u),ie(i.$$.fragment,l),s=!1},d(l){l&&(tt(e),tt(n)),a&&a.d(),u&&u.d(),Ge(i,l)}}}function Uo(o){let e,t;return e=new gt({}),{c(){Xe(e.$$.fragment)},m(n,i){Ye(e,n,i),t=!0},i(n){t||(Q(e.$$.fragment,n),t=!0)},o(n){ie(e.$$.fragment,n),t=!1},d(n){Ge(e,n)}}}function _n(o){let e,t,n,i,s;return t=new ri({props:{Icon:li,label:o[6]("common.download")}}),{c(){e=Vn("a"),Xe(t.$$.fragment),We(e,"href",n=o[0].url),We(e,"target",window.__is_colab__?"_blank":null),We(e,"download",i=o[0].url)},m(a,u){nt(a,e,u),Ye(t,e,null),s=!0},p(a,u){const l={};u&64&&(l.label=a[6]("common.download")),t.$set(l),(!s||u&1&&n!==(n=a[0].url))&&We(e,"href",n),(!s||u&1&&i!==(i=a[0].url))&&We(e,"download",i)},i(a){s||(Q(t.$$.fragment,a),s=!0)},o(a){ie(t.$$.fragment,a),s=!1},d(a){a&&tt(e),Ge(t)}}}function pn(o){let e,t;return e=new ai({props:{i18n:o[6],formatter:o[9],value:o[0]}}),e.$on("error",o[10]),e.$on("share",o[11]),{c(){Xe(e.$$.fragment)},m(n,i){Ye(e,n,i),t=!0},p(n,i){const s={};i&64&&(s.i18n=n[6]),i&1&&(s.value=n[0]),e.$set(s)},i(n){t||(Q(e.$$.fragment,n),t=!0)},o(n){ie(e.$$.fragment,n),t=!1},d(n){Ge(e,n)}}}function jo(o){let e,t,n,i,s,a;e=new Wn({props:{show_label:o[2],Icon:gt,float:!1,label:o[1]||o[6]("audio.audio")}});const u=[qo,Ho],l=[];function d(r,c){return r[0]!==null?0:1}return n=d(o),i=l[n]=u[n](o),{c(){Xe(e.$$.fragment),t=Nt(),i.c(),s=Bo()},m(r,c){Ye(e,r,c),nt(r,t,c),l[n].m(r,c),nt(r,s,c),a=!0},p(r,[c]){const 
h={};c&4&&(h.show_label=r[2]),c&66&&(h.label=r[1]||r[6]("audio.audio")),e.$set(h);let f=n;n=d(r),n===f?l[n].p(r,c):(It(),ie(l[f],1,1,()=>{l[f]=null}),Ot(),i=l[n],i?i.p(r,c):(i=l[n]=u[n](r),i.c()),Q(i,1),i.m(s.parentNode,s))},i(r){a||(Q(e.$$.fragment,r),Q(i),a=!0)},o(r){ie(e.$$.fragment,r),ie(i),a=!1},d(r){r&&(tt(t),tt(s)),Ge(e,r),l[n].d(r)}}}function Fo(o,e,t){let{value:n=null}=e,{label:i}=e,{show_label:s=!0}=e,{autoplay:a}=e,{show_download_button:u=!0}=e,{show_share_button:l=!1}=e,{i18n:d}=e,{waveform_settings:r={}}=e;const c=No(),h=async _=>_?``:"";function f(_){mn.call(this,o,_)}function m(_){mn.call(this,o,_)}return o.$$set=_=>{"value"in _&&t(0,n=_.value),"label"in _&&t(1,i=_.label),"show_label"in _&&t(2,s=_.show_label),"autoplay"in _&&t(3,a=_.autoplay),"show_download_button"in _&&t(4,u=_.show_download_button),"show_share_button"in _&&t(5,l=_.show_share_button),"i18n"in _&&t(6,d=_.i18n),"waveform_settings"in _&&t(7,r=_.waveform_settings)},o.$$.update=()=>{o.$$.dirty&1&&n&&c("change",n)},[n,i,s,a,u,l,d,r,c,h,f,m]}class Vo extends Wo{constructor(e){super(),Oo(this,e,Fo,jo,Io,{value:0,label:1,show_label:2,autoplay:3,show_download_button:4,show_share_button:5,i18n:6,waveform_settings:7})}}const Xo=Vo;function Tt(o,e,t,n){return new(t||(t=Promise))(function(i,s){function a(d){try{l(n.next(d))}catch(r){s(r)}}function u(d){try{l(n.throw(d))}catch(r){s(r)}}function l(d){var r;d.done?i(d.value):(r=d.value,r instanceof t?r:new t(function(c){c(r)})).then(a,u)}l((n=n.apply(o,e||[])).next())})}class Go{constructor(){this.listeners={},this.on=this.addEventListener,this.un=this.removeEventListener}addEventListener(e,t,n){if(this.listeners[e]||(this.listeners[e]=new Set),this.listeners[e].add(t),n?.once){const i=()=>{this.removeEventListener(e,i),this.removeEventListener(e,t)};return this.addEventListener(e,i),i}return()=>this.removeEventListener(e,t)}removeEventListener(e,t){var n;(n=this.listeners[e])===null||n===void 0||n.delete(t)}once(e,t){return this.on(e,t,{once:!0})}unAll(){this.listeners={}}emit(e,...t){this.listeners[e]&&this.listeners[e].forEach(n=>n(...t))}}class Yo extends Go{constructor(e){super(),this.subscriptions=[],this.options=e}onInit(){}init(e){this.wavesurfer=e,this.onInit()}destroy(){this.emit("destroy"),this.subscriptions.forEach(e=>e())}}const Zo=["audio/webm","audio/wav","audio/mpeg","audio/mp4","audio/mp3"];class it extends Yo{constructor(e){var t;super(Object.assign(Object.assign({},e),{audioBitsPerSecond:(t=e.audioBitsPerSecond)!==null&&t!==void 0?t:128e3})),this.stream=null,this.mediaRecorder=null}static create(e){return new it(e||{})}renderMicStream(e){const t=new AudioContext,n=t.createMediaStreamSource(e),i=t.createAnalyser();n.connect(i);const s=i.frequencyBinCount,a=new Float32Array(s),u=s/t.sampleRate;let l;const d=()=>{i.getFloatTimeDomainData(a),this.wavesurfer&&(this.wavesurfer.options.cursorWidth=0,this.wavesurfer.options.interact=!1,this.wavesurfer.load("",[a],u)),l=requestAnimationFrame(d)};return d(),()=>{cancelAnimationFrame(l),n?.disconnect(),t?.close()}}startMic(e){return Tt(this,void 0,void 0,function*(){let t;try{t=yield navigator.mediaDevices.getUserMedia({audio:!e?.deviceId||{deviceId:e.deviceId}})}catch(i){throw new Error("Error accessing the microphone: "+i.message)}const n=this.renderMicStream(t);return this.subscriptions.push(this.once("destroy",n)),this.stream=t,t})}stopMic(){this.stream&&(this.stream.getTracks().forEach(e=>e.stop()),this.stream=null,this.mediaRecorder=null)}startRecording(e){return Tt(this,void 0,void 0,function*(){const 
t=this.stream||(yield this.startMic(e)),n=this.mediaRecorder||new MediaRecorder(t,{mimeType:this.options.mimeType||Zo.find(s=>MediaRecorder.isTypeSupported(s)),audioBitsPerSecond:this.options.audioBitsPerSecond});this.mediaRecorder=n,this.stopRecording();const i=[];n.ondataavailable=s=>{s.data.size>0&&i.push(s.data)},n.onstop=()=>{var s;const a=new Blob(i,{type:n.mimeType});this.emit("record-end",a),this.options.renderRecordedAudio!==!1&&((s=this.wavesurfer)===null||s===void 0||s.load(URL.createObjectURL(a)))},n.start(),this.emit("record-start")})}isRecording(){var e;return((e=this.mediaRecorder)===null||e===void 0?void 0:e.state)==="recording"}isPaused(){var e;return((e=this.mediaRecorder)===null||e===void 0?void 0:e.state)==="paused"}stopRecording(){var e;this.isRecording()&&((e=this.mediaRecorder)===null||e===void 0||e.stop())}pauseRecording(){var e;this.isRecording()&&((e=this.mediaRecorder)===null||e===void 0||e.pause(),this.emit("record-pause"))}resumeRecording(){var e;this.isPaused()&&((e=this.mediaRecorder)===null||e===void 0||e.resume(),this.emit("record-resume"))}static getAvailableAudioDevices(){return Tt(this,void 0,void 0,function*(){return navigator.mediaDevices.enumerateDevices().then(e=>e.filter(t=>t.kind==="audioinput"))})}destroy(){super.destroy(),this.stopRecording(),this.stopMic()}}const{SvelteComponent:Jo,append:F,attr:G,create_component:Ko,destroy_component:Qo,destroy_each:$o,detach:wt,element:ae,empty:xo,ensure_array_like:gn,init:es,insert:yt,listen:Ke,mount_component:ts,run_all:ns,safe_not_equal:is,set_data:Be,set_input_value:Ht,space:Qe,text:Oe,transition_in:os,transition_out:ss}=window.__gradio__svelte__internal,{onMount:rs}=window.__gradio__svelte__internal;function vn(o,e,t){const n=o.slice();return n[14]=e[t],n}function as(o){let e,t=gn(o[2]),n=[];for(let i=0;i{u=document.getElementById("record"),l=document.getElementById("pause"),d=document.getElementById("resume"),r=document.getElementById("stop"),c=document.getElementById("stop-paused")});const h=()=>n.startRecording(),f=()=>{n.isPaused()&&(n.resumeRecording(),n.stopRecording()),n.stopRecording()},m=()=>{n.isPaused()&&(n.resumeRecording(),n.stopRecording()),n.stopRecording()},_=()=>n.pauseRecording(),g=()=>n.resumeRecording();return o.$$set=v=>{"record"in v&&t(0,n=v.record),"i18n"in v&&t(1,i=v.i18n),"dispatch"in v&&t(3,s=v.dispatch)},o.$$.update=()=>{if(o.$$.dirty&10)try{let v=[];it.getAvailableAudioDevices().then(y=>{t(2,a=y),y.forEach(k=>{k.deviceId&&v.push(k)}),t(2,a=v)})}catch(v){throw v instanceof DOMException&&v.name=="NotAllowedError"&&s("error",i("audio.allow_recording_access")),v}o.$$.dirty&1&&n.on("record-start",()=>{n.startMic(),u.style.display="none",r.style.display="flex",l.style.display="block"}),o.$$.dirty&1&&n.on("record-end",()=>{n.isPaused()&&(n.resumeRecording(),n.stopRecording()),n.stopMic(),u.style.display="flex",r.style.display="none",l.style.display="none",u.disabled=!1}),o.$$.dirty&1&&n.on("record-pause",()=>{l.style.display="none",d.style.display="block",r.style.display="none",c.style.display="flex"}),o.$$.dirty&1&&n.on("record-resume",()=>{l.style.display="block",d.style.display="none",u.style.display="none",r.style.display="flex",c.style.display="none"})},[n,i,a,s,h,f,m,_,g]}class cs extends 
Jo{constructor(e){super(),es(this,e,ds,us,is,{record:0,i18n:1,dispatch:3})}}const{SvelteComponent:hs,add_flush_callback:ht,append:ne,attr:Z,bind:ft,binding_callbacks:Re,check_outros:wn,create_component:Xn,destroy_component:Gn,detach:ot,element:he,group_outros:yn,init:fs,insert:st,mount_component:Yn,noop:ms,safe_not_equal:_s,set_data:Zn,space:Ie,text:Jn,transition_in:ve,transition_out:Ne}=window.__gradio__svelte__internal,{onMount:ps}=window.__gradio__svelte__internal;function kn(o){let e,t,n,i,s,a=o[0]==="edit"&&o[16]>0&&Cn(o);function u(r,c){return r[15]?vs:gs}let l=u(o),d=l(o);return{c(){e=he("div"),t=he("time"),t.textContent="0:00",n=Ie(),i=he("div"),a&&a.c(),s=Ie(),d.c(),Z(t,"id","time"),Z(t,"class","svelte-imtedr"),Z(e,"id","timestamps"),Z(e,"class","svelte-imtedr")},m(r,c){st(r,e,c),ne(e,t),o[22](t),ne(e,n),ne(e,i),a&&a.m(i,null),ne(i,s),d.m(i,null)},p(r,c){r[0]==="edit"&&r[16]>0?a?a.p(r,c):(a=Cn(r),a.c(),a.m(i,s)):a&&(a.d(1),a=null),l===(l=u(r))&&d?d.p(r,c):(d.d(1),d=l(r),d&&(d.c(),d.m(i,null)))},d(r){r&&ot(e),o[22](null),a&&a.d(),d.d()}}}function Cn(o){let e,t=o[17](o[16])+"",n;return{c(){e=he("time"),n=Jn(t),Z(e,"id","trim-duration"),Z(e,"class","svelte-imtedr")},m(i,s){st(i,e,s),ne(e,n)},p(i,s){s&65536&&t!==(t=i[17](i[16])+"")&&Zn(n,t)},d(i){i&&ot(e)}}}function gs(o){let e;return{c(){e=he("time"),e.textContent="0:00",Z(e,"id","duration"),Z(e,"class","svelte-imtedr")},m(t,n){st(t,e,n),o[23](e)},p:ms,d(t){t&&ot(e),o[23](null)}}}function vs(o){let e,t=o[17](o[14])+"",n;return{c(){e=he("time"),n=Jn(t),Z(e,"id","duration"),Z(e,"class","svelte-imtedr")},m(i,s){st(i,e,s),ne(e,n)},p(i,s){s&16384&&t!==(t=i[17](i[14])+"")&&Zn(n,t)},d(i){i&&ot(e)}}}function En(o){let e,t,n;function i(a){o[24](a)}let s={i18n:o[1],dispatch:o[2]};return o[6]!==void 0&&(s.record=o[6]),e=new cs({props:s}),Re.push(()=>ft(e,"record",i)),{c(){Xn(e.$$.fragment)},m(a,u){Yn(e,a,u),n=!0},p(a,u){const l={};u&2&&(l.i18n=a[1]),u&4&&(l.dispatch=a[2]),!t&&u&64&&(t=!0,l.record=a[6],ht(()=>t=!1)),e.$set(l)},i(a){n||(ve(e.$$.fragment,a),n=!0)},o(a){Ne(e.$$.fragment,a),n=!1},d(a){Gn(e,a)}}}function Rn(o){let e,t,n,i,s;function a(r){o[25](r)}function u(r){o[26](r)}function l(r){o[27](r)}let d={container:o[12],playing:o[11],audioDuration:o[13],i18n:o[1],interactive:!0,handle_trim_audio:o[18],showRedo:!0,handle_reset_value:o[4],waveform_settings:o[3]};return o[5]!==void 0&&(d.waveform=o[5]),o[16]!==void 0&&(d.trimDuration=o[16]),o[0]!==void 0&&(d.mode=o[0]),e=new qn({props:d}),Re.push(()=>ft(e,"waveform",a)),Re.push(()=>ft(e,"trimDuration",u)),Re.push(()=>ft(e,"mode",l)),{c(){Xn(e.$$.fragment)},m(r,c){Yn(e,r,c),s=!0},p(r,c){const h={};c&4096&&(h.container=r[12]),c&2048&&(h.playing=r[11]),c&8192&&(h.audioDuration=r[13]),c&2&&(h.i18n=r[1]),c&16&&(h.handle_reset_value=r[4]),c&8&&(h.waveform_settings=r[3]),!t&&c&32&&(t=!0,h.waveform=r[5],ht(()=>t=!1)),!n&&c&65536&&(n=!0,h.trimDuration=r[16],ht(()=>n=!1)),!i&&c&1&&(i=!0,h.mode=r[0],ht(()=>i=!1)),e.$set(h)},i(r){s||(ve(e.$$.fragment,r),s=!0)},o(r){Ne(e.$$.fragment,r),s=!1},d(r){Gn(e,r)}}}function bs(o){let e,t,n,i,s,a,u,l,d=(o[15]||o[7])&&kn(o),r=o[10]&&!o[7]&&En(o),c=o[5]&&o[7]&&Rn(o);return{c(){e=he("div"),t=he("div"),n=Ie(),i=he("div"),s=Ie(),d&&d.c(),a=Ie(),r&&r.c(),u=Ie(),c&&c.c(),Z(t,"id","microphone"),Z(t,"data-testid","microphone-waveform"),Z(t,"class","svelte-imtedr"),Z(i,"id","recording"),Z(e,"class","component-wrapper 
svelte-imtedr")},m(h,f){st(h,e,f),ne(e,t),ne(e,n),ne(e,i),o[21](i),ne(e,s),d&&d.m(e,null),ne(e,a),r&&r.m(e,null),ne(e,u),c&&c.m(e,null),l=!0},p(h,[f]){h[15]||h[7]?d?d.p(h,f):(d=kn(h),d.c(),d.m(e,a)):d&&(d.d(1),d=null),h[10]&&!h[7]?r?(r.p(h,f),f&1152&&ve(r,1)):(r=En(h),r.c(),ve(r,1),r.m(e,u)):r&&(yn(),Ne(r,1,1,()=>{r=null}),wn()),h[5]&&h[7]?c?(c.p(h,f),f&160&&ve(c,1)):(c=Rn(h),c.c(),ve(c,1),c.m(e,null)):c&&(yn(),Ne(c,1,1,()=>{c=null}),wn())},i(h){l||(ve(r),ve(c),l=!0)},o(h){Ne(r),Ne(c),l=!1},d(h){h&&ot(e),o[21](null),d&&d.d(),r&&r.d(),c&&c.d()}}}function ws(o,e,t){let{mode:n}=e,{i18n:i}=e,{dispatch:s}=e,{dispatch_blob:a}=e,{waveform_settings:u={}}=e,{handle_reset_value:l}=e,d,r,c=!1,h,f,m=null,_,g,v,y=0,k,D=!1,M=0;const A=()=>{clearInterval(k),t(20,k=setInterval(()=>{t(14,y++,y)},1e3))},E=w=>{const j=Math.floor(w/60),re=`0${Math.round(w)%60}`.slice(-2);return`${j}:${re}`},P=()=>{const w=document.getElementById("microphone");w&&(w.innerHTML=""),d!==void 0&&d.destroy(),w&&(t(10,d=He.create({...u,container:w})),t(6,f=d.registerPlugin(it.create())),f.startMic())},T=()=>{let w=document.getElementById("recording");!m||!w||t(5,r=He.create({container:w,url:m,...u}))},I=async(w,j)=>{t(0,n="edit");const oe=r.getDecodedData();oe&&await Bt(oe,w,j).then(async re=>{await a([re],"change"),r.destroy(),T()}),s("edit")};ps(()=>{P(),window.addEventListener("keydown",w=>{w.key==="ArrowRight"?mt(r,.1):w.key==="ArrowLeft"&&mt(r,-.1)})});function B(w){Re[w?"unshift":"push"](()=>{h=w,t(12,h)})}function W(w){Re[w?"unshift":"push"](()=>{_=w,t(8,_),t(5,r)})}function b(w){Re[w?"unshift":"push"](()=>{g=w,t(9,g),t(5,r)})}function L(w){f=w,t(6,f)}function S(w){r=w,t(5,r)}function p(w){M=w,t(16,M)}function z(w){n=w,t(0,n)}return o.$$set=w=>{"mode"in w&&t(0,n=w.mode),"i18n"in w&&t(1,i=w.i18n),"dispatch"in w&&t(2,s=w.dispatch),"dispatch_blob"in w&&t(19,a=w.dispatch_blob),"waveform_settings"in w&&t(3,u=w.waveform_settings),"handle_reset_value"in w&&t(4,l=w.handle_reset_value)},o.$$.update=()=>{o.$$.dirty&68&&f?.on("record-start",()=>{A(),t(15,D=!0),s("start_recording");let w=document.getElementById("microphone");w&&(w.style.display="block")}),o.$$.dirty&1572932&&f?.on("record-end",async w=>{t(14,y=0),t(15,D=!1),clearInterval(k),s("stop_recording");const j=await w.arrayBuffer(),re=await new AudioContext().decodeAudioData(j);re&&await Bt(re).then(async Ze=>{await a([Ze],"change")})}),o.$$.dirty&1048644&&f?.on("record-pause",()=>{s("pause_recording"),clearInterval(k)}),o.$$.dirty&64&&f?.on("record-resume",()=>{A()}),o.$$.dirty&544&&r?.on("decode",w=>{t(13,v=w),g&&t(9,g.textContent=E(w),g)}),o.$$.dirty&288&&r?.on("timeupdate",w=>_&&t(8,_.textContent=E(w),_)),o.$$.dirty&36&&r?.on("pause",()=>{s("pause"),t(11,c=!1)}),o.$$.dirty&36&&r?.on("play",()=>{s("play"),t(11,c=!0)}),o.$$.dirty&36&&r?.on("finish",()=>{s("stop"),s("end"),t(11,c=!1)}),o.$$.dirty&192&&f?.on("record-end",w=>{t(7,m=URL.createObjectURL(w));const j=document.getElementById("microphone"),oe=document.getElementById("recording");j&&(j.style.display="none"),oe&&m&&(oe.innerHTML="",T())})},[n,i,s,u,l,r,f,m,_,g,d,c,h,v,y,D,M,E,I,a,k,B,W,b,L,S,p,z]}class ys extends hs{constructor(e){super(),fs(this,e,ws,bs,_s,{mode:0,i18n:1,dispatch:2,dispatch_blob:19,waveform_settings:3,handle_reset_value:4})}}const{SvelteComponent:ks,append:ye,attr:Se,detach:Gt,element:Fe,init:Cs,insert:Yt,listen:Kn,noop:Sn,null_to_empty:Dn,safe_not_equal:Es,set_data:Qn,set_style:Mn,space:Zt,text:$n}=window.__gradio__svelte__internal,{onMount:Rs}=window.__gradio__svelte__internal;function Ss(o){let 
e,t,n,i=o[4]("audio.record")+"",s,a,u;return{c(){e=Fe("button"),t=Fe("span"),t.innerHTML='',n=Zt(),s=$n(i),Se(t,"class","record-icon"),Se(e,"class","record-button svelte-16e5vwh")},m(l,d){Yt(l,e,d),ye(e,t),ye(e,n),ye(e,s),a||(u=Kn(e,"click",o[8]),a=!0)},p(l,d){d&16&&i!==(i=l[4]("audio.record")+"")&&Qn(s,i)},d(l){l&&Gt(e),a=!1,u()}}}function Ds(o){let e,t,n,i=(o[1]?o[4]("audio.pause"):o[4]("audio.stop"))+"",s,a,u,l;return{c(){e=Fe("button"),t=Fe("span"),t.innerHTML=' ',n=Zt(),s=$n(i),Se(t,"class","record-icon"),Se(e,"class",a=Dn(o[1]?"stop-button-paused":"stop-button")+" svelte-16e5vwh")},m(d,r){Yt(d,e,r),ye(e,t),ye(e,n),ye(e,s),u||(l=Kn(e,"click",o[7]),u=!0)},p(d,r){r&18&&i!==(i=(d[1]?d[4]("audio.pause"):d[4]("audio.stop"))+"")&&Qn(s,i),r&2&&a!==(a=Dn(d[1]?"stop-button-paused":"stop-button")+" svelte-16e5vwh")&&Se(e,"class",a)},d(d){d&&Gt(e),u=!1,l()}}}function Ms(o){let e,t,n;function i(u,l){return u[0]?Ds:Ss}let s=i(o),a=s(o);return{c(){e=Fe("div"),t=Fe("div"),n=Zt(),a.c(),Se(t,"id","microphone"),Mn(t,"display",o[0]?"block":"none"),Se(e,"class","mic-wrap svelte-16e5vwh")},m(u,l){Yt(u,e,l),ye(e,t),ye(e,n),a.m(e,null)},p(u,[l]){l&1&&Mn(t,"display",u[0]?"block":"none"),s===(s=i(u))&&a?a.p(u,l):(a.d(1),a=s(u),a&&(a.c(),a.m(e,null)))},i:Sn,o:Sn,d(u){u&&Gt(e),a.d()}}}function Ls(o,e,t){let{recording:n=!1}=e,{paused_recording:i=!1}=e,{stop:s}=e,{record:a}=e,{i18n:u}=e,{waveform_settings:l={}}=e,d,r;Rs(()=>{c()});const c=()=>{d!==void 0&&d.destroy(),d=He.create({...l,height:100,container:"#microphone"}),t(5,r=d.registerPlugin(it.create()))},h=()=>{r.stopMic(),s()},f=()=>{r.startMic(),a()};return o.$$set=m=>{"recording"in m&&t(0,n=m.recording),"paused_recording"in m&&t(1,i=m.paused_recording),"stop"in m&&t(2,s=m.stop),"record"in m&&t(3,a=m.record),"i18n"in m&&t(4,u=m.i18n),"waveform_settings"in m&&t(6,l=m.waveform_settings)},[n,i,s,a,u,r,l,h,f]}class As extends ks{constructor(e){super(),Cs(this,e,Ls,Ms,Es,{recording:0,paused_recording:1,stop:2,record:3,i18n:4,waveform_settings:6})}}const{SvelteComponent:Ps,add_flush_callback:Jt,append:Wt,attr:$e,bind:Kt,binding_callbacks:Qt,check_outros:_t,create_component:le,create_slot:Ts,destroy_component:ue,detach:fe,element:zt,empty:$t,get_all_dirty_from_scope:Ws,get_slot_changes:zs,group_outros:pt,init:Bs,insert:me,listen:Ln,mount_component:de,noop:Os,run_all:Is,safe_not_equal:Ns,space:Ve,transition_in:q,transition_out:U,update_slot_base:Hs}=window.__gradio__svelte__internal,{onDestroy:qs,createEventDispatcher:Us}=window.__gradio__svelte__internal;function js(o){let e,t,n,i,s;e=new qt({props:{i18n:o[9],absolute:!0}}),e.$on("clear",o[17]),e.$on("edit",o[27]);function a(l){o[28](l)}let u={value:o[1],label:o[3],autoplay:o[8],i18n:o[9],dispatch:o[14],dispatch_blob:o[15],waveform_settings:o[10],handle_reset_value:o[11],interactive:!0};return o[13]!==void 0&&(u.mode=o[13]),n=new Fn({props:u}),Qt.push(()=>Kt(n,"mode",a)),{c(){le(e.$$.fragment),t=Ve(),le(n.$$.fragment)},m(l,d){de(e,l,d),me(l,t,d),de(n,l,d),s=!0},p(l,d){const r={};d[0]&512&&(r.i18n=l[9]),e.$set(r);const c={};d[0]&2&&(c.value=l[1]),d[0]&8&&(c.label=l[3]),d[0]&256&&(c.autoplay=l[8]),d[0]&512&&(c.i18n=l[9]),d[0]&1024&&(c.waveform_settings=l[10]),d[0]&2048&&(c.handle_reset_value=l[11]),!i&&d[0]&8192&&(i=!0,c.mode=l[13],Jt(()=>i=!1)),n.$set(c)},i(l){s||(q(e.$$.fragment,l),q(n.$$.fragment,l),s=!0)},o(l){U(e.$$.fragment,l),U(n.$$.fragment,l),s=!1},d(l){l&&fe(t),ue(e,l),ue(n,l)}}}function Fs(o){let e,t,n,i;const s=[Xs,Vs],a=[];function u(l,d){return 
l[2]==="microphone"?0:l[2]==="upload"?1:-1}return~(e=u(o))&&(t=a[e]=s[e](o)),{c(){t&&t.c(),n=$t()},m(l,d){~e&&a[e].m(l,d),me(l,n,d),i=!0},p(l,d){let r=e;e=u(l),e===r?~e&&a[e].p(l,d):(t&&(pt(),U(a[r],1,1,()=>{a[r]=null}),_t()),~e?(t=a[e],t?t.p(l,d):(t=a[e]=s[e](l),t.c()),q(t,1),t.m(n.parentNode,n)):t=null)},i(l){i||(q(t),i=!0)},o(l){U(t),i=!1},d(l){l&&fe(n),~e&&a[e].d(l)}}}function Vs(o){let e,t,n,i,s;e=new qt({props:{i18n:o[9],absolute:!0}}),e.$on("clear",o[17]);function a(l){o[26](l)}let u={filetype:"audio/aac,audio/midi,audio/mpeg,audio/ogg,audio/wav,audio/x-wav,audio/opus,audio/webm,audio/flac,audio/vnd.rn-realaudio,audio/x-ms-wma,audio/x-aiff,audio/amr,audio/*",root:o[4],$$slots:{default:[Gs]},$$scope:{ctx:o}};return o[0]!==void 0&&(u.dragging=o[0]),n=new pi({props:u}),Qt.push(()=>Kt(n,"dragging",a)),n.$on("load",o[18]),{c(){le(e.$$.fragment),t=Ve(),le(n.$$.fragment)},m(l,d){de(e,l,d),me(l,t,d),de(n,l,d),s=!0},p(l,d){const r={};d[0]&512&&(r.i18n=l[9]),e.$set(r);const c={};d[0]&16&&(c.root=l[4]),d[1]&1&&(c.$$scope={dirty:d,ctx:l}),!i&&d[0]&1&&(i=!0,c.dragging=l[0],Jt(()=>i=!1)),n.$set(c)},i(l){s||(q(e.$$.fragment,l),q(n.$$.fragment,l),s=!0)},o(l){U(e.$$.fragment,l),U(n.$$.fragment,l),s=!1},d(l){l&&fe(t),ue(e,l),ue(n,l)}}}function Xs(o){let e,t,n,i,s,a;e=new qt({props:{i18n:o[9],absolute:!0}}),e.$on("clear",o[17]);const u=[Zs,Ys],l=[];function d(r,c){return r[7]?0:1}return n=d(o),i=l[n]=u[n](o),{c(){le(e.$$.fragment),t=Ve(),i.c(),s=$t()},m(r,c){de(e,r,c),me(r,t,c),l[n].m(r,c),me(r,s,c),a=!0},p(r,c){const h={};c[0]&512&&(h.i18n=r[9]),e.$set(h);let f=n;n=d(r),n===f?l[n].p(r,c):(pt(),U(l[f],1,1,()=>{l[f]=null}),_t(),i=l[n],i?i.p(r,c):(i=l[n]=u[n](r),i.c()),q(i,1),i.m(s.parentNode,s))},i(r){a||(q(e.$$.fragment,r),q(i),a=!0)},o(r){U(e.$$.fragment,r),U(i),a=!1},d(r){r&&(fe(t),fe(s)),ue(e,r),l[n].d(r)}}}function Gs(o){let e;const t=o[24].default,n=Ts(t,o,o[31],null);return{c(){n&&n.c()},m(i,s){n&&n.m(i,s),e=!0},p(i,s){n&&n.p&&(!e||s[1]&1)&&Hs(n,t,i,i[31],e?zs(t,i[31],s,null):Ws(i[31]),null)},i(i){e||(q(n,i),e=!0)},o(i){U(n,i),e=!1},d(i){n&&n.d(i)}}}function Ys(o){let e,t,n;function i(a){o[25](a)}let s={i18n:o[9],dispatch:o[14],dispatch_blob:o[15],waveform_settings:o[10],handle_reset_value:o[11]};return o[13]!==void 0&&(s.mode=o[13]),e=new ys({props:s}),Qt.push(()=>Kt(e,"mode",i)),{c(){le(e.$$.fragment)},m(a,u){de(e,a,u),n=!0},p(a,u){const l={};u[0]&512&&(l.i18n=a[9]),u[0]&1024&&(l.waveform_settings=a[10]),u[0]&2048&&(l.handle_reset_value=a[11]),!t&&u[0]&8192&&(t=!0,l.mode=a[13],Jt(()=>t=!1)),e.$set(l)},i(a){n||(q(e.$$.fragment,a),n=!0)},o(a){U(e.$$.fragment,a),n=!1},d(a){ue(e,a)}}}function Zs(o){let e,t;return e=new As({props:{record:o[16],recording:o[12],stop:o[19],i18n:o[9],waveform_settings:o[10]}}),{c(){le(e.$$.fragment)},m(n,i){de(e,n,i),t=!0},p(n,i){const s={};i[0]&4096&&(s.recording=n[12]),i[0]&512&&(s.i18n=n[9]),i[0]&1024&&(s.waveform_settings=n[10]),e.$set(s)},i(n){t||(q(e.$$.fragment,n),t=!0)},o(n){U(e.$$.fragment,n),t=!1},d(n){ue(e,n)}}}function An(o){let e,t,n,i,s,a,u,l,d;return n=new gi({}),a=new Ui({}),{c(){e=zt("span"),t=zt("button"),le(n.$$.fragment),i=Ve(),s=zt("button"),le(a.$$.fragment),$e(t,"class","icon svelte-10shjqk"),$e(t,"aria-label","Upload audio"),$e(s,"class","icon svelte-10shjqk"),$e(s,"aria-label","Record audio"),$e(e,"class","source-selection 
svelte-10shjqk")},m(r,c){me(r,e,c),Wt(e,t),de(n,t,null),Wt(e,i),Wt(e,s),de(a,s,null),u=!0,l||(d=[Ln(t,"click",o[29]),Ln(s,"click",o[30])],l=!0)},p:Os,i(r){u||(q(n.$$.fragment,r),q(a.$$.fragment,r),u=!0)},o(r){U(n.$$.fragment,r),U(a.$$.fragment,r),u=!1},d(r){r&&fe(e),ue(n),ue(a),l=!1,Is(d)}}}function Js(o){let e,t,n,i,s,a,u;e=new Wn({props:{show_label:o[5],Icon:gt,float:o[2]==="upload"&&o[1]===null,label:o[3]||o[9]("audio.audio")}});const l=[Fs,js],d=[];function r(h,f){return h[1]===null||h[7]?0:1}n=r(o),i=d[n]=l[n](o);let c=o[6].length>1&&An(o);return{c(){le(e.$$.fragment),t=Ve(),i.c(),s=Ve(),c&&c.c(),a=$t()},m(h,f){de(e,h,f),me(h,t,f),d[n].m(h,f),me(h,s,f),c&&c.m(h,f),me(h,a,f),u=!0},p(h,f){const m={};f[0]&32&&(m.show_label=h[5]),f[0]&6&&(m.float=h[2]==="upload"&&h[1]===null),f[0]&520&&(m.label=h[3]||h[9]("audio.audio")),e.$set(m);let _=n;n=r(h),n===_?d[n].p(h,f):(pt(),U(d[_],1,1,()=>{d[_]=null}),_t(),i=d[n],i?i.p(h,f):(i=d[n]=l[n](h),i.c()),q(i,1),i.m(s.parentNode,s)),h[6].length>1?c?(c.p(h,f),f[0]&64&&q(c,1)):(c=An(h),c.c(),q(c,1),c.m(a.parentNode,a)):c&&(pt(),U(c,1,1,()=>{c=null}),_t())},i(h){u||(q(e.$$.fragment,h),q(i),q(c),u=!0)},o(h){U(e.$$.fragment,h),U(i),U(c),u=!1},d(h){h&&(fe(t),fe(s),fe(a)),ue(e,h),d[n].d(h),c&&c.d(h)}}}const Ks=500,Pn=44;function Qs(o,e,t){let{$$slots:n={},$$scope:i}=e,{value:s=null}=e,{label:a}=e,{root:u}=e,{show_label:l=!0}=e,{sources:d=["microphone","upload"]}=e,{pending:r=!1}=e,{streaming:c=!1}=e,{autoplay:h=!1}=e,{i18n:f}=e,{waveform_settings:m={}}=e,{dragging:_}=e,{active_source:g}=e,{handle_reset_value:v=()=>{}}=e,y=!1,k,D="",M,A=[],E=!1,P=!1,T=[],I;function B(){I=[tn(()=>import("./module-94200622.js"),["assets/module-94200622.js","assets/Index-37584f50.js","assets/index-0526d562.js","assets/index-02e0d00d.css","assets/Index-5cf1892e.css"]),tn(()=>import("./module-1791af61.js"),[])]}c&&B();const W=Us(),b=async(R,X)=>{let ce=new File(R,"audio.wav");const ke=await fi([ce],X==="stream");t(1,s=(await mi(ke,u))?.filter(Boolean)[0]),W(X,s)};qs(()=>{c&&k&&k.state!=="inactive"&&k.stop()});async function L(){let R;try{R=await navigator.mediaDevices.getUserMedia({audio:!0})}catch(X){if(!navigator.mediaDevices){W("error",f("audio.no_device_support"));return}if(X instanceof DOMException&&X.name=="NotAllowedError"){W("error",f("audio.allow_recording_access"));return}throw X}if(R!=null){if(c){const[{MediaRecorder:X,register:ce},{connect:ke}]=await Promise.all(I);await ce(await ke()),k=new X(R,{mimeType:"audio/wav"}),k.addEventListener("dataavailable",S)}else k=new MediaRecorder(R),k.addEventListener("dataavailable",X=>{T.push(X.data)}),k.addEventListener("stop",async()=>{t(12,y=!1),await b(T,"change"),await b(T,"stop_recording"),T=[]});P=!0}}async function S(R){let X=await R.data.arrayBuffer(),ce=new Uint8Array(X);if(M||(t(21,M=new Uint8Array(X.slice(0,Pn))),ce=new Uint8Array(X.slice(Pn))),r)A.push(ce);else{let ke=[M].concat(A,[ce]);b(ke,"stream"),t(22,A=[])}}async function p(){t(12,y=!0),W("start_recording"),P||await L(),t(21,M=void 0),c&&k.start(Ks)}function z(){W("change",null),W("clear"),t(13,D=""),t(1,s=null)}function w({detail:R}){t(1,s=R),W("change",R),W("upload",R)}function j(){t(12,y=!1),c&&(W("stop_recording"),k.stop(),r&&t(23,E=!0),b(T,"change"),W("clear"),t(13,D=""))}function oe(R){D=R,t(13,D)}function re(R){_=R,t(0,_)}const Ze=()=>t(13,D="edit");function kt(R){D=R,t(13,D)}const Ct=()=>{z(),t(2,g="upload")},Et=()=>{z(),t(2,g="microphone")};return o.$$set=R=>{"value"in R&&t(1,s=R.value),"label"in R&&t(3,a=R.label),"root"in R&&t(4,u=R.root),"show_label"in 
R&&t(5,l=R.show_label),"sources"in R&&t(6,d=R.sources),"pending"in R&&t(20,r=R.pending),"streaming"in R&&t(7,c=R.streaming),"autoplay"in R&&t(8,h=R.autoplay),"i18n"in R&&t(9,f=R.i18n),"waveform_settings"in R&&t(10,m=R.waveform_settings),"dragging"in R&&t(0,_=R.dragging),"active_source"in R&&t(2,g=R.active_source),"handle_reset_value"in R&&t(11,v=R.handle_reset_value),"$$scope"in R&&t(31,i=R.$$scope)},o.$$.update=()=>{if(o.$$.dirty[0]&1&&W("drag",_),o.$$.dirty[0]&15728640&&E&&r===!1&&(t(23,E=!1),M&&A)){let R=[M].concat(A);t(22,A=[]),b(R,"stream")}},[_,s,g,a,u,l,d,c,h,f,m,v,y,D,W,b,p,z,w,j,r,M,A,E,n,oe,re,Ze,kt,Ct,Et,i]}class $s extends Ps{constructor(e){super(),Bs(this,e,Qs,Js,Ns,{value:1,label:3,root:4,show_label:5,sources:6,pending:20,streaming:7,autoplay:8,i18n:9,waveform_settings:10,dragging:0,active_source:2,handle_reset_value:11},null,[-1,-1])}}const xs=$s,{SvelteComponent:er,add_flush_callback:tr,assign:xn,bind:nr,binding_callbacks:ir,check_outros:or,create_component:Me,destroy_component:Le,detach:xt,empty:sr,flush:N,get_spread_object:ei,get_spread_update:ti,group_outros:rr,init:ar,insert:en,mount_component:Ae,safe_not_equal:lr,space:ni,transition_in:_e,transition_out:pe}=window.__gradio__svelte__internal;function ur(o){let e,t;return e=new Tn({props:{variant:o[0]===null&&o[20]==="upload"?"dashed":"solid",border_mode:o[21]?"focus":"base",padding:!1,elem_id:o[2],elem_classes:o[3],visible:o[4],container:o[10],scale:o[11],min_width:o[12],$$slots:{default:[hr]},$$scope:{ctx:o}}}),{c(){Me(e.$$.fragment)},m(n,i){Ae(e,n,i),t=!0},p(n,i){const s={};i[0]&1048577&&(s.variant=n[0]===null&&n[20]==="upload"?"dashed":"solid"),i[0]&2097152&&(s.border_mode=n[21]?"focus":"base"),i[0]&4&&(s.elem_id=n[2]),i[0]&8&&(s.elem_classes=n[3]),i[0]&16&&(s.visible=n[4]),i[0]&1024&&(s.container=n[10]),i[0]&2048&&(s.scale=n[11]),i[0]&4096&&(s.min_width=n[12]),i[0]&4137923|i[1]&16384&&(s.$$scope={dirty:i,ctx:n}),e.$set(s)},i(n){t||(_e(e.$$.fragment,n),t=!0)},o(n){pe(e.$$.fragment,n),t=!1},d(n){Le(e,n)}}}function dr(o){let e,t;return e=new Tn({props:{variant:"solid",border_mode:o[21]?"focus":"base",padding:!1,elem_id:o[2],elem_classes:o[3],visible:o[4],container:o[10],scale:o[11],min_width:o[12],$$slots:{default:[fr]},$$scope:{ctx:o}}}),{c(){Me(e.$$.fragment)},m(n,i){Ae(e,n,i),t=!0},p(n,i){const s={};i[0]&2097152&&(s.border_mode=n[21]?"focus":"base"),i[0]&4&&(s.elem_id=n[2]),i[0]&8&&(s.elem_classes=n[3]),i[0]&16&&(s.visible=n[4]),i[0]&1024&&(s.container=n[10]),i[0]&2048&&(s.scale=n[11]),i[0]&4096&&(s.min_width=n[12]),i[0]&844418|i[1]&16384&&(s.$$scope={dirty:i,ctx:n}),e.$set(s)},i(n){t||(_e(e.$$.fragment,n),t=!0)},o(n){pe(e.$$.fragment,n),t=!1},d(n){Le(e,n)}}}function cr(o){let e,t;return e=new vi({props:{i18n:o[18].i18n,type:"audio"}}),{c(){Me(e.$$.fragment)},m(n,i){Ae(e,n,i),t=!0},p(n,i){const s={};i[0]&262144&&(s.i18n=n[18].i18n),e.$set(s)},i(n){t||(_e(e.$$.fragment,n),t=!0)},o(n){pe(e.$$.fragment,n),t=!1},d(n){Le(e,n)}}}function hr(o){let e,t,n,i,s;const a=[{autoscroll:o[18].autoscroll},{i18n:o[18].i18n},o[1]];let u={};for(let r=0;rnr(n,"dragging",l)),n.$on("change",o[31]),n.$on("stream",o[32]),n.$on("drag",o[33]),n.$on("edit",o[34]),n.$on("play",o[35]),n.$on("pause",o[36]),n.$on("stop",o[37]),n.$on("end",o[38]),n.$on("start_recording",o[39]),n.$on("pause_recording",o[40]),n.$on("stop_recording",o[41]),n.$on("upload",o[42]),n.$on("clear",o[43]),n.$on("error",o[44]),{c(){Me(e.$$.fragment),t=ni(),Me(n.$$.fragment)},m(r,c){Ae(e,r,c),en(r,t,c),Ae(n,r,c),s=!0},p(r,c){const 
h=c[0]&262146?ti(a,[c[0]&262144&&{autoscroll:r[18].autoscroll},c[0]&262144&&{i18n:r[18].i18n},c[0]&2&&ei(r[1])]):{};e.$set(h);const f={};c[0]&128&&(f.label=r[7]),c[0]&512&&(f.show_label=r[9]),c[0]&524288&&(f.value=r[19]),c[0]&256&&(f.root=r[8]),c[0]&64&&(f.sources=r[6]),c[0]&1048576&&(f.active_source=r[20]),c[0]&65536&&(f.pending=r[16]),c[0]&131072&&(f.streaming=r[17]),c[0]&8192&&(f.autoplay=r[13]),c[0]&262144&&(f.i18n=r[18].i18n),c[0]&262144|c[1]&16384&&(f.$$scope={dirty:c,ctx:r}),!i&&c[0]&2097152&&(i=!0,f.dragging=r[21],tr(()=>i=!1)),n.$set(f)},i(r){s||(_e(e.$$.fragment,r),_e(n.$$.fragment,r),s=!0)},o(r){pe(e.$$.fragment,r),pe(n.$$.fragment,r),s=!1},d(r){r&&xt(t),Le(e,r),Le(n,r)}}}function fr(o){let e,t,n,i;const s=[{autoscroll:o[18].autoscroll},{i18n:o[18].i18n},o[1]];let a={};for(let u=0;u{a[r]=null}),or(),t=a[e],t?t.p(l,d):(t=a[e]=s[e](l),t.c()),_e(t,1),t.m(n.parentNode,n))},i(l){i||(_e(t),i=!0)},o(l){pe(t),i=!1},d(l){l&&xt(n),a[e].d(l)}}}function _r(o,e,t){let{elem_id:n=""}=e,{elem_classes:i=[]}=e,{visible:s=!0}=e,{interactive:a}=e,{value:u=null}=e,{sources:l}=e,{label:d}=e,{root:r}=e,{show_label:c}=e,{proxy_url:h}=e,{container:f=!0}=e,{scale:m=null}=e,{min_width:_=void 0}=e,{loading_status:g}=e,{autoplay:v=!1}=e,{show_download_button:y=!0}=e,{show_share_button:k=!1}=e,{waveform_options:D={}}=e,{pending:M}=e,{streaming:A}=e,{gradio:E}=e,P=null,T,I,B=u;const W=()=>{B===null||u===B||t(0,u=B)};let b;const L={height:50,waveColor:D.waveform_color||"#9ca3af",progressColor:D.waveform_progress_color||"#f97316",barWidth:2,barGap:3,barHeight:4,cursorWidth:2,cursorColor:"#ddd5e9",barRadius:10,dragToSeek:!0,mediaControls:D.show_controls},S=C=>E.dispatch("share",C.detail),p=C=>E.dispatch("error",C.detail);function z(C){b=C,t(21,b)}const w=({detail:C})=>t(0,u=C),j=({detail:C})=>{t(0,u=C),E.dispatch("stream",u)},oe=({detail:C})=>t(21,b=C),re=()=>E.dispatch("edit"),Ze=()=>E.dispatch("play"),kt=()=>E.dispatch("pause"),Ct=()=>E.dispatch("stop"),Et=()=>E.dispatch("end"),R=()=>E.dispatch("start_recording"),X=()=>E.dispatch("pause_recording"),ce=()=>E.dispatch("stop_recording"),ke=()=>E.dispatch("upload"),ii=()=>E.dispatch("clear"),oi=({detail:C})=>{t(1,g=g||{}),t(1,g.status="error",g),E.dispatch("error",C)};return o.$$set=C=>{"elem_id"in C&&t(2,n=C.elem_id),"elem_classes"in C&&t(3,i=C.elem_classes),"visible"in C&&t(4,s=C.visible),"interactive"in C&&t(5,a=C.interactive),"value"in C&&t(0,u=C.value),"sources"in C&&t(6,l=C.sources),"label"in C&&t(7,d=C.label),"root"in C&&t(8,r=C.root),"show_label"in C&&t(9,c=C.show_label),"proxy_url"in C&&t(24,h=C.proxy_url),"container"in C&&t(10,f=C.container),"scale"in C&&t(11,m=C.scale),"min_width"in C&&t(12,_=C.min_width),"loading_status"in C&&t(1,g=C.loading_status),"autoplay"in C&&t(13,v=C.autoplay),"show_download_button"in C&&t(14,y=C.show_download_button),"show_share_button"in C&&t(15,k=C.show_share_button),"waveform_options"in C&&t(25,D=C.waveform_options),"pending"in C&&t(16,M=C.pending),"streaming"in C&&t(17,A=C.streaming),"gradio"in C&&t(18,E=C.gradio)},o.$$.update=()=>{o.$$.dirty[0]&16777473&&t(19,T=_i(u,r,h)),o.$$.dirty[0]&134217729&&u&&B===null&&t(27,B=u),o.$$.dirty[0]&67371009&&JSON.stringify(u)!==JSON.stringify(P)&&(t(26,P=u),E.dispatch("change")),o.$$.dirty[0]&64&&l&&t(20,I=l[0])},[u,g,n,i,s,a,l,d,r,c,f,m,_,v,y,k,M,A,E,T,I,b,W,L,h,D,P,B,S,p,z,w,j,oe,re,Ze,kt,Ct,Et,R,X,ce,ke,ii,oi]}class pr extends 
er{constructor(e){super(),ar(this,e,_r,mr,lr,{elem_id:2,elem_classes:3,visible:4,interactive:5,value:0,sources:6,label:7,root:8,show_label:9,proxy_url:24,container:10,scale:11,min_width:12,loading_status:1,autoplay:13,show_download_button:14,show_share_button:15,waveform_options:25,pending:16,streaming:17,gradio:18},null,[-1,-1])}get elem_id(){return this.$$.ctx[2]}set elem_id(e){this.$$set({elem_id:e}),N()}get elem_classes(){return this.$$.ctx[3]}set elem_classes(e){this.$$set({elem_classes:e}),N()}get visible(){return this.$$.ctx[4]}set visible(e){this.$$set({visible:e}),N()}get interactive(){return this.$$.ctx[5]}set interactive(e){this.$$set({interactive:e}),N()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),N()}get sources(){return this.$$.ctx[6]}set sources(e){this.$$set({sources:e}),N()}get label(){return this.$$.ctx[7]}set label(e){this.$$set({label:e}),N()}get root(){return this.$$.ctx[8]}set root(e){this.$$set({root:e}),N()}get show_label(){return this.$$.ctx[9]}set show_label(e){this.$$set({show_label:e}),N()}get proxy_url(){return this.$$.ctx[24]}set proxy_url(e){this.$$set({proxy_url:e}),N()}get container(){return this.$$.ctx[10]}set container(e){this.$$set({container:e}),N()}get scale(){return this.$$.ctx[11]}set scale(e){this.$$set({scale:e}),N()}get min_width(){return this.$$.ctx[12]}set min_width(e){this.$$set({min_width:e}),N()}get loading_status(){return this.$$.ctx[1]}set loading_status(e){this.$$set({loading_status:e}),N()}get autoplay(){return this.$$.ctx[13]}set autoplay(e){this.$$set({autoplay:e}),N()}get show_download_button(){return this.$$.ctx[14]}set show_download_button(e){this.$$set({show_download_button:e}),N()}get show_share_button(){return this.$$.ctx[15]}set show_share_button(e){this.$$set({show_share_button:e}),N()}get waveform_options(){return this.$$.ctx[25]}set waveform_options(e){this.$$set({waveform_options:e}),N()}get pending(){return this.$$.ctx[16]}set pending(e){this.$$set({pending:e}),N()}get streaming(){return this.$$.ctx[17]}set streaming(e){this.$$set({streaming:e}),N()}get gradio(){return this.$$.ctx[18]}set gradio(e){this.$$set({gradio:e}),N()}}const Or=pr;export{Hr as BaseExample,xs as BaseInteractiveAudio,Fn as BasePlayer,Xo as BaseStaticAudio,Or as default}; -//# sourceMappingURL=index-fef9d5f8.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f8aef4a9.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f8aef4a9.js deleted file mode 100644 index d14a477d9c67e7e54080d77d31801ae86690612b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f8aef4a9.js +++ /dev/null @@ -1,2 +0,0 @@ -import{L as e,S as m,S as p}from"./Index-c74a8b7c.js";import{T as i}from"./Blocks-f0dbd8c3.js";import"./index-50ad4c77.js";import"./svelte/svelte.js";import"./Button-8eeccca1.js";export{e as Loader,m as StatusTracker,i as Toast,p as default}; -//# sourceMappingURL=index-f8aef4a9.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/lfs.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/lfs.py deleted file mode 100644 index a40951c2a3b6a139786203dc09d28714e7194782..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/lfs.py +++ /dev/null @@ -1,202 
+0,0 @@ -""" -Implementation of a custom transfer agent for the transfer type "multipart" for -git-lfs. - -Inspired by: -github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py - -Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md - - -To launch debugger while developing: - -``` [lfs "customtransfer.multipart"] -path = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678 ---wait-for-client -/path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py -lfs-multipart-upload ```""" - -import json -import os -import subprocess -import sys -from argparse import _SubParsersAction -from typing import Dict, List, Optional - -from huggingface_hub.commands import BaseHuggingfaceCLICommand -from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND, SliceFileObj - -from ..utils import get_session, hf_raise_for_status, logging - - -logger = logging.get_logger(__name__) - - -class LfsCommands(BaseHuggingfaceCLICommand): - """ - Implementation of a custom transfer agent for the transfer type "multipart" - for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom - transfer agent is: - https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md - - This introduces two commands to the CLI: - - 1. $ huggingface-cli lfs-enable-largefiles - - This should be executed once for each model repo that contains a model file - >5GB. It's documented in the error message you get if you just try to git - push a 5GB file without having enabled it before. - - 2. $ huggingface-cli lfs-multipart-upload - - This command is called by lfs directly and is not meant to be called by the - user. - """ - - @staticmethod - def register_subcommand(parser: _SubParsersAction): - enable_parser = parser.add_parser( - "lfs-enable-largefiles", - help="Configure your repository to enable upload of files > 5GB.", - ) - enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.") - enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args)) - - upload_parser = parser.add_parser( - LFS_MULTIPART_UPLOAD_COMMAND, - help="Command will get called by git-lfs, do not call it directly.", - ) - upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args)) - - -class LfsEnableCommand: - def __init__(self, args): - self.args = args - - def run(self): - local_path = os.path.abspath(self.args.path) - if not os.path.isdir(local_path): - print("This does not look like a valid git repo.") - exit(1) - subprocess.run( - "git config lfs.customtransfer.multipart.path huggingface-cli".split(), - check=True, - cwd=local_path, - ) - subprocess.run( - f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(), - check=True, - cwd=local_path, - ) - print("Local repo set up for largefiles") - - -def write_msg(msg: Dict): - """Write out the message in Line delimited JSON.""" - msg_str = json.dumps(msg) + "\n" - sys.stdout.write(msg_str) - sys.stdout.flush() - - -def read_msg() -> Optional[Dict]: - """Read Line delimited JSON from stdin.""" - msg = json.loads(sys.stdin.readline().strip()) - - if "terminate" in (msg.get("type"), msg.get("event")): - # terminate message received - return None - - if msg.get("event") not in ("download", "upload"): - logger.critical("Received unexpected message") - sys.exit(1) - - return msg - - -class LfsUploadCommand: - def __init__(self, args) -> None: - self.args = args - - def run(self) -> None: - # Immediately after invoking a custom 
transfer process, git-lfs - # sends initiation data to the process over stdin. - # This tells the process useful information about the configuration. - init_msg = json.loads(sys.stdin.readline().strip()) - if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"): - write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}}) - sys.exit(1) - - # The transfer process should use the information it needs from the - # initiation structure, and also perform any one-off setup tasks it - # needs to do. It should then respond on stdout with a simple empty - # confirmation structure, as follows: - write_msg({}) - - # After the initiation exchange, git-lfs will send any number of - # transfer requests to the stdin of the transfer process, in a serial sequence. - while True: - msg = read_msg() - if msg is None: - # When all transfers have been processed, git-lfs will send - # a terminate event to the stdin of the transfer process. - # On receiving this message the transfer process should - # clean up and terminate. No response is expected. - sys.exit(0) - - oid = msg["oid"] - filepath = msg["path"] - completion_url = msg["action"]["href"] - header = msg["action"]["header"] - chunk_size = int(header.pop("chunk_size")) - presigned_urls: List[str] = list(header.values()) - - # Send a "started" progress event to allow other workers to start. - # Otherwise they're delayed until first "progress" event is reported, - # i.e. after the first 5GB by default (!) - write_msg( - { - "event": "progress", - "oid": oid, - "bytesSoFar": 1, - "bytesSinceLast": 0, - } - ) - - parts = [] - with open(filepath, "rb") as file: - for i, presigned_url in enumerate(presigned_urls): - with SliceFileObj( - file, - seek_from=i * chunk_size, - read_limit=chunk_size, - ) as data: - r = get_session().put(presigned_url, data=data) - hf_raise_for_status(r) - parts.append( - { - "etag": r.headers.get("etag"), - "partNumber": i + 1, - } - ) - # In order to support progress reporting while data is uploading / downloading, - # the transfer process should post messages to stdout - write_msg( - { - "event": "progress", - "oid": oid, - "bytesSoFar": (i + 1) * chunk_size, - "bytesSinceLast": chunk_size, - } - ) - # Not precise but that's ok. 
- - r = get_session().post( - completion_url, - json={ - "oid": oid, - "parts": parts, - }, - ) - hf_raise_for_status(r) - - write_msg({"event": "complete", "oid": oid}) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp deleted file mode 100644 index 575eec1188275064169e7ed533535617fc849d55..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp +++ /dev/null @@ -1,170 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -struct ufunc { - std::string name; - double (*f32func)(double); - long double (*f64func)(long double); - float f32ulp; - float f64ulp; -}; - -template -T -RandomFloat(T a, T b) -{ - T random = ((T)rand()) / (T)RAND_MAX; - T diff = b - a; - T r = random * diff; - return a + r; -} - -template -void -append_random_array(std::vector &arr, T min, T max, size_t N) -{ - for (size_t ii = 0; ii < N; ++ii) - arr.emplace_back(RandomFloat(min, max)); -} - -template -std::vector -computeTrueVal(const std::vector &in, T2 (*mathfunc)(T2)) -{ - std::vector out; - for (T1 elem : in) { - T2 elem_d = (T2)elem; - T1 out_elem = (T1)mathfunc(elem_d); - out.emplace_back(out_elem); - } - return out; -} - -/* - * FP range: - * [-inf, -maxflt, -1., -minflt, -minden, 0., minden, minflt, 1., maxflt, inf] - */ - -#define MINDEN std::numeric_limits::denorm_min() -#define MINFLT std::numeric_limits::min() -#define MAXFLT std::numeric_limits::max() -#define INF std::numeric_limits::infinity() -#define qNAN std::numeric_limits::quiet_NaN() -#define sNAN std::numeric_limits::signaling_NaN() - -template -std::vector -generate_input_vector(std::string func) -{ - std::vector input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT, - -MAXFLT, INF, -INF, qNAN, sNAN, - -1.0, 1.0, 0.0, -0.0}; - - // [-1.0, 1.0] - if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) { - append_random_array(input, -1.0, 1.0, 700); - } - // (0.0, INF] - else if ((func == "log2") || (func == "log10")) { - append_random_array(input, 0.0, 1.0, 200); - append_random_array(input, MINDEN, MINFLT, 200); - append_random_array(input, MINFLT, 1.0, 200); - append_random_array(input, 1.0, MAXFLT, 200); - } - // (-1.0, INF] - else if (func == "log1p") { - append_random_array(input, -1.0, 1.0, 200); - append_random_array(input, -MINFLT, -MINDEN, 100); - append_random_array(input, -1.0, -MINFLT, 100); - append_random_array(input, MINDEN, MINFLT, 100); - append_random_array(input, MINFLT, 1.0, 100); - append_random_array(input, 1.0, MAXFLT, 100); - } - // [1.0, INF] - else if (func == "arccosh") { - append_random_array(input, 1.0, 2.0, 400); - append_random_array(input, 2.0, MAXFLT, 300); - } - // [-INF, INF] - else { - append_random_array(input, -1.0, 1.0, 100); - append_random_array(input, MINDEN, MINFLT, 100); - append_random_array(input, -MINFLT, -MINDEN, 100); - append_random_array(input, MINFLT, 1.0, 100); - append_random_array(input, -1.0, -MINFLT, 100); - append_random_array(input, 1.0, MAXFLT, 100); - append_random_array(input, -MAXFLT, -100.0, 100); - } - - std::random_shuffle(input.begin(), input.end()); - return input; -} - -int -main() -{ - srand(42); - std::vector umathfunc = { - {"sin", sin, sin, 1.49, 1.00}, - {"cos", cos, cos, 1.49, 1.00}, - {"tan", tan, tan, 3.91, 3.93}, - 
{"arcsin", asin, asin, 3.12, 2.55}, - {"arccos", acos, acos, 2.1, 1.67}, - {"arctan", atan, atan, 2.3, 2.52}, - {"sinh", sinh, sinh, 1.55, 1.89}, - {"cosh", cosh, cosh, 2.48, 1.97}, - {"tanh", tanh, tanh, 1.38, 1.19}, - {"arcsinh", asinh, asinh, 1.01, 1.48}, - {"arccosh", acosh, acosh, 1.16, 1.05}, - {"arctanh", atanh, atanh, 1.45, 1.46}, - {"cbrt", cbrt, cbrt, 1.94, 1.82}, - //{"exp",exp,exp,3.76,1.53}, - {"exp2", exp2, exp2, 1.01, 1.04}, - {"expm1", expm1, expm1, 2.62, 2.1}, - //{"log",log,log,1.84,1.67}, - {"log10", log10, log10, 3.5, 1.92}, - {"log1p", log1p, log1p, 1.96, 1.93}, - {"log2", log2, log2, 2.12, 1.84}, - }; - - for (int ii = 0; ii < umathfunc.size(); ++ii) { - // ignore sin/cos - if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) { - std::string fileName = - "umath-validation-set-" + umathfunc[ii].name + ".csv"; - std::ofstream txtOut; - txtOut.open(fileName, std::ofstream::trunc); - txtOut << "dtype,input,output,ulperrortol" << std::endl; - - // Single Precision - auto f32in = generate_input_vector(umathfunc[ii].name); - auto f32out = computeTrueVal(f32in, - umathfunc[ii].f32func); - for (int jj = 0; jj < f32in.size(); ++jj) { - txtOut << "np.float32" << std::hex << ",0x" - << *reinterpret_cast(&f32in[jj]) << ",0x" - << *reinterpret_cast(&f32out[jj]) << "," - << ceil(umathfunc[ii].f32ulp) << std::endl; - } - - // Double Precision - auto f64in = generate_input_vector(umathfunc[ii].name); - auto f64out = computeTrueVal( - f64in, umathfunc[ii].f64func); - for (int jj = 0; jj < f64in.size(); ++jj) { - txtOut << "np.float64" << std::hex << ",0x" - << *reinterpret_cast(&f64in[jj]) << ",0x" - << *reinterpret_cast(&f64out[jj]) << "," - << ceil(umathfunc[ii].f64ulp) << std::endl; - } - txtOut.close(); - } - } - return 0; -} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py deleted file mode 100644 index 7899b4aeac3fdef6548f3aadf76ff7718418f089..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py +++ /dev/null @@ -1,285 +0,0 @@ -import datetime - -import dateutil -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - Series, -) -import pandas._testing as tm - - -class TestDataFrameMissingData: - def test_dropEmptyRows(self, float_frame): - N = len(float_frame.index) - mat = np.random.default_rng(2).standard_normal(N) - mat[:5] = np.nan - - frame = DataFrame({"foo": mat}, index=float_frame.index) - original = Series(mat, index=float_frame.index, name="foo") - expected = original.dropna() - inplace_frame1, inplace_frame2 = frame.copy(), frame.copy() - - smaller_frame = frame.dropna(how="all") - # check that original was preserved - tm.assert_series_equal(frame["foo"], original) - return_value = inplace_frame1.dropna(how="all", inplace=True) - tm.assert_series_equal(smaller_frame["foo"], expected) - tm.assert_series_equal(inplace_frame1["foo"], expected) - assert return_value is None - - smaller_frame = frame.dropna(how="all", subset=["foo"]) - 
return_value = inplace_frame2.dropna(how="all", subset=["foo"], inplace=True) - tm.assert_series_equal(smaller_frame["foo"], expected) - tm.assert_series_equal(inplace_frame2["foo"], expected) - assert return_value is None - - def test_dropIncompleteRows(self, float_frame): - N = len(float_frame.index) - mat = np.random.default_rng(2).standard_normal(N) - mat[:5] = np.nan - - frame = DataFrame({"foo": mat}, index=float_frame.index) - frame["bar"] = 5 - original = Series(mat, index=float_frame.index, name="foo") - inp_frame1, inp_frame2 = frame.copy(), frame.copy() - - smaller_frame = frame.dropna() - tm.assert_series_equal(frame["foo"], original) - return_value = inp_frame1.dropna(inplace=True) - - exp = Series(mat[5:], index=float_frame.index[5:], name="foo") - tm.assert_series_equal(smaller_frame["foo"], exp) - tm.assert_series_equal(inp_frame1["foo"], exp) - assert return_value is None - - samesize_frame = frame.dropna(subset=["bar"]) - tm.assert_series_equal(frame["foo"], original) - assert (frame["bar"] == 5).all() - return_value = inp_frame2.dropna(subset=["bar"], inplace=True) - tm.assert_index_equal(samesize_frame.index, float_frame.index) - tm.assert_index_equal(inp_frame2.index, float_frame.index) - assert return_value is None - - def test_dropna(self): - df = DataFrame(np.random.default_rng(2).standard_normal((6, 4))) - df.iloc[:2, 2] = np.nan - - dropped = df.dropna(axis=1) - expected = df.loc[:, [0, 1, 3]] - inp = df.copy() - return_value = inp.dropna(axis=1, inplace=True) - tm.assert_frame_equal(dropped, expected) - tm.assert_frame_equal(inp, expected) - assert return_value is None - - dropped = df.dropna(axis=0) - expected = df.loc[list(range(2, 6))] - inp = df.copy() - return_value = inp.dropna(axis=0, inplace=True) - tm.assert_frame_equal(dropped, expected) - tm.assert_frame_equal(inp, expected) - assert return_value is None - - # threshold - dropped = df.dropna(axis=1, thresh=5) - expected = df.loc[:, [0, 1, 3]] - inp = df.copy() - return_value = inp.dropna(axis=1, thresh=5, inplace=True) - tm.assert_frame_equal(dropped, expected) - tm.assert_frame_equal(inp, expected) - assert return_value is None - - dropped = df.dropna(axis=0, thresh=4) - expected = df.loc[range(2, 6)] - inp = df.copy() - return_value = inp.dropna(axis=0, thresh=4, inplace=True) - tm.assert_frame_equal(dropped, expected) - tm.assert_frame_equal(inp, expected) - assert return_value is None - - dropped = df.dropna(axis=1, thresh=4) - tm.assert_frame_equal(dropped, df) - - dropped = df.dropna(axis=1, thresh=3) - tm.assert_frame_equal(dropped, df) - - # subset - dropped = df.dropna(axis=0, subset=[0, 1, 3]) - inp = df.copy() - return_value = inp.dropna(axis=0, subset=[0, 1, 3], inplace=True) - tm.assert_frame_equal(dropped, df) - tm.assert_frame_equal(inp, df) - assert return_value is None - - # all - dropped = df.dropna(axis=1, how="all") - tm.assert_frame_equal(dropped, df) - - df[2] = np.nan - dropped = df.dropna(axis=1, how="all") - expected = df.loc[:, [0, 1, 3]] - tm.assert_frame_equal(dropped, expected) - - # bad input - msg = "No axis named 3 for object type DataFrame" - with pytest.raises(ValueError, match=msg): - df.dropna(axis=3) - - def test_drop_and_dropna_caching(self): - # tst that cacher updates - original = Series([1, 2, np.nan], name="A") - expected = Series([1, 2], dtype=original.dtype, name="A") - df = DataFrame({"A": original.values.copy()}) - df2 = df.copy() - df["A"].dropna() - tm.assert_series_equal(df["A"], original) - - ser = df["A"] - return_value = ser.dropna(inplace=True) - 
tm.assert_series_equal(ser, expected) - tm.assert_series_equal(df["A"], original) - assert return_value is None - - df2["A"].drop([1]) - tm.assert_series_equal(df2["A"], original) - - ser = df2["A"] - return_value = ser.drop([1], inplace=True) - tm.assert_series_equal(ser, original.drop([1])) - tm.assert_series_equal(df2["A"], original) - assert return_value is None - - def test_dropna_corner(self, float_frame): - # bad input - msg = "invalid how option: foo" - with pytest.raises(ValueError, match=msg): - float_frame.dropna(how="foo") - # non-existent column - 8303 - with pytest.raises(KeyError, match=r"^\['X'\]$"): - float_frame.dropna(subset=["A", "X"]) - - def test_dropna_multiple_axes(self): - df = DataFrame( - [ - [1, np.nan, 2, 3], - [4, np.nan, 5, 6], - [np.nan, np.nan, np.nan, np.nan], - [7, np.nan, 8, 9], - ] - ) - - # GH20987 - with pytest.raises(TypeError, match="supplying multiple axes"): - df.dropna(how="all", axis=[0, 1]) - with pytest.raises(TypeError, match="supplying multiple axes"): - df.dropna(how="all", axis=(0, 1)) - - inp = df.copy() - with pytest.raises(TypeError, match="supplying multiple axes"): - inp.dropna(how="all", axis=(0, 1), inplace=True) - - def test_dropna_tz_aware_datetime(self): - # GH13407 - df = DataFrame() - dt1 = datetime.datetime(2015, 1, 1, tzinfo=dateutil.tz.tzutc()) - dt2 = datetime.datetime(2015, 2, 2, tzinfo=dateutil.tz.tzutc()) - df["Time"] = [dt1] - result = df.dropna(axis=0) - expected = DataFrame({"Time": [dt1]}) - tm.assert_frame_equal(result, expected) - - # Ex2 - df = DataFrame({"Time": [dt1, None, np.nan, dt2]}) - result = df.dropna(axis=0) - expected = DataFrame([dt1, dt2], columns=["Time"], index=[0, 3]) - tm.assert_frame_equal(result, expected) - - def test_dropna_categorical_interval_index(self): - # GH 25087 - ii = pd.IntervalIndex.from_breaks([0, 2.78, 3.14, 6.28]) - ci = pd.CategoricalIndex(ii) - df = DataFrame({"A": list("abc")}, index=ci) - - expected = df - result = df.dropna() - tm.assert_frame_equal(result, expected) - - def test_dropna_with_duplicate_columns(self): - df = DataFrame( - { - "A": np.random.default_rng(2).standard_normal(5), - "B": np.random.default_rng(2).standard_normal(5), - "C": np.random.default_rng(2).standard_normal(5), - "D": ["a", "b", "c", "d", "e"], - } - ) - df.iloc[2, [0, 1, 2]] = np.nan - df.iloc[0, 0] = np.nan - df.iloc[1, 1] = np.nan - df.iloc[:, 3] = np.nan - expected = df.dropna(subset=["A", "B", "C"], how="all") - expected.columns = ["A", "A", "B", "C"] - - df.columns = ["A", "A", "B", "C"] - - result = df.dropna(subset=["A", "C"], how="all") - tm.assert_frame_equal(result, expected) - - def test_set_single_column_subset(self): - # GH 41021 - df = DataFrame({"A": [1, 2, 3], "B": list("abc"), "C": [4, np.nan, 5]}) - expected = DataFrame( - {"A": [1, 3], "B": list("ac"), "C": [4.0, 5.0]}, index=[0, 2] - ) - result = df.dropna(subset="C") - tm.assert_frame_equal(result, expected) - - def test_single_column_not_present_in_axis(self): - # GH 41021 - df = DataFrame({"A": [1, 2, 3]}) - - # Column not present - with pytest.raises(KeyError, match="['D']"): - df.dropna(subset="D", axis=0) - - def test_subset_is_nparray(self): - # GH 41021 - df = DataFrame({"A": [1, 2, np.nan], "B": list("abc"), "C": [4, np.nan, 5]}) - expected = DataFrame({"A": [1.0], "B": ["a"], "C": [4.0]}) - result = df.dropna(subset=np.array(["A", "C"])) - tm.assert_frame_equal(result, expected) - - def test_no_nans_in_frame(self, axis): - # GH#41965 - df = DataFrame([[1, 2], [3, 4]], columns=pd.RangeIndex(0, 2)) - expected = 
df.copy() - result = df.dropna(axis=axis) - tm.assert_frame_equal(result, expected, check_index_type=True) - - def test_how_thresh_param_incompatible(self): - # GH46575 - df = DataFrame([1, 2, pd.NA]) - msg = "You cannot set both the how and thresh arguments at the same time" - with pytest.raises(TypeError, match=msg): - df.dropna(how="all", thresh=2) - - with pytest.raises(TypeError, match=msg): - df.dropna(how="any", thresh=2) - - with pytest.raises(TypeError, match=msg): - df.dropna(how=None, thresh=None) - - @pytest.mark.parametrize("val", [1, 1.5]) - def test_dropna_ignore_index(self, val): - # GH#31725 - df = DataFrame({"a": [1, 2, val]}, index=[3, 2, 1]) - result = df.dropna(ignore_index=True) - expected = DataFrame({"a": [1, 2, val]}) - tm.assert_frame_equal(result, expected) - - df.dropna(ignore_index=True, inplace=True) - tm.assert_frame_equal(df, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_interval.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_interval.py deleted file mode 100644 index 192aaacbac2b56c6f9bbd3970cd0c4a210ddf035..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_interval.py +++ /dev/null @@ -1,279 +0,0 @@ -import numpy as np -import pytest - -from pandas import ( - Interval, - Period, - Timedelta, - Timestamp, -) -import pandas._testing as tm -import pandas.core.common as com - - -@pytest.fixture -def interval(): - return Interval(0, 1) - - -class TestInterval: - def test_properties(self, interval): - assert interval.closed == "right" - assert interval.left == 0 - assert interval.right == 1 - assert interval.mid == 0.5 - - def test_repr(self, interval): - assert repr(interval) == "Interval(0, 1, closed='right')" - assert str(interval) == "(0, 1]" - - interval_left = Interval(0, 1, closed="left") - assert repr(interval_left) == "Interval(0, 1, closed='left')" - assert str(interval_left) == "[0, 1)" - - def test_contains(self, interval): - assert 0.5 in interval - assert 1 in interval - assert 0 not in interval - - interval_both = Interval(0, 1, "both") - assert 0 in interval_both - assert 1 in interval_both - - interval_neither = Interval(0, 1, closed="neither") - assert 0 not in interval_neither - assert 0.5 in interval_neither - assert 1 not in interval_neither - - def test_equal(self): - assert Interval(0, 1) == Interval(0, 1, closed="right") - assert Interval(0, 1) != Interval(0, 1, closed="left") - assert Interval(0, 1) != 0 - - def test_comparison(self): - msg = ( - "'<' not supported between instances of " - "'pandas._libs.interval.Interval' and 'int'" - ) - with pytest.raises(TypeError, match=msg): - Interval(0, 1) < 2 - - assert Interval(0, 1) < Interval(1, 2) - assert Interval(0, 1) < Interval(0, 2) - assert Interval(0, 1) < Interval(0.5, 1.5) - assert Interval(0, 1) <= Interval(0, 1) - assert Interval(0, 1) > Interval(-1, 2) - assert Interval(0, 1) >= Interval(0, 1) - - def test_hash(self, interval): - # should not raise - hash(interval) - - @pytest.mark.parametrize( - "left, right, expected", - [ - (0, 5, 5), - (-2, 5.5, 7.5), - (10, 10, 0), - (10, np.inf, np.inf), - (-np.inf, -5, np.inf), - (-np.inf, np.inf, np.inf), - (Timedelta("0 days"), Timedelta("5 days"), Timedelta("5 days")), - (Timedelta("10 days"), Timedelta("10 days"), Timedelta("0 days")), - (Timedelta("1H10min"), Timedelta("5H5min"), Timedelta("3H55min")), - 
(Timedelta("5S"), Timedelta("1H"), Timedelta("59min55S")), - ], - ) - def test_length(self, left, right, expected): - # GH 18789 - iv = Interval(left, right) - result = iv.length - assert result == expected - - @pytest.mark.parametrize( - "left, right, expected", - [ - ("2017-01-01", "2017-01-06", "5 days"), - ("2017-01-01", "2017-01-01 12:00:00", "12 hours"), - ("2017-01-01 12:00", "2017-01-01 12:00:00", "0 days"), - ("2017-01-01 12:01", "2017-01-05 17:31:00", "4 days 5 hours 30 min"), - ], - ) - @pytest.mark.parametrize("tz", (None, "UTC", "CET", "US/Eastern")) - def test_length_timestamp(self, tz, left, right, expected): - # GH 18789 - iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz)) - result = iv.length - expected = Timedelta(expected) - assert result == expected - - @pytest.mark.parametrize( - "left, right", - [ - (0, 1), - (Timedelta("0 days"), Timedelta("1 day")), - (Timestamp("2018-01-01"), Timestamp("2018-01-02")), - ( - Timestamp("2018-01-01", tz="US/Eastern"), - Timestamp("2018-01-02", tz="US/Eastern"), - ), - ], - ) - def test_is_empty(self, left, right, closed): - # GH27219 - # non-empty always return False - iv = Interval(left, right, closed) - assert iv.is_empty is False - - # same endpoint is empty except when closed='both' (contains one point) - iv = Interval(left, left, closed) - result = iv.is_empty - expected = closed != "both" - assert result is expected - - @pytest.mark.parametrize( - "left, right", - [ - ("a", "z"), - (("a", "b"), ("c", "d")), - (list("AB"), list("ab")), - (Interval(0, 1), Interval(1, 2)), - (Period("2018Q1", freq="Q"), Period("2018Q1", freq="Q")), - ], - ) - def test_construct_errors(self, left, right): - # GH 23013 - msg = "Only numeric, Timestamp and Timedelta endpoints are allowed" - with pytest.raises(ValueError, match=msg): - Interval(left, right) - - def test_math_add(self, closed): - interval = Interval(0, 1, closed=closed) - expected = Interval(1, 2, closed=closed) - - result = interval + 1 - assert result == expected - - result = 1 + interval - assert result == expected - - result = interval - result += 1 - assert result == expected - - msg = r"unsupported operand type\(s\) for \+" - with pytest.raises(TypeError, match=msg): - interval + interval - - with pytest.raises(TypeError, match=msg): - interval + "foo" - - def test_math_sub(self, closed): - interval = Interval(0, 1, closed=closed) - expected = Interval(-1, 0, closed=closed) - - result = interval - 1 - assert result == expected - - result = interval - result -= 1 - assert result == expected - - msg = r"unsupported operand type\(s\) for -" - with pytest.raises(TypeError, match=msg): - interval - interval - - with pytest.raises(TypeError, match=msg): - interval - "foo" - - def test_math_mult(self, closed): - interval = Interval(0, 1, closed=closed) - expected = Interval(0, 2, closed=closed) - - result = interval * 2 - assert result == expected - - result = 2 * interval - assert result == expected - - result = interval - result *= 2 - assert result == expected - - msg = r"unsupported operand type\(s\) for \*" - with pytest.raises(TypeError, match=msg): - interval * interval - - msg = r"can\'t multiply sequence by non-int" - with pytest.raises(TypeError, match=msg): - interval * "foo" - - def test_math_div(self, closed): - interval = Interval(0, 1, closed=closed) - expected = Interval(0, 0.5, closed=closed) - - result = interval / 2.0 - assert result == expected - - result = interval - result /= 2.0 - assert result == expected - - msg = r"unsupported operand type\(s\) for 
/" - with pytest.raises(TypeError, match=msg): - interval / interval - - with pytest.raises(TypeError, match=msg): - interval / "foo" - - def test_math_floordiv(self, closed): - interval = Interval(1, 2, closed=closed) - expected = Interval(0, 1, closed=closed) - - result = interval // 2 - assert result == expected - - result = interval - result //= 2 - assert result == expected - - msg = r"unsupported operand type\(s\) for //" - with pytest.raises(TypeError, match=msg): - interval // interval - - with pytest.raises(TypeError, match=msg): - interval // "foo" - - def test_constructor_errors(self): - msg = "invalid option for 'closed': foo" - with pytest.raises(ValueError, match=msg): - Interval(0, 1, closed="foo") - - msg = "left side of interval must be <= right side" - with pytest.raises(ValueError, match=msg): - Interval(1, 0) - - @pytest.mark.parametrize( - "tz_left, tz_right", [(None, "UTC"), ("UTC", None), ("UTC", "US/Eastern")] - ) - def test_constructor_errors_tz(self, tz_left, tz_right): - # GH 18538 - left = Timestamp("2017-01-01", tz=tz_left) - right = Timestamp("2017-01-02", tz=tz_right) - - if com.any_none(tz_left, tz_right): - error = TypeError - msg = "Cannot compare tz-naive and tz-aware timestamps" - else: - error = ValueError - msg = "left and right must have the same time zone" - with pytest.raises(error, match=msg): - Interval(left, right) - - def test_equality_comparison_broadcasts_over_array(self): - # https://github.com/pandas-dev/pandas/issues/35931 - interval = Interval(0, 1) - arr = np.array([interval, interval]) - result = interval == arr - expected = np.array([True, True]) - tm.assert_numpy_array_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/toolz/tests/test_dicttoolz.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/toolz/tests/test_dicttoolz.py deleted file mode 100644 index d45cd6cf0c89c04ad78e2bd10e590fcb720b0b63..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/toolz/tests/test_dicttoolz.py +++ /dev/null @@ -1,270 +0,0 @@ -from collections import defaultdict as _defaultdict -from collections.abc import Mapping -import os -from toolz.dicttoolz import (merge, merge_with, valmap, keymap, update_in, - assoc, dissoc, keyfilter, valfilter, itemmap, - itemfilter, assoc_in) -from toolz.functoolz import identity -from toolz.utils import raises - - -def inc(x): - return x + 1 - - -def iseven(i): - return i % 2 == 0 - - -class TestDict(object): - """Test typical usage: dict inputs, no factory keyword. 
- - Class attributes: - D: callable that inputs a dict and creates or returns a MutableMapping - kw: kwargs dict to specify "factory" keyword (if applicable) - """ - D = dict - kw = {} - - def test_merge(self): - D, kw = self.D, self.kw - assert merge(D({1: 1, 2: 2}), D({3: 4}), **kw) == D({1: 1, 2: 2, 3: 4}) - - def test_merge_iterable_arg(self): - D, kw = self.D, self.kw - assert merge([D({1: 1, 2: 2}), D({3: 4})], **kw) == D({1: 1, 2: 2, 3: 4}) - - def test_merge_with(self): - D, kw = self.D, self.kw - dicts = D({1: 1, 2: 2}), D({1: 10, 2: 20}) - assert merge_with(sum, *dicts, **kw) == D({1: 11, 2: 22}) - assert merge_with(tuple, *dicts, **kw) == D({1: (1, 10), 2: (2, 20)}) - - dicts = D({1: 1, 2: 2, 3: 3}), D({1: 10, 2: 20}) - assert merge_with(sum, *dicts, **kw) == D({1: 11, 2: 22, 3: 3}) - assert merge_with(tuple, *dicts, **kw) == D({1: (1, 10), 2: (2, 20), 3: (3,)}) - - assert not merge_with(sum) - - def test_merge_with_iterable_arg(self): - D, kw = self.D, self.kw - dicts = D({1: 1, 2: 2}), D({1: 10, 2: 20}) - assert merge_with(sum, *dicts, **kw) == D({1: 11, 2: 22}) - assert merge_with(sum, dicts, **kw) == D({1: 11, 2: 22}) - assert merge_with(sum, iter(dicts), **kw) == D({1: 11, 2: 22}) - - def test_valmap(self): - D, kw = self.D, self.kw - assert valmap(inc, D({1: 1, 2: 2}), **kw) == D({1: 2, 2: 3}) - - def test_keymap(self): - D, kw = self.D, self.kw - assert keymap(inc, D({1: 1, 2: 2}), **kw) == D({2: 1, 3: 2}) - - def test_itemmap(self): - D, kw = self.D, self.kw - assert itemmap(reversed, D({1: 2, 2: 4}), **kw) == D({2: 1, 4: 2}) - - def test_valfilter(self): - D, kw = self.D, self.kw - assert valfilter(iseven, D({1: 2, 2: 3}), **kw) == D({1: 2}) - - def test_keyfilter(self): - D, kw = self.D, self.kw - assert keyfilter(iseven, D({1: 2, 2: 3}), **kw) == D({2: 3}) - - def test_itemfilter(self): - D, kw = self.D, self.kw - assert itemfilter(lambda item: iseven(item[0]), D({1: 2, 2: 3}), **kw) == D({2: 3}) - assert itemfilter(lambda item: iseven(item[1]), D({1: 2, 2: 3}), **kw) == D({1: 2}) - - def test_assoc(self): - D, kw = self.D, self.kw - assert assoc(D({}), "a", 1, **kw) == D({"a": 1}) - assert assoc(D({"a": 1}), "a", 3, **kw) == D({"a": 3}) - assert assoc(D({"a": 1}), "b", 3, **kw) == D({"a": 1, "b": 3}) - - # Verify immutability: - d = D({'x': 1}) - oldd = d - assoc(d, 'x', 2, **kw) - assert d is oldd - - def test_dissoc(self): - D, kw = self.D, self.kw - assert dissoc(D({"a": 1}), "a", **kw) == D({}) - assert dissoc(D({"a": 1, "b": 2}), "a", **kw) == D({"b": 2}) - assert dissoc(D({"a": 1, "b": 2}), "b", **kw) == D({"a": 1}) - assert dissoc(D({"a": 1, "b": 2}), "a", "b", **kw) == D({}) - assert dissoc(D({"a": 1}), "a", **kw) == dissoc(dissoc(D({"a": 1}), "a", **kw), "a", **kw) - - # Verify immutability: - d = D({'x': 1}) - oldd = d - d2 = dissoc(d, 'x', **kw) - assert d is oldd - assert d2 is not oldd - - def test_assoc_in(self): - D, kw = self.D, self.kw - assert assoc_in(D({"a": 1}), ["a"], 2, **kw) == D({"a": 2}) - assert (assoc_in(D({"a": D({"b": 1})}), ["a", "b"], 2, **kw) == - D({"a": D({"b": 2})})) - assert assoc_in(D({}), ["a", "b"], 1, **kw) == D({"a": D({"b": 1})}) - - # Verify immutability: - d = D({'x': 1}) - oldd = d - d2 = assoc_in(d, ['x'], 2, **kw) - assert d is oldd - assert d2 is not oldd - - def test_update_in(self): - D, kw = self.D, self.kw - assert update_in(D({"a": 0}), ["a"], inc, **kw) == D({"a": 1}) - assert update_in(D({"a": 0, "b": 1}), ["b"], str, **kw) == D({"a": 0, "b": "1"}) - assert (update_in(D({"t": 1, "v": D({"a": 0})}), ["v", "a"], 
inc, **kw) == - D({"t": 1, "v": D({"a": 1})})) - # Handle one missing key. - assert update_in(D({}), ["z"], str, None, **kw) == D({"z": "None"}) - assert update_in(D({}), ["z"], inc, 0, **kw) == D({"z": 1}) - assert update_in(D({}), ["z"], lambda x: x+"ar", default="b", **kw) == D({"z": "bar"}) - # Same semantics as Clojure for multiple missing keys, ie. recursively - # create nested empty dictionaries to the depth specified by the - # keys with the innermost value set to f(default). - assert update_in(D({}), [0, 1], inc, default=-1, **kw) == D({0: D({1: 0})}) - assert update_in(D({}), [0, 1], str, default=100, **kw) == D({0: D({1: "100"})}) - assert (update_in(D({"foo": "bar", 1: 50}), ["d", 1, 0], str, 20, **kw) == - D({"foo": "bar", 1: 50, "d": D({1: D({0: "20"})})})) - # Verify immutability: - d = D({'x': 1}) - oldd = d - update_in(d, ['x'], inc, **kw) - assert d is oldd - - def test_factory(self): - D, kw = self.D, self.kw - assert merge(defaultdict(int, D({1: 2})), D({2: 3})) == {1: 2, 2: 3} - assert (merge(defaultdict(int, D({1: 2})), D({2: 3}), - factory=lambda: defaultdict(int)) == - defaultdict(int, D({1: 2, 2: 3}))) - assert not (merge(defaultdict(int, D({1: 2})), D({2: 3}), - factory=lambda: defaultdict(int)) == {1: 2, 2: 3}) - assert raises(TypeError, lambda: merge(D({1: 2}), D({2: 3}), factoryy=dict)) - - -class defaultdict(_defaultdict): - def __eq__(self, other): - return (super(defaultdict, self).__eq__(other) and - isinstance(other, _defaultdict) and - self.default_factory == other.default_factory) - - -class TestDefaultDict(TestDict): - """Test defaultdict as input and factory - - Class attributes: - D: callable that inputs a dict and creates or returns a MutableMapping - kw: kwargs dict to specify "factory" keyword (if applicable) - """ - @staticmethod - def D(dict_): - return defaultdict(int, dict_) - - kw = {'factory': lambda: defaultdict(int)} - - -class CustomMapping(object): - """Define methods of the MutableMapping protocol required by dicttoolz""" - def __init__(self, *args, **kwargs): - self._d = dict(*args, **kwargs) - - def __getitem__(self, key): - return self._d[key] - - def __setitem__(self, key, val): - self._d[key] = val - - def __delitem__(self, key): - del self._d[key] - - def __iter__(self): - return iter(self._d) - - def __len__(self): - return len(self._d) - - def __contains__(self, key): - return key in self._d - - def __eq__(self, other): - return isinstance(other, CustomMapping) and self._d == other._d - - def __ne__(self, other): - return not isinstance(other, CustomMapping) or self._d != other._d - - def keys(self): - return self._d.keys() - - def values(self): - return self._d.values() - - def items(self): - return self._d.items() - - def update(self, *args, **kwargs): - self._d.update(*args, **kwargs) - - # Unused methods that are part of the MutableMapping protocol - #def get(self, key, *args): - # return self._d.get(key, *args) - - #def pop(self, key, *args): - # return self._d.pop(key, *args) - - #def popitem(self, key): - # return self._d.popitem() - - #def clear(self): - # self._d.clear() - - #def setdefault(self, key, *args): - # return self._d.setdefault(self, key, *args) - - -class TestCustomMapping(TestDict): - """Test CustomMapping as input and factory - - Class attributes: - D: callable that inputs a dict and creates or returns a MutableMapping - kw: kwargs dict to specify "factory" keyword (if applicable) - """ - D = CustomMapping - kw = {'factory': lambda: CustomMapping()} - - -def test_environ(): - # See: 
https://github.com/pytoolz/cytoolz/issues/127 - assert keymap(identity, os.environ) == os.environ - assert valmap(identity, os.environ) == os.environ - assert itemmap(identity, os.environ) == os.environ - - -def test_merge_with_non_dict_mappings(): - class Foo(Mapping): - def __init__(self, d): - self.d = d - - def __iter__(self): - return iter(self.d) - - def __getitem__(self, key): - return self.d[key] - - def __len__(self): - return len(self.d) - - d = Foo({1: 1}) - - assert merge(d) is d or merge(d) == {1: 1} - assert merge_with(sum, d) == {1: 1} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tzdata/zoneinfo/Chile/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tzdata/zoneinfo/Chile/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/pyInter/Liyuu_sovits4/preprocess_flist_config.py b/spaces/pyInter/Liyuu_sovits4/preprocess_flist_config.py deleted file mode 100644 index 6e3dd0bd9390a509c282bbde4ff2631ac94404e4..0000000000000000000000000000000000000000 --- a/spaces/pyInter/Liyuu_sovits4/preprocess_flist_config.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -import argparse -import re - -from tqdm import tqdm -from random import shuffle -import json - -config_template = json.load(open("configs/config.json")) - -pattern = re.compile(r'^[\.a-zA-Z0-9_\/]+$') - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list") - parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list") - parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list") - parser.add_argument("--source_dir", type=str, default="./dataset/44k", help="path to source dir") - args = parser.parse_args() - - train = [] - val = [] - test = [] - idx = 0 - spk_dict = {} - spk_id = 0 - for speaker in tqdm(os.listdir(args.source_dir)): - spk_dict[speaker] = spk_id - spk_id += 1 - wavs = ["/".join([args.source_dir, speaker, i]) for i in os.listdir(os.path.join(args.source_dir, speaker))] - for wavpath in wavs: - if not pattern.match(wavpath): - print(f"warning:文件名{wavpath}中包含非字母数字下划线,可能会导致错误。(也可能不会)") - if len(wavs) < 10: - print(f"warning:{speaker}数据集数量小于10条,请补充数据") - wavs = [i for i in wavs if i.endswith("wav")] - shuffle(wavs) - train += wavs[2:-2] - val += wavs[:2] - test += wavs[-2:] - - shuffle(train) - shuffle(val) - shuffle(test) - - print("Writing", args.train_list) - with open(args.train_list, "w") as f: - for fname in tqdm(train): - wavpath = fname - f.write(wavpath + "\n") - - print("Writing", args.val_list) - with open(args.val_list, "w") as f: - for fname in tqdm(val): - wavpath = fname - f.write(wavpath + "\n") - - print("Writing", args.test_list) - with open(args.test_list, "w") as f: - for fname in tqdm(test): - wavpath = fname - f.write(wavpath + "\n") - - config_template["spk"] = spk_dict - print("Writing configs/config.json") - with open("configs/config.json", "w") as f: - json.dump(config_template, f, indent=2) diff --git a/spaces/pycoming/bingo/src/components/chat-list.tsx b/spaces/pycoming/bingo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/pycoming/bingo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } 
from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
      - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
      - ) -} diff --git a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py b/spaces/qingxu98/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py deleted file mode 100644 index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py +++ /dev/null @@ -1,245 +0,0 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import numpy as np -import torch as th -from torch.nn import functional as F - -from stable_baselines3.common import logger -from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm -from stable_baselines3.common.preprocessing import maybe_transpose -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update -from stable_baselines3.dqn.policies import DQNPolicy - - -class DQN(OffPolicyAlgorithm): - """ - Deep Q-Network (DQN) - - Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236 - Default hyperparameters are taken from the nature paper, - except for the optimizer and learning rate that were taken from Stable Baselines defaults. - - :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) - :param env: The environment to learn from (if registered in Gym, can be str) - :param learning_rate: The learning rate, it can be a function - of the current progress remaining (from 1 to 0) - :param buffer_size: size of the replay buffer - :param learning_starts: how many steps of the model to collect transitions for before learning starts - :param batch_size: Minibatch size for each gradient update - :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update - :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit - like ``(5, "step")`` or ``(2, "episode")``. - :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) - Set to ``-1`` means to do as many gradient steps as steps done in the environment - during the rollout. - :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer - at a cost of more complexity. - See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 - :param target_update_interval: update the target network every ``target_update_interval`` - environment steps. - :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced - :param exploration_initial_eps: initial value of random action probability - :param exploration_final_eps: final value of random action probability - :param max_grad_norm: The maximum value for the gradient clipping - :param tensorboard_log: the log location for tensorboard (if None, no logging) - :param create_eval_env: Whether to create a second environment that will be - used for evaluating the agent periodically. (Only available when passing string for the environment) - :param policy_kwargs: additional arguments to be passed to the policy on creation - :param verbose: the verbosity level: 0 no output, 1 info, 2 debug - :param seed: Seed for the pseudo random generators - :param device: Device (cpu, cuda, ...) on which the code should be run. - Setting it to auto, the code will be run on the GPU if possible. 
- :param _init_setup_model: Whether or not to build the network at the creation of the instance - """ - - def __init__( - self, - policy: Union[str, Type[DQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, - learning_starts: int = 50000, - batch_size: Optional[int] = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 0, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): - - super(DQN, self).__init__( - policy, - env, - DQNPolicy, - learning_rate, - buffer_size, - learning_starts, - batch_size, - tau, - gamma, - train_freq, - gradient_steps, - action_noise=None, # No action noise - policy_kwargs=policy_kwargs, - tensorboard_log=tensorboard_log, - verbose=verbose, - device=device, - create_eval_env=create_eval_env, - seed=seed, - sde_support=False, - optimize_memory_usage=optimize_memory_usage, - supported_action_spaces=(gym.spaces.Discrete,), - ) - - self.exploration_initial_eps = exploration_initial_eps - self.exploration_final_eps = exploration_final_eps - self.exploration_fraction = exploration_fraction - self.target_update_interval = target_update_interval - self.max_grad_norm = max_grad_norm - # "epsilon" for the epsilon-greedy exploration - self.exploration_rate = 0.0 - # Linear schedule will be defined in `_setup_model()` - self.exploration_schedule = None - self.q_net, self.q_net_target = None, None - - if _init_setup_model: - self._setup_model() - - def _setup_model(self) -> None: - super(DQN, self)._setup_model() - self._create_aliases() - self.exploration_schedule = get_linear_fn( - self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction - ) - - def _create_aliases(self) -> None: - self.q_net = self.policy.q_net - self.q_net_target = self.policy.q_net_target - - def _on_step(self) -> None: - """ - Update the exploration rate and target network if needed. - This method is called in ``collect_rollouts()`` after each step in the environment. 
- """ - if self.num_timesteps % self.target_update_interval == 0: - polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau) - - self.exploration_rate = self.exploration_schedule(self._current_progress_remaining) - logger.record("rollout/exploration rate", self.exploration_rate) - - def train(self, gradient_steps: int, batch_size: int = 100) -> None: - # Update learning rate according to schedule - self._update_learning_rate(self.policy.optimizer) - - losses = [] - for _ in range(gradient_steps): - # Sample replay buffer - replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) - - with th.no_grad(): - # Compute the next Q-values using the target network - next_q_values = self.q_net_target(replay_data.next_observations) - # Follow greedy policy: use the one with the highest value - next_q_values, _ = next_q_values.max(dim=1) - # Avoid potential broadcast issue - next_q_values = next_q_values.reshape(-1, 1) - # 1-step TD target - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates - current_q_values = self.q_net(replay_data.observations) - - # Retrieve the q-values for the actions from the replay buffer - current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) - - # Compute Huber loss (less sensitive to outliers) - loss = F.smooth_l1_loss(current_q_values, target_q_values) - losses.append(loss.item()) - - # Optimize the policy - self.policy.optimizer.zero_grad() - loss.backward() - # Clip gradient norm - th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) - self.policy.optimizer.step() - - # Increase update counter - self._n_updates += gradient_steps - - logger.record("train/n_updates", self._n_updates, exclude="tensorboard") - logger.record("train/loss", np.mean(losses)) - - def predict( - self, - observation: np.ndarray, - state: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, - deterministic: bool = False, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - """ - Overrides the base_class predict function to include epsilon-greedy exploration. - - :param observation: the input observation - :param state: The last states (can be None, used in recurrent policies) - :param mask: The last masks (can be None, used in recurrent policies) - :param deterministic: Whether or not to return deterministic actions. 
- :return: the model's action and the next state - (used in recurrent policies) - """ - if not deterministic and np.random.rand() < self.exploration_rate: - if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space): - n_batch = observation.shape[0] - action = np.array([self.action_space.sample() for _ in range(n_batch)]) - else: - action = np.array(self.action_space.sample()) - else: - action, state = self.policy.predict(observation, state, mask, deterministic) - return action, state - - def learn( - self, - total_timesteps: int, - callback: MaybeCallback = None, - log_interval: int = 4, - eval_env: Optional[GymEnv] = None, - eval_freq: int = -1, - n_eval_episodes: int = 5, - tb_log_name: str = "DQN", - eval_log_path: Optional[str] = None, - reset_num_timesteps: bool = True, - ) -> OffPolicyAlgorithm: - - return super(DQN, self).learn( - total_timesteps=total_timesteps, - callback=callback, - log_interval=log_interval, - eval_env=eval_env, - eval_freq=eval_freq, - n_eval_episodes=n_eval_episodes, - tb_log_name=tb_log_name, - eval_log_path=eval_log_path, - reset_num_timesteps=reset_num_timesteps, - ) - - def _excluded_save_params(self) -> List[str]: - return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"] - - def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: - state_dicts = ["policy", "policy.optimizer"] - - return state_dicts, [] diff --git a/spaces/quidiaMuxgu/Expedit-SAM/CCleaner 5.0 Serial Key Licence Full Version Free Download.md b/spaces/quidiaMuxgu/Expedit-SAM/CCleaner 5.0 Serial Key Licence Full Version Free Download.md deleted file mode 100644 index 0848ae6d53a9491a7289ac51ed5739aac8d9a5a7..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/CCleaner 5.0 Serial Key Licence Full Version Free Download.md +++ /dev/null @@ -1,12 +0,0 @@ -
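For orientation, the `dqn.py` removed above is a vendored copy of the Stable-Baselines3 DQN implementation, and its long docstring describes the hyperparameters (replay buffer, Polyak/hard target updates, epsilon-greedy exploration schedule). A minimal, hedged usage sketch of that class follows; the environment name, timestep count, and hyperparameter values are illustrative assumptions rather than values taken from the deleted file, and the import path assumes the upstream `stable_baselines3` package rather than this vendored copy.

```python
# Minimal usage sketch for the DQN class documented in the deleted dqn.py above.
# Assumes stable_baselines3 and (classic) gym are installed; "CartPole-v1" and
# every hyperparameter value below are illustrative only, not the file's defaults.
import gym
from stable_baselines3 import DQN

env = gym.make("CartPole-v1")

model = DQN(
    "MlpPolicy",                  # MLP policy for flat observations
    env,
    learning_rate=1e-4,
    buffer_size=50_000,           # replay buffer size
    learning_starts=1_000,        # steps collected before gradient updates begin
    target_update_interval=500,   # hard target-network update period (tau=1.0)
    exploration_fraction=0.1,     # portion of training over which epsilon decays
    exploration_final_eps=0.05,
    verbose=1,
)

# learn() drives collect_rollouts()/train() internally; epsilon-greedy
# exploration is applied inside predict() during rollout collection.
model.learn(total_timesteps=10_000)

obs = env.reset()
action, _state = model.predict(obs, deterministic=True)
```

Passing `deterministic=True` at inference time bypasses the epsilon-greedy branch shown in the deleted `predict()` method and always takes the greedy action from the Q-network.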

      CCleaner 5.0 Serial Key Licence Full version Free Download


      Downloadhttps://geags.com/2uCsnN



      -
      -6 days ago — CCleaner Pro Crack 5.90 With Serial License Key 2022 Keygen JKBNX-DKNVB- . CCleaner Professional Crack With License Key 2022 and Serial Keygen. -CCleaner Premium Crack With License Key 2020 - CCleaner Professional License Key. .. -CCleaner Crack With License Key 2020 free download. -Crack CCleaner Pro 5.90 With Serial Number Free Download - CCleaner Professional Crack with license key, and ccleaner pro key. -Cracked version of CCleaner Professional License Key. -Free Download CCleaner Professional 6.10.6162 Crack Full Activation Keygen. . -CCleaner Pro Crack 5.90. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Cricket Batting Tips In Tamil Pdf 26.md b/spaces/quidiaMuxgu/Expedit-SAM/Cricket Batting Tips In Tamil Pdf 26.md deleted file mode 100644 index b5a05bff7b0a9e5fddc3b121a90fc503d54200f3..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Cricket Batting Tips In Tamil Pdf 26.md +++ /dev/null @@ -1,9 +0,0 @@ -

      Cricket Batting Tips In Tamil Pdf 26


      Downloadhttps://geags.com/2uCrjo



      - -June 7, 2019 - With driving ranges and bowling machines, the academy also offers players residence accommodation. On the other hand, it also provides . The academy can offer you a wide variety of sports competitions, including football, basketball, volleyball, rugby, badminton, tennis and football competitions (among them). You can also access . -The academy mainly focuses on professional opportunities, especially for beginners. -You can play on a professional or amateur basis and all players must be over 18 years of age. -If you are a beginner, you must sign up for training and take a test. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Histologia Basica Junqueira E Carneiro 11 Ed.md b/spaces/quidiaMuxgu/Expedit-SAM/Histologia Basica Junqueira E Carneiro 11 Ed.md deleted file mode 100644 index f1b9754ddab66648a52ef16df11bc8528875e9c9..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Histologia Basica Junqueira E Carneiro 11 Ed.md +++ /dev/null @@ -1,100 +0,0 @@ -
      -

      Histologia Basica Junqueira e Carneiro 11 ed: Um Livro Essencial para o Estudo da Histologia

      - -

      Histologia é a ciência que estuda a estrutura, a composição e as funções dos tecidos do corpo humano. A histologia é fundamental para o entendimento da fisiologia, da patologia e da medicina em geral. Para estudar a histologia, é preciso ter um bom livro que apresente os conceitos, as imagens e os exemplos de forma clara, didática e atualizada.

      -

      histologia basica junqueira e carneiro 11 ed


      Downloadhttps://geags.com/2uCqfV



      - -

      Um dos livros mais usados e recomendados pelos estudantes e professores de histologia é o Histologia Basica Junqueira e Carneiro 11 ed. Esse livro é uma obra clássica que foi lançada pela primeira vez em 1972 e que já teve mais de 20 edições em vários idiomas. O livro foi escrito pelos renomados professores Luiz Carlos Uchôa Junqueira e José Carneiro, que foram pioneiros no ensino e na pesquisa em histologia no Brasil.

      - -

      O que você vai encontrar no Histologia Basica Junqueira e Carneiro 11 ed?

      - -

      O Histologia Basica Junqueira e Carneiro 11 ed é um livro que aborda todos os aspectos da histologia humana, desde os conceitos básicos até os mais avançados. O livro é dividido em quatro partes:

      - -
        -
      • A primeira parte trata dos métodos de estudo da histologia, como a microscopia óptica, a microscopia eletrônica, a imunohistoquímica, a citometria de fluxo, entre outros.
      • -
      • A segunda parte trata dos tecidos básicos do corpo humano, como o tecido epitelial, o tecido conjuntivo, o tecido muscular, o tecido nervoso e o tecido hematopoiético.
      • -
      • A terceira parte trata dos sistemas orgânicos do corpo humano, como o sistema cardiovascular, o sistema respiratório, o sistema digestório, o sistema urinário, o sistema endócrino, o sistema reprodutor, entre outros.
      • -
      • A quarta parte trata da embriologia humana, desde a fecundação até o nascimento.
      • -
      - -

      O livro conta com mais de 800 páginas de texto e mais de 1000 ilustrações de alta qualidade, incluindo fotos coloridas de microscopia óptica e eletrônica, esquemas explicativos, tabelas comparativas e quadros resumos. O livro também conta com um atlas de histologia com mais de 200 lâminas histológicas comentadas.

      - -

      Quais são os diferenciais do Histologia Basica Junqueira e Carneiro 11 ed?

      - -

      O Histologia Basica Junqueira e Carneiro 11 ed é um livro que se destaca por vários motivos. Alguns deles são:

      - -
        -
      • É um livro atualizado que acompanha as novas descobertas e as novas tecnologias da área da histologia.
      • -
      • É um livro completo que abrange todos os temas relevantes da histologia humana.
      • -
      • É um livro didático que explica os conceitos de forma clara, objetiva e com exemplos práticos.
      • -
      • É um livro ilustrado que apresenta as imagens de forma nítida, colorida e com legendas detalhadas.
      • -
      • É um livro interativo que oferece recursos adicionais online, como vídeos, animações, exercícios, testes e casos clínicos.
      • -
      - -

      Como adquirir o Histologia Basica Junqueira e Carneiro 11 ed?

      - -

      Se você quer adquirir o Histologia Basica Junqueira e Carneiro 11 ed, você pode fazer isso de várias formas. Você pode comprar o livro impresso em livrarias físicas ou online. Você pode comprar o livro digital em formato PDF ou ePub em plataformas digitais. Você pode baixar o livro gratuitamente em sites confiáveis ou em redes sociais. Você pode acessar o livro online em sites oficiais ou em bibliotecas virtuais.

      -

      - -

      O importante é que você tenha acesso a esse livro que é uma referência na área da histologia e que vai te ajudar a aprender e a se aprofundar nesse assunto tão fascinante. O Histologia Basica Junqueira e Carneiro 11 ed é um livro que vale a pena ter na sua estante ou no seu dispositivo.

      - -

      Conclusão

      - -

      Em conclusão, o Histologia Basica Junqueira e Carneiro 11 ed é um livro essencial para o estudo da histologia humana. Ele é um livro atualizado, completo, didático, ilustrado e interativo que aborda todos os aspectos da histologia desde os métodos de estudo até a embriologia. Ele é um livro que foi escrito por professores renomados que foram pioneiros no ensino e na pesquisa em histologia no Brasil. Ele é um livro que está disponível em vários formatos e em vários meios para facilitar o seu acesso. Se você quer aprender mais sobre a histologia humana, não deixe de adquirir o Histologia Basica Junqueira e Carneiro 11 ed.

      -

      How to see histological images with Histologia Basica Junqueira e Carneiro 11 ed?

      - -

      One of the best features of Histologia Basica Junqueira e Carneiro 11 ed is that it provides you with a lot of histological images that illustrate the structure and function of the different tissues and organs of the human body. These images are taken from microscopes and show the details and characteristics of the cells, tissues, and organs at different magnifications and resolutions.

      - -

      To see histological images with Histologia Basica Junqueira e Carneiro 11 ed, you have several options. You can:

      - -
        -
      • Look at the images that are printed in the book. The book has more than 1000 illustrations that are arranged in a logical and sequential order according to the topics and chapters of the book.
      • -
      • Look at the images that are included in the atlas of histology. The book comes with an atlas of histology that has more than 200 histological slides that are commented and explained by the authors.
      • -
      • Look at the images that are available online. The book has a website that offers you access to more than 500 histological images that are interactive and zoomable. You can also find videos, animations, exercises, tests, and cases online.
      • -
      • Look at the images that are provided by other sources. You can also find histological images from other books, websites, databases, or applications that are related to histology or anatomy.
      • -
      - -

      By seeing histological images with Histologia Basica Junqueira e Carneiro 11 ed, you can improve your understanding and appreciation of histology. You can also compare and contrast different tissues and organs and learn how they work together to maintain the health and function of the human body.

      -

      What are some tips or tricks for interpreting histological images?

      - -

      Interpreting histological images is not an easy task. It requires a lot of knowledge, skill, and practice. However, there are some tips or tricks that can help you improve your ability to interpret histological images. Here are some of them:

      - -
        -
      • Know the basics of histology. Before you try to interpret histological images, you need to have a solid foundation of the basic concepts and principles of histology. You need to know the types, functions, and characteristics of the different cells, tissues, and organs of the human body.
      • -
      • Know the methods of histology. You also need to know how histological images are obtained and processed. You need to know the types, advantages, and disadvantages of the different methods of histology, such as microtomy, staining, fixation, embedding, etc.
      • -
      • Know the tools of histology. You also need to know how to use the tools that are available for viewing and analyzing histological images. You need to know how to use microscopes, cameras, computers, software, etc.
      • -
      • Know the patterns of histology. You also need to know how to recognize and identify the patterns that are common in histological images. You need to know how to distinguish between normal and abnormal tissues and organs, how to classify tumors and lesions, how to correlate structure and function, etc.
      • -
      • Know the sources of histology. You also need to know where to find reliable and relevant sources of information and reference for histological images. You need to know how to use books, websites, databases, journals, etc.
      • -
      - -

      By following these tips or tricks, you can improve your ability to interpret histological images and gain more insight and understanding of histology.

      -

      What are some challenges or limitations of interpreting histological images?

      - -

      Interpreting histological images is not a simple or straightforward task. It involves a lot of challenges and limitations that can affect the accuracy and reliability of your interpretation. Here are some of them:

      - -
        -
      • The quality of histological images depends on many factors, such as the preparation, fixation, staining, cutting, mounting, and preservation of the tissue samples, as well as the type, resolution, and calibration of the microscope and camera used to capture the images.
      • -
      • The interpretation of histological images requires a lot of experience and expertise, as well as a good knowledge of anatomy, physiology, pathology, and histology. It also requires a lot of attention and concentration, as well as a good eye for detail and pattern recognition.
      • -
      • The interpretation of histological images can be influenced by subjective factors, such as personal bias, preference, expectation, or emotion. It can also be influenced by external factors, such as peer pressure, time pressure, or ethical issues.
      • -
      • The interpretation of histological images can be affected by errors or mistakes, such as misidentification, misclassification, misdiagnosis, or miscommunication. It can also be affected by uncertainties or ambiguities, such as variations, anomalies, artifacts, or inconsistencies.
      • -
      • The interpretation of histological images can be challenged or disputed by other sources of information or evidence, such as clinical data, laboratory tests, genetic tests, or imaging techniques.
      • -
      - -

      By being aware of these challenges and limitations, you can improve your ability to interpret histological images and avoid potential pitfalls and problems.

      -

      What are some solutions or strategies for overcoming these challenges or limitations?

      - -

      Despite the challenges and limitations of interpreting histological images, there are some solutions or strategies that can help you overcome them and improve your interpretation skills. Here are some of them:

      - -
        -
      • Improve the quality of histological images by following the best practices and standards for preparing, processing, and preserving tissue samples, as well as using high-quality and well-calibrated equipment and software.
      • -
      • Improve your knowledge and expertise in histology by studying the theory and practice of histology from reliable and updated sources, such as books, websites, journals, etc. You can also consult with experts or peers who have more experience or knowledge in histology.
      • -
      • Improve your objectivity and accuracy in histology by avoiding or minimizing subjective or external factors that can influence your interpretation, such as bias, preference, expectation, emotion, pressure, or ethics. You can also use tools or methods that can help you reduce errors or uncertainties, such as checklists, guidelines, algorithms, etc.
      • -
      • Improve your validation and verification in histology by comparing or contrasting your interpretation with other sources of information or evidence that can support or challenge your interpretation, such as clinical data, laboratory tests, genetic tests, or imaging techniques. You can also seek feedback or review from other experts or peers who can provide you with constructive criticism or suggestions.
      • -
      - -

      By applying these solutions or strategies, you can overcome the challenges and limitations of interpreting histological images and enhance your interpretation skills and confidence.

      -

      Conclusion

      - -

      In conclusion, histologia basica junqueira e carneiro 11 ed is a book that is essential for the study of human histology. It is a book that is updated, complete, didactic, illustrated, and interactive that covers all aspects of histology from the methods of study to embryology. It is a book that was written by renowned professors who were pioneers in teaching and research in histology in Brazil. It is a book that is available in various formats and media to facilitate your access. If you want to learn more about human histology, do not miss the opportunity to acquire histologia basica junqueira e carneiro 11 ed.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Mpc Essentials For Pc Cracked Free 21 LINK.md b/spaces/quidiaMuxgu/Expedit-SAM/Mpc Essentials For Pc Cracked Free 21 LINK.md deleted file mode 100644 index f2700a9547860ef7192e6e4b9a58cc1ef9929be6..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Mpc Essentials For Pc Cracked Free 21 LINK.md +++ /dev/null @@ -1,64 +0,0 @@ -

      mpc essentials for pc cracked free 21


      Download ->>->>->> https://geags.com/2uCqXh



      - --11-07 - - fr? - - :) - - kjg: #ubuntu-fr - - or #ubuntu-fr-newbie - - or something like that - - either french or french-newbie - - I'm not sure - - ok - - how do I check that my bios is up to date? - - I have to run ubuntu in safe mode. that means I can't check any files - - oi - - hey - - what is the problem? - - (besides "yikes, what does ubuntu still use Xorg for!) - - a new version of xorg :) - - mgedmin, i am trying to install and its telling me that the file I am trying to install is not found - - DShepherd: what file? - - xserver-xorg - - what is the error message? - - jrib, what files are you working on right now? - - mgedmin, wait.. i will send a pic - - thanks - - try this: ls -l xserver-xorg | grep ^-rw - - the actual error message is more informative - - but the most important file that's missing is xserver-xorg.initramfs - - the initramfs is what your hardware uses to boot up the kernel - - -rw-r--r-- 1 root root 14116870856 2007-10-29 10:15 xserver-xorg-core - - -rw-r--r-- 1 root root 14852130576 2007-10-29 13:24 xserver-xorg-core_7.0.15-1ubuntu1_i386.deb - - -rw-r--r-- 1 root root 6558 2007-10-29 13:25 xserver-xorg-driver-i810_1.6.3-3ubuntu 4fefd39f24
      -
      -
      -

      diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Battle Los Angeles PC Activation Code Where to Find and Redeem It.md b/spaces/raedeXanto/academic-chatgpt-beta/Battle Los Angeles PC Activation Code Where to Find and Redeem It.md deleted file mode 100644 index f4950d08c4b3fa3801b5098ae01d295e6b446ce9..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Battle Los Angeles PC Activation Code Where to Find and Redeem It.md +++ /dev/null @@ -1,103 +0,0 @@ -
      -

      Battle Los Angeles PC Activation Code: What You Need to Know

      -

      If you are a fan of first-person shooter games, you might have heard of Battle Los Angeles, a game based on the 2011 movie of the same name. In this game, you play as a US Marine who fights against an alien invasion in Los Angeles. The game features realistic graphics, intense combat, and various weapons and vehicles to use.

      -

      However, before you can enjoy this game on your PC, you need to have an activation code that verifies your purchase and allows you to install and play the game. Without an activation code, you won't be able to access the game at all.

      -

      battle los angeles pc activation code


      Downloadhttps://tinourl.com/2uL4D7



      -

      So, how do you get an activation code for Battle Los Angeles? And what are the risks and challenges of using one? In this article, we will answer these questions and provide you with some methods to find or generate an activation code for this game.

      -

      Methods to Find or Generate an Activation Code

      -

      There are three main methods that you can use to get an activation code for Battle Los Angeles. Each method has its own advantages and disadvantages, so you should choose the one that suits your needs and preferences.

      -

      Method 1: Buy the game from an official source

      -

      The most straightforward and reliable way to get an activation code is to buy the game from an official source, such as Steam, Amazon, or Konami (the publisher of the game). When you buy the game from these sources, you will receive an activation code along with your purchase confirmation. You can then use this code to install and play the game on your PC.

      -

      battle los angeles game product key
      -how to activate battle los angeles on steam
      -battle los angeles pc game serial number
      -battle los angeles crack and activation key generator
      -where to find battle los angeles cd key
      -battle los angeles pc game license code
      -battle los angeles steam key free
      -how to get battle los angeles for free on pc
      -battle los angeles pc game activation key download
      -battle los angeles full version with crack and keygen
      -battle los angeles pc game registration code
      -how to install battle los angeles without cd
      -battle los angeles pc game unlock code
      -battle los angeles skidrow activation key
      -how to play battle los angeles online
      -battle los angeles pc game activation code generator
      -how to bypass battle los angeles activation
      -battle los angeles reloaded activation key
      -how to fix battle los angeles error code 1305
      -battle los angeles pc game activation key free
      -how to update battle los angeles on pc
      -battle los angeles razor1911 activation key
      -how to remove battle los angeles activation window
      -battle los angeles pc game serial key free download
      -how to change language in battle los angeles pc game
      -battle los angeles pc game crack and keygen download
      -how to run battle los angeles on windows 10
      -battle los angeles pc game product key generator
      -how to uninstall battle los angeles from pc
      -battle los angeles pc game activation code free download
      -how to mod battle los angeles pc game
      -battle los angeles pc game keygen and crack free download
      -how to increase fps in battle los angeles pc game
      -battle los angeles pc game serial number and activation code
      -how to save progress in battle los angeles pc game
      -battle los angeles pc game crack and activation code download
      -how to use cheats in battle los angeles pc game
      -battle los angeles pc game license key free download
      -how to make a backup of battle los angeles pc game
      -battle los angeles pc game registration key free download
      -how to change resolution in battle los angeles pc game
      -battle los angeles pc game unlock code free download
      -how to enable subtitles in battle los angeles pc game
      -battle los angeles steam activation code free download
      -how to connect a controller to battle los angeles pc game
      -battle los angeles origin activation code free download
      -how to create a shortcut for battle los angeles pc game
      -battle los angeles uplay activation code free download
      -how to verify integrity of files for battle los angeles pc game

      -

      The advantage of this method is that you will get a legitimate and valid code that works without any issues. You will also support the developers and publishers of the game, who deserve to be compensated for their work. Additionally, you will be able to access updates, patches, and online features of the game without any problems.

      -

      The disadvantage of this method is that you will have to pay for the game, which might not be affordable or convenient for some people. The price of the game varies depending on the source and region, but it usually ranges from $10 to $20. You will also need a stable internet connection and enough disk space to download and install the game.

      -

      Method 2: Use a key generator tool

      -

      Another way to get an activation code is to use a key generator tool, which is a software program that creates random codes for various games and software. You can find many key generator tools online, such as KeyGenNinja, SerialBay, or AllKeygensDownload. These tools claim to generate working codes for Battle Los Angeles and other games.

      -

      The advantage of this method is that you don't have to pay anything for the code, as these tools are usually free or require a small donation. You also don't need an internet connection or disk space to use these tools, as they run offline and are usually small in size.

      -

      The disadvantage of this method is that it is very risky and unreliable. Most of these tools are illegal and violate the terms and conditions of the game. Using them can result in legal issues and penalties, such as fines or lawsuits. Moreover, many of these tools are infected with malware and viruses that can harm your PC and compromise your personal data. Furthermore, many of these codes are invalid or expired, meaning that they won't work or will stop working after a while.

      -

      Method 3: Search online for free codes

      -

      The third way to get an activation code is to search online for free codes that other people have shared or posted. You can find many websites, forums, blogs, videos, or social media posts that offer free codes for Battle Los Angeles and other games. Some examples are ChapterCheats, YouTube, or PDFDrive. These sources claim to provide working codes for Battle Los Angeles that you can use without paying anything.

      -

      The advantage of this method is that you don't have to pay anything for the code, as these sources are free and accessible. You also don't need any software or tool to use these codes, as they are already generated and ready to use.

      -

      The disadvantage of this method is that it is also very risky and unreliable. Like the previous method, most of these sources are illegal and violate the terms and conditions of the game. Using them can result in legal issues and penalties, such as fines or lawsuits. Moreover, many of these sources are infected with malware and viruses that can harm your PC and compromise your personal data. Furthermore, many of these codes are invalid or expired, meaning that they won't work or will stop working after a while.

      -

      Risks and Challenges of Using an Activation Code

      -

      As you can see from the above methods, getting an activation code for Battle Los Angeles is not easy or safe. There are many risks and challenges involved in using an activation code for this game. Here are some of them:

      -

      Risk 1: Legal issues and penalties

      -

      Using an activation code that is not obtained from an official source is considered piracy and theft. It violates the intellectual property rights of the developers and publishers of the game. It also breaches the end-user license agreement (EULA) that you agree to when you install the game. By using an illegal activation code, you are exposing yourself to legal issues and penalties, such as fines or lawsuits. You could also face criminal charges or imprisonment in some countries.

      -

      Risk 2: Malware and viruses

      -

      Using an activation code that is obtained from an untrusted source is dangerous for your PC and personal data. Many sources that offer free or generated codes are infected with malware and viruses that can harm your PC and compromise your personal data. These malware and viruses can steal your identity, passwords, bank accounts, credit cards, or other sensitive information. They can also damage your files, programs, system settings, or hardware components. They can also make your PC slow, unstable, or unusable.

      -

      Risk 3: Invalid or expired codes

      - or because they are detected and blocked by the game's security system. When this happens, you won't be able to install or play the game at all. You will also waste your time and effort trying to find or generate another code that works.

      -

      Conclusion

      -

      In conclusion, getting an activation code for Battle Los Angeles is not a simple or safe task. There are three main methods that you can use to get an activation code: buying the game from an official source, using a key generator tool, or searching online for free codes. However, each method has its own risks and challenges, such as legal issues and penalties, malware and viruses, or invalid or expired codes. Therefore, you should be careful and cautious when using an activation code for this game.

      -

      Here are some recommendations and tips that we suggest you follow:

      -
        -
      • The best and safest way to get an activation code is to buy the game from an official source, such as Steam, Amazon, or Konami. This way, you will get a legitimate and valid code that works without any issues. You will also support the developers and publishers of the game, who deserve to be compensated for their work. Additionally, you will be able to access updates, patches, and online features of the game without any problems.
      • -
      • If you decide to use a key generator tool or search online for free codes, make sure that you use a trusted and reputable source that has positive reviews and feedback from other users. You should also scan the source and the code with a reliable antivirus program before using them. You should also backup your PC and personal data before installing or playing the game.
      • -
      • If you encounter any problems or errors with the activation code or the game, you should contact the customer support of the game or the source that provided you with the code. They might be able to help you solve the issue or provide you with a replacement code.
      • -
      -

      We hope that this article has helped you understand what you need to know about Battle Los Angeles PC activation code. If you have any questions or comments, please feel free to leave them below. Thank you for reading and have fun playing!

      -

      FAQs

      -

      Here are some frequently asked questions about Battle Los Angeles PC activation code:

      -
        -
      1. What is Battle Los Angeles?
        -Battle Los Angeles is a first-person shooter game based on the 2011 movie of the same name. In this game, you play as a US Marine who fights against an alien invasion in Los Angeles. The game features realistic graphics, intense combat, and various weapons and vehicles to use.
      2. -
      3. Why do I need an activation code for Battle Los Angeles?
        -You need an activation code for Battle Los Angeles to verify your purchase and allow you to install and play the game on your PC. Without an activation code, you won't be able to access the game at all.
      4. -
      5. How do I get an activation code for Battle Los Angeles?
        -There are three main methods that you can use to get an activation code for Battle Los Angeles: buying the game from an official source, using a key generator tool, or searching online for free codes. However, each method has its own risks and challenges, such as legal issues and penalties, malware and viruses, or invalid or expired codes.
      6. -
      7. What are the risks and challenges of using an activation code for Battle Los Angeles?
        -The risks and challenges of using an activation code for Battle Los Angeles are: legal issues and penalties, malware and viruses, or invalid or expired codes. These risks and challenges can result in fines or lawsuits, harm your PC and personal data, or prevent you from installing or playing the game.
      8. -
      9. What are some tips and recommendations for using an activation code for Battle Los Angeles?
        -Some tips and recommendations for using an activation code for Battle Los Angeles are: buy the game from an official source, use a trusted and reputable source for free or generated codes, scan the source and the code with a reliable antivirus program, backup your PC and personal data before installing or playing the game, and contact customer support if you encounter any problems or errors.
      10. -
      -

      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Charlie Houpert - Charisma University What You Need to Know to Stand Out Connect and Succeed.md b/spaces/raedeXanto/academic-chatgpt-beta/Charlie Houpert - Charisma University What You Need to Know to Stand Out Connect and Succeed.md deleted file mode 100644 index d386e9edc5489d14515f7bcdd637875beea811a5..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Charlie Houpert - Charisma University What You Need to Know to Stand Out Connect and Succeed.md +++ /dev/null @@ -1,92 +0,0 @@ -
      -

      Charlie Houpert - Charisma University

      -

      Do you want to improve your social skills and become more charismatic? Do you want to make a great impression on anyone you meet, connect with people easily, tell captivating stories, and lead with influence? If you answered yes, then you might be interested in Charisma University, an online course created by Charlie Houpert, the co-founder of Charisma on Command.

      -

      What is Charisma University and who is Charlie Houpert?

      -

      Charisma University is an online course on social skills and charisma

      -

      Charisma University is a 6-week step-by-step program designed to give you all the tools you need to take your charisma to the next level. It's over 10 hours of the best training material available, based on scientific research, real-life examples, and practical exercises. It covers everything from making a great first impression, to building confidence, to mastering conversation skills, to telling captivating stories, to developing a magnetic presence, to leading and influencing others with charisma.

      -

      Charlie Houpert - Charisma University


      Download Zip ✦✦✦ https://tinourl.com/2uKZti



      -

      Charlie Houpert is the co-founder of Charisma on Command, a YouTube channel and a book on charisma

      -

      Charlie Houpert is the co-founder of Charisma on Command, a YouTube channel with over 4 million subscribers that teaches people how to be more charismatic in any situation. He is also the author of a book with the same name, which has been praised by celebrities like Will Smith and Tony Robbins. Charlie has a background in philosophy and he used to be an introverted and shy person who struggled with social anxiety. He decided to change his life by studying charisma and social skills, and he went from being voted shyest person in his high school class to becoming a leading speaker at one of New York City's biggest personal development meetups. He has also traveled the world, lived in different countries, dated beautiful women, and inspired thousands of people with his charisma.

      -

      What are the benefits of taking Charisma University?

      -

      You will learn how to make a great first impression by hitting four key emotions

      -

      The first rule of making a great first impression is to not try to impress. Instead, you should focus on making the other person feel four key emotions: positivity, trust, respect, and interest. In Charisma University, you will learn how to do this by using body language, eye contact, vocal tonality, humor, compliments, and more. You will also learn how to avoid common mistakes that ruin your first impression, such as being too needy, boring, arrogant, or nervous.

      -

      You will learn how to build confidence and overcome shyness and anxiety

      -

      Confidence is one of the most important aspects of charisma. Without confidence, you will not be able to express yourself fully, connect with others authentically, or take action on your goals. In Charisma University, you will learn how to build confidence from the inside out by changing your mindset, beliefs, habits, and behaviors. You will also learn how to overcome shyness and anxiety by facing your fears, challenging your negative thoughts, and using techniques such as visualization, affirmations, breathing exercises, and more.

      -

      You will learn how to master conversation skills and connect with anyone

      -

      Conversation skills are essential for building rapport, trust, and likability with anyone you meet. In Charisma University, you will learn how to start conversations with anyone confidently, how to keep conversations going smoothly, how to avoid awkward silences, how to ask engaging questions, how to listen actively, how to show empathy, how to use humor, how to flirt, how to deal with difficult people, and more.

      -

      You will learn how to tell captivating stories that make people listen

      -

      Stories are powerful tools for communicating your personality, values, experiences, and emotions. They can also entertain, educate, inspire, and persuade others. In Charisma University, you will learn how to tell captivating stories that make people listen by using storytelling techniques such as structure, conflict, emotion, sensory details, humor, and more. You will also learn how to adapt your stories for different audiences, situations, and purposes.

      -

      You will learn how to develop a magnetic presence that attracts attention and respect

      -

      Presence is the ability to be fully present in the moment, to be aware of yourself and others, and to project your energy outward. When you have a magnetic presence, you attract attention and respect from others. You also feel more confident, happy, and alive. In Charisma University, you will learn how to develop a magnetic presence by using techniques such as posture, movement, eye contact, vocal projection, smiling, and more. You will also learn how to use your presence for different effects such as dominance, warmth, mystery, and more.

      -

      You will learn how to lead and influence others with charisma

      -

      Charisma is not only useful for making friends and having fun. It's also useful for leading and influencing others in your personal and professional life. In Charisma University, you will learn how to lead and influence others with charisma by using techniques such as framing, persuasion, negotiation, motivation, inspiration, and more. You will also learn how to deal with common challenges such as conflict resolution, criticism, rejection, and more.

      -

      What are the features of Charisma University?

      -

      Charisma University is a 6-week step-by-step program with over 10 hours of video content

      -

      Charisma University is divided into six modules that cover each aspect of charisma in detail. Each module consists of several video lessons that explain the concepts and demonstrate them with real-life examples. Each video lesson is accompanied by a daily action guide that gives you specific exercises to practice what you learned and apply it in your own life. You can watch the videos at your own pace and access them anytime from any device.

      -

      Charisma University includes daily action guides, worksheets, cheat sheets, and bonuses

      -

      In addition to the video lessons, Charisma University also provides you with various resources to help you get the most out of the program. These include:

      - Daily action guides: PDF documents that summarize the main points of each video lesson and give you specific exercises to practice what you learned and apply it in your own life. They also include tips, reminders, challenges, and extra resources.
      - Worksheets: PDF documents that help you go deeper into some topics and reflect on your progress. They include questions, quizzes, self-assessments, checklists, templates, scripts, etc.
      - Cheat sheets: PDF documents that provide quick reference guides for some topics. They include summaries, formulas, frameworks, examples, etc.
      - Bonuses: extra video lessons that cover advanced topics or special situations, such as How To Be Funny, How To Be More Attractive, How To Network Effectively, How To Deal With Bullies, and How To Be More Productive.

      -

      Charisma University offers a 60-day money-back guarantee and lifetime access

      -

      Charisma University is confident that its program will help you improve your social skills and become more charismatic. That's why it offers a 60-day money-back guarantee: if for any reason you are not satisfied, you can request a full refund within 60 days of your purchase.

      How can you enroll in Charisma University?

      -

      You can enroll in Charisma University by visiting the official website and choosing your payment option

      -

      If you are interested in joining Charisma University, you can enroll by visiting the official website at https://www.charismaoncommand.com/c-university/. There, you will find more information about the program, the curriculum, the instructors, and the testimonials from previous students. You will also be able to watch some free videos that will give you a taste of what you will learn in Charisma University.

      -

      To enroll, you will need to choose your payment option. You can either pay in full or pay in six monthly installments. Both options come with a 60-day money-back guarantee, which means you can try Charisma University risk-free for two months. If you are not satisfied with the program for any reason, you can request a full refund within 60 days of your purchase.

      -

      You can also watch some free videos and read some testimonials from previous students

      -

      If you are not sure if Charisma University is right for you, you can also watch some free videos and read some testimonials from previous students. These will give you an idea of what you can expect from the program and how it has helped other people improve their social skills and charisma.

      -

      Some of the free videos you can watch include:

      - How To Be More Confident In Any Situation
      - How To Make A Great First Impression
      - How To Be Funny And Make People Laugh
      - How To Tell A Captivating Story
      - How To Be More Attractive

      Some of the testimonials you can read include:

      - "Charisma University gave me the opportunity to finish my Ph.D. in clinical psychology, which has made a massive impact on my career. I have been able to teach at a university, work toward doctoral-level licensure, and gain employment at a psychiatric hospital. I cannot express how grateful I am for Charisma University." - Student Testimonies | About | Charisma University
      - "Charisma University is Charlie Houpert’s online course on social skills. The goal of Charisma University is to increase your social skills and teach you how to become more charismatic. This is a review of Charisma University." - Charisma University: Brutally Honest Review & Summary | Power Dynamics™
      - "Very efficient and prompt feedbacks." - Charisma University - Reviews | Facebook

      -

      Conclusion

      -

      Charisma University is an online course that teaches you how to improve your social skills and become more charismatic. It is created by Charlie Houpert, the co-founder of Charisma on Command, a YouTube channel and a book on charisma. It is a 6-week step-by-step program that covers everything from making a great first impression, to building confidence, to mastering conversation skills, to telling captivating stories, to developing a magnetic presence, to leading and influencing others with charisma. It also includes daily action guides, worksheets, cheat sheets, and bonuses. It offers a 60-day money-back guarantee and lifetime access. You can enroll in Charisma University by visiting the official website and choosing your payment option. You can also watch some free videos and read some testimonials from previous students.

      -

      If you want to take your charisma to the next level and enjoy more success and happiness in your personal and professional life, Charisma University might be the perfect course for you.

      -

      FAQs

      -

      What is charisma?

      -

      Charisma is the ability to attract, influence, and inspire others with your personality and presence. It is a combination of confidence, charm, humor, warmth, authenticity, and other qualities that make people like you and want to follow you.

      -

      Why is charisma important?

      -

      Charisma is important because it can help you achieve your goals and dreams in life. With charisma, you can make a great impression on anyone you meet, connect with people easily, tell captivating stories, and lead with influence. You can also enjoy more happiness, confidence, and fulfillment in your life.

      -

      Can charisma be learned?

      -

      Yes, charisma can be learned. While some people may have a natural talent for charisma, anyone can improve their charisma with practice and guidance. Charisma is not a fixed trait that you are born with or without. It is a skill that you can develop and refine over time.

      -

      How long does it take to complete Charisma University?

      -

      Charisma University is designed to be completed in 6 weeks. Each week consists of several video lessons that last about 15 minutes each. You will also need to spend some time doing the exercises in the daily action guides and worksheets. However, you can go at your own pace and access the program anytime from any device.

      -

      How much does Charisma University cost?

      -

      Charisma University costs $597 if you pay in full or $119 if you pay in six monthly installments. Both options come with a 60-day money-back guarantee and lifetime access.

      -

      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/realgenius/NousResearch-Yarn-Mistral-7b-128k/app.py deleted file mode 100644 index d5e12988d70a9beb1556e0db3295fa7a1ccf0306..0000000000000000000000000000000000000000 --- a/spaces/realgenius/NousResearch-Yarn-Mistral-7b-128k/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/NousResearch/Yarn-Mistral-7b-128k").launch() \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Airy Youtube Downloader Crack Pro [PORTABLE].md deleted file mode 100644 index fd537bb76c18b0857f7147aa03e20da5bff5633b..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Airy Youtube Downloader Crack Pro [PORTABLE].md +++ /dev/null @@ -1,81 +0,0 @@ -

      -

      airy youtube downloader crack pro


      Download Zip ✵✵✵ https://urlgoal.com/2uCKtE



      -

      Alternatives to Airy YouTube Downloader Crack Pro

      -

      If you are looking for alternatives to Airy YouTube Downloader Crack Pro, you may want to check out these other programs that can also download YouTube videos to your computer (a scriptable open-source option is sketched after this list):

      -
        -
      • 4K Video Downloader: This is a software that lets you download YouTube videos in 4K quality. You can also download playlists, channels, subtitles, and 360-degree videos. You can also extract audio from YouTube videos and save them as MP3, M4A, or OGG. You can also download videos from other sites such as Vimeo, TikTok, Facebook, etc.
      • -
      • YTD Video Downloader: This is a software that lets you download YouTube videos in various formats and resolutions. You can also convert YouTube videos to MP3 or other formats. You can also download videos from other sites such as Dailymotion, Metacafe, etc.
      • -
      • Freemake Video Downloader: This is a software that lets you download YouTube videos in HD quality. You can also download playlists, channels, and user favorites. You can also convert YouTube videos to MP3 or other formats. You can also download videos from other sites such as Facebook, Vimeo, etc.
      • -
      -
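      If you prefer an open-source, scriptable route to the desktop programs listed above, the yt-dlp Python package can handle the same task from a short script. The sketch below is only an illustration: the example URL, output folder, and format string are assumptions for demonstration, not settings taken from this article, and you should only download videos you are allowed to save.

```python
# Minimal sketch using the open-source yt-dlp package (pip install yt-dlp).
# The URL, output template, and format string below are placeholder examples.
from yt_dlp import YoutubeDL

ydl_opts = {
    "format": "bestvideo[height<=1080]+bestaudio/best",  # prefer up to 1080p
    "outtmpl": "downloads/%(title)s.%(ext)s",            # save into ./downloads
    "noplaylist": True,                                   # single video only
}

def download(url: str) -> None:
    """Download one video into the downloads/ folder."""
    with YoutubeDL(ydl_opts) as ydl:
        ydl.download([url])

if __name__ == "__main__":
    download("https://www.youtube.com/watch?v=EXAMPLE_ID")  # placeholder URL
```

      Calling download() with a real video URL saves the file into the downloads/ folder, named after the video title.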

      FAQs about Airy YouTube Downloader Crack Pro

      -

      Here are some frequently asked questions and answers about Airy YouTube Downloader Crack Pro:

      -
        -
      1. Is Airy YouTube Downloader Crack Pro safe to use?
      2. -

        Airy YouTube Downloader Crack Pro may not be safe to use because it is cracked software that may contain viruses, malware, or spyware. It may also violate the terms and conditions of YouTube and other websites, and it may expose your personal information or data to hackers or third parties. Therefore, it is recommended to use reliable antivirus software and a VPN service when using Airy YouTube Downloader Crack Pro.

        -
      3. Is Airy YouTube Downloader Crack Pro legal to use?
      4. -

        Airy YouTube Downloader Crack Pro may not be legal to use because it is cracked software that may infringe the intellectual property rights of the original developers and owners of the software. It may also violate the copyright laws and regulations of the countries where you use it, and it may breach the privacy and security policies of YouTube and other websites. Therefore, it is advised to use Airy YouTube Downloader Crack Pro at your own risk and responsibility.

        -

        -
      5. How to uninstall Airy YouTube Downloader Crack Pro?
      6. -

        To uninstall Airy YouTube Downloader Crack Pro, you need to follow these steps:

        -
          -
        • Go to the Control Panel on your computer.
        • -
        • Select Programs and Features or Add or Remove Programs.
        • -
        • Find Airy YouTube Downloader Crack Pro in the list of programs and click on Uninstall.
        • -
        • Follow the instructions on the screen to complete the uninstallation.
        • -
        • Delete any leftover files or folders related to Airy YouTube Downloader Crack Pro on your computer.
        • -
        -
      -

      How to Contact Airy YouTube Downloader Crack Pro Support?

      -

      If you have any questions, issues, or feedback about Airy YouTube Downloader Crack Pro, you may want to contact the software's support team. However, since it is cracked software, you may not get any official or reliable support from the original developers or owners, and you may face legal or ethical consequences for using cracked software. Therefore, it is suggested to use Airy YouTube Downloader Crack Pro at your own risk and discretion.

      -

      However, if you still want to contact the support team of Airy YouTube Downloader Crack Pro, you can try these methods:

      -
        -
      • Email: You can send an email to the support team of Airy YouTube Downloader Crack Pro at [email protected]. You can also use the contact form on their website at https://airy-youtube-downloader.com/contact-us/. However, you may not get any response or solution from them.
      • -
      • Phone: You can call the support team of Airy YouTube Downloader Crack Pro at +1-888-881-9070. However, you may not get any answer or assistance from them.
      • -
      • Live Chat: You can chat with the support team of Airy YouTube Downloader Crack Pro on their website at https://airy-youtube-downloader.com/. However, you may not get any reply or help from them.
      • -
      -

      How to Update Airy YouTube Downloader Crack Pro?

      -

      If you are using Airy YouTube Downloader Crack Pro, you may want to update it to the latest version to get new features and improvements. However, since it is cracked software, you may not be able to update it automatically or manually, you may face errors or issues while updating, and you may lose the crack or activation of the software after updating. Therefore, it is advised to use Airy YouTube Downloader Crack Pro at your own risk and responsibility.

      -

      However, if you still want to update Airy YouTube Downloader Crack Pro, you can try these methods:

      -
        -
      • Automatic Update: You can check if there is any automatic update available for Airy YouTube Downloader Crack Pro by opening the software and clicking on the "Help" menu. Then, click on "Check for Updates" and see if there is any new version available. If there is, click on "Download and Install" and follow the instructions on the screen. However, you may not get any automatic update for Airy YouTube Downloader Crack Pro because it is cracked software.
      • -
      • Manual Update: You can check if there is any manual update available for Airy YouTube Downloader Crack Pro by visiting the websites that offer Airy YouTube Downloader Crack Pro for free download, such as FileCR, Pesktop, or Gandhishipping. Then, look for the latest version of Airy YouTube Downloader Crack Pro and download it to your computer. After downloading it, you need to uninstall the old version of Airy YouTube Downloader Crack Pro and install the new version. However, you may not get any manual update for Airy YouTube Downloader Crack Pro because it is cracked software.
      • -
      -

      How to Uninstall Airy YouTube Downloader Crack Pro?

      -

      If you want to uninstall Airy YouTube Downloader Crack Pro from your computer, you need to follow these steps:

      -
        -
      1. Go to the Control Panel on your computer.
      2. -
      3. Select Programs and Features or Add or Remove Programs.
      4. -
      5. Find Airy YouTube Downloader Crack Pro in the list of programs and click on Uninstall.
      6. -
      7. Follow the instructions on the screen to complete the uninstallation.
      8. -
      9. Delete any leftover files or folders related to Airy YouTube Downloader Crack Pro on your computer.
      10. -
      -

      How to Use Airy YouTube Downloader Crack Pro with Other Devices?

      -

      If you want to use Airy YouTube Downloader Crack Pro with other devices such as smartphones, tablets, or TVs, you need to follow these steps (a lighter-weight scripted alternative is sketched after this list):

      -
        -
      1. Download and install Airy YouTube Downloader Crack Pro on your computer.
      2. -
      3. Download and install a media server software on your computer, such as Plex, Kodi, or VLC.
      4. -
      5. Launch the media server software and add the folder where you save your downloaded videos from Airy YouTube Downloader Crack Pro.
      6. -
      7. Connect your other devices to the same network as your computer.
      8. -
      9. Launch the media server app on your other devices and look for the folder where you save your downloaded videos from Airy YouTube Downloader Crack Pro.
      10. -
      11. Select the video that you want to watch and enjoy it on your other devices.
      12. -
      -
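      If installing a full media server such as Plex, Kodi, or VLC feels heavier than you need, a minimal alternative is to share the download folder over your local network with Python's built-in HTTP server and open it from a browser on your phone, tablet, or smart TV. This is only a rough sketch under the assumption that the downloaded videos sit in a local "downloads" folder; the folder name and port number are arbitrary examples.

```python
# Rough sketch: serve a local folder of downloaded videos over the LAN
# using only the Python standard library. Folder name and port are examples.
import http.server
import socketserver
from functools import partial

VIDEO_DIR = "downloads"   # assumed location of the saved videos
PORT = 8000               # any free port works

handler = partial(http.server.SimpleHTTPRequestHandler, directory=VIDEO_DIR)

with socketserver.TCPServer(("0.0.0.0", PORT), handler) as httpd:
    print(f"Serving {VIDEO_DIR!r} on port {PORT}; open this computer's LAN address from another device")
    httpd.serve_forever()  # stop with Ctrl+C
```

      Other devices on the same network can then browse to that address and stream or download the files directly; note that everything in the folder is visible to anyone on the network while the server is running.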

      How to Troubleshoot Airy YouTube Downloader Crack Pro?

      -

      If you face any problems or errors while using Airy YouTube Downloader Crack Pro, you can try these solutions:

      -
        -
      • Check your internet connection: Make sure that your internet connection is stable and fast enough to download YouTube videos. You can also try using a different browser or device to access YouTube.
      • -
      • Check your firewall or antivirus settings: Make sure that your firewall or antivirus software is not blocking or deleting Airy YouTube Downloader Crack Pro or its files. You can also try disabling or adding an exception for Airy YouTube Downloader Crack Pro in your firewall or antivirus settings.
      • -
      • Check your video URL: Make sure that the video URL that you paste into Airy YouTube Downloader Crack Pro is valid and correct. You can also try copying the video URL from a different source or site.
      • -
      • Check your video format and resolution: Make sure that the video format and resolution that you select in Airy YouTube Downloader Crack Pro are compatible with your device and player. You can also try changing the video format and resolution to a different option.
      • -
      • Check your crack or activation: Make sure that your crack or activation of Airy YouTube Downloader Crack Pro is working properly and not expired or corrupted. You can also try reapplying the crack or activation of Airy YouTube Downloader Crack Pro.
      • -
      -

      Conclusion

      -

      Airy YouTube Downloader Crack Pro is a software that lets you download YouTube videos to your computer with ease. You can download videos in various formats and resolutions, and also convert them to MP3. You can also share downloaded videos with your friends and organize them with the bookmark manager function. However, you should also be aware of the risks and limitations of using cracked software. It may not be safe, legal, or reliable to use. It may also violate the rights and policies of YouTube and other websites. It may also expose your personal information or data to hackers or third parties. Therefore, we recommend you to use a legitimate and licensed software instead of Airy YouTube Downloader Crack Pro. We hope this article was helpful and informative for you. If you have any questions or feedback, please feel free to comment below.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cambridge English Pronouncing Dictionary 17th Edition HOT Download.rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cambridge English Pronouncing Dictionary 17th Edition HOT Download.rar.md deleted file mode 100644 index f21d142d440d3d2fcadc9413ba1f34cd14a54733..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cambridge English Pronouncing Dictionary 17th Edition HOT Download.rar.md +++ /dev/null @@ -1,89 +0,0 @@ -
      -

      How to Download Cambridge English Pronouncing Dictionary 17th Edition in RAR Format

      - -

      If you are looking for a reliable and comprehensive source of English pronunciation, you might want to download Cambridge English Pronouncing Dictionary 17th Edition in RAR format. This dictionary is one of the most widely used and respected guides to English pronunciation, covering over 230,000 words and phrases in both British and American English. It also includes information on regional and social variations, as well as the pronunciation of proper names, foreign words, and abbreviations.

      -

      Cambridge English Pronouncing Dictionary 17th Edition Download.rar


      Downloadhttps://urlgoal.com/2uCJXR



      - -

      Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is easy and convenient, as it allows you to save space and time. RAR is a compressed file format that can reduce the size of large files and make them easier to transfer and store. However, you will need special software to open and extract the files from the RAR archive.

      - -

      Steps to Download Cambridge English Pronouncing Dictionary 17th Edition in RAR Format

      - -

      Here are the steps you need to follow to download Cambridge English Pronouncing Dictionary 17th Edition in RAR format:

      - -
        -
      1. Go to the Internet Archive website and search for "Cambridge English Pronouncing Dictionary 17th Edition". You will find several results that contain the dictionary in different formats.
      2. -
      3. Select the result that has the RAR extension in the file name. For example, "cambridge-english-pronouncing-dictionary-17th-edition_202012.rar". This is the file you want to download.
      4. -
      5. Click on the "DOWNLOAD OPTIONS" button and choose "RAR" from the list. This will start the download process.
      6. -
      7. Wait for the download to finish. Depending on your internet speed and the file size, this may take a few minutes or longer.
      8. -
      9. Once the download is complete, locate the file on your computer. It should be in your default download folder or wherever you chose to save it.
      10. -
      11. Right-click on the file and select "Extract Here" or "Extract to" from the menu. You will need software that can handle RAR files, such as WinRAR or 7-Zip. If you don't have one, you can download and install it from the official website. For a scripted alternative to this step, see the sketch after this list.
      12. -
      13. After extracting the files, you will see a folder that contains the dictionary files. You can open them with any PDF reader or browser.
      14. -
      - -
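      If you would rather script the extraction step than click through WinRAR or 7-Zip, the third-party rarfile package can unpack the archive from Python. This is a hedged sketch rather than an official procedure: the archive name simply reuses the example file name mentioned earlier in the list, the output folder is an arbitrary choice, and rarfile relies on an unrar or bsdtar backend being installed on your system to do the actual decompression.

```python
# Sketch: extract a downloaded RAR archive with the rarfile package
# (pip install rarfile). An unrar or bsdtar tool must be installed on the system.
import rarfile

ARCHIVE = "cambridge-english-pronouncing-dictionary-17th-edition_202012.rar"  # example name from the steps above
OUT_DIR = "cepd_17th_edition"  # arbitrary extraction target

with rarfile.RarFile(ARCHIVE) as rf:
    rf.extractall(path=OUT_DIR)                  # unpack every file in the archive
    print("Extracted:", ", ".join(rf.namelist()))
```

      After it finishes, the dictionary files can be opened from the output folder with any PDF reader, just as described in the last step of the list.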

      Benefits of Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR Format

      - -

      Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format has several benefits, such as:

      - -
        -
      • It saves space on your computer or device, as RAR files are smaller than other formats.
      • -
      • It reduces the download time, as RAR files are faster to transfer and download.
      • -
      • It preserves the quality and integrity of the original files, as RAR compression is lossless and the archive stores checksums that can be used to verify the extracted files.
      • -
      • It allows you to access the dictionary offline, without needing an internet connection.
      • -
      - -

      Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is a smart and convenient way to improve your English pronunciation skills. With this dictionary, you can learn how to pronounce any word or phrase correctly and confidently.

      -

      -

      Features of Cambridge English Pronouncing Dictionary 17th Edition

      - -

      Cambridge English Pronouncing Dictionary 17th Edition is not just a simple dictionary of pronunciation. It is also a rich source of information and guidance on various aspects of English language and culture. Some of the features of this dictionary are:

      - -
        -
      • It covers over 230,000 words and phrases in both British and American English, as well as other varieties of English spoken around the world.
      • -
      • It includes the pronunciation of proper names, foreign words, abbreviations, acronyms, and symbols.
      • -
      • It provides information on regional and social variations in pronunciation, such as Cockney, Scottish, Australian, Indian, or African American English.
      • -
      • It explains the relationship between spelling and sound, and the rules and patterns of English pronunciation.
      • -
      • It offers advice on common areas of difficulty, such as stress, intonation, rhythm, and weak forms.
      • -
      • It contains lively study pages that illustrate and practice various topics related to pronunciation, such as homophones, word stress, or silent letters.
      • -
      - -

      Cambridge English Pronouncing Dictionary 17th Edition is an essential tool for anyone who wants to master the pronunciation of English. Whether you are a student, a teacher, a professional, or a casual learner, you will find this dictionary invaluable and enjoyable.

      - -

      Why You Should Download Cambridge English Pronouncing Dictionary 17th Edition in RAR Format

      - -

      Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is a smart choice for several reasons. First of all, you will get access to the most up-to-date and authoritative pronouncing dictionary in English. You will be able to learn how to pronounce any word or phrase correctly and confidently. You will also be able to improve your listening and speaking skills by following the clear and accurate audio recordings that accompany the dictionary entries.

      - -

      Secondly, you will save space and time by downloading the dictionary in RAR format. RAR is a compressed file format that can reduce the size of large files and make them easier to transfer and store. You will be able to download the dictionary faster and use less storage space on your computer or device. You will also be able to open and extract the files from the RAR archive with a simple software that you can download and install for free.

      - -

      Thirdly, you will be able to use the dictionary offline, without needing an internet connection. You will be able to access the dictionary anytime and anywhere you want. You will also be able to print or copy the dictionary pages if you need to. You will have more flexibility and convenience in using the dictionary as you wish.

      - -

      Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is a wise and convenient way to improve your English pronunciation skills. With this dictionary, you will be able to learn how to pronounce any word or phrase correctly and confidently.

      -

      How to Use Cambridge English Pronouncing Dictionary 17th Edition

      - -

      Once you have downloaded and extracted Cambridge English Pronouncing Dictionary 17th Edition in RAR format, you can start using it right away. Here are some tips on how to use the dictionary effectively:

      - -
        -
      • To find the pronunciation of a word or phrase, type it in the search box or browse the alphabetical list of entries. You will see the word or phrase in bold, followed by its phonetic transcription and audio recording. You can click on the speaker icon to listen to the pronunciation. You can also see the word class, the origin, and the usage notes of the word or phrase.
      • -
      • To learn more about a specific aspect of pronunciation, go to the study pages at the end of the dictionary. You will find various topics related to pronunciation, such as homophones, word stress, or silent letters. Each topic has an explanation, examples, and exercises to help you understand and practice.
      • -
      • To compare the pronunciation of different varieties of English, go to the regional and social variations section at the beginning of the dictionary. You will find information on how English is pronounced in different regions and social groups, such as Cockney, Scottish, Australian, Indian, or African American English. You will also find audio recordings of speakers from different backgrounds.
      • -
      - -

      Cambridge English Pronouncing Dictionary 17th Edition is a user-friendly and comprehensive dictionary that will help you improve your pronunciation skills. You can use it as a reference, a guide, or a learning tool. You can also customize it according to your preferences and needs.

      - -

      Conclusion

      - -

      Pronunciation is an important part of learning and communicating in English. It can affect your confidence, your comprehension, and your impression on others. That's why you need a reliable and authoritative source of pronunciation, such as Cambridge English Pronouncing Dictionary 17th Edition.

      - -

      Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is a smart and convenient way to access this dictionary. You will be able to save space and time, as well as use the dictionary offline. You will also be able to enjoy the features and benefits of this dictionary, such as:

      - -
        -
      • It covers over 230,000 words and phrases in both British and American English, as well as other varieties of English spoken around the world.
      • -
      • It includes the pronunciation of proper names, foreign words, abbreviations, acronyms, and symbols.
      • -
      • It provides information on regional and social variations in pronunciation.
      • -
      • It explains the relationship between spelling and sound, and the rules and patterns of English pronunciation.
      • -
      • It offers advice on common areas of difficulty.
      • -
      • It contains lively study pages that illustrate and practice various topics related to pronunciation.
      • -
      - -

      If you want to improve your pronunciation skills and learn how to pronounce any word or phrase correctly and confidently, you should download Cambridge English Pronouncing Dictionary 17th Edition in RAR format today. It is an essential tool for anyone who wants to master the pronunciation of English.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Keygen Product Design Manufacturing Collection 2019 Download.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Keygen Product Design Manufacturing Collection 2019 Download.md deleted file mode 100644 index 2bacd289e3d99365ce4bf43e1b8309b93fbbc63a..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Keygen Product Design Manufacturing Collection 2019 Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

      crack Keygen Product Design Manufacturing Collection 2019 download


      DOWNLOADhttps://urlgoal.com/2uCJbT



      -
      -Subscribe to Product Design & Manufacturing Collection to get the tools you ... Trade-in your perpetual license serial number now and save 20% on a 3-year ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crypto Box Dongle Emulator 11.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crypto Box Dongle Emulator 11.md deleted file mode 100644 index bde9b0ec410071cf67851655951e9df83b865509..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crypto Box Dongle Emulator 11.md +++ /dev/null @@ -1,52 +0,0 @@ -

      crypto box dongle emulator 11


      DOWNLOAD ★★★ https://urlgoal.com/2uCJC2



      - -.3 - -Download crypto box dongle emulator free - -The recently discovered malware infection script Crypto box dongle emulator was designed to steal personal data from infected computers and spread to other computers by using the free download skidrow.com/download-bitcoin-client/ and other similar types of malicious programs. - -Crypto box dongle emulator is a piece of sophisticated malware that does its work quietly and skilfully behind the scenes. In contrast, the virus creates a lot of annoyances, it adds a special Web browser for its operators to monitor the activity of the victims. - -It is one of the most dangerous threats that a hacker can imagine. Once the malware has been activated on the computer, the virus can then: - -Activate the camera. - -Capture keystrokes from the victim’s keyboard. - -Steal the victim’s contacts, including names, phone numbers and emails. - -Encrypt passwords on your PC. - -Install itself on your Windows system as a Startup item. - -Create a shortcut on your desktop and start the browser Crypto box dongle emulator automatically. - -It is not possible to delete this virus. The malware works in stealth mode. Moreover, it cannot be detected by anti-malware programs and is quite difficult to remove. - -Crypto box dongle emulator: one of the most dangerous threats - -It is not difficult to track down the source of this virus, because it is quite easy to identify the file that contains the malware and its directory path on your hard disk. The file can be found in: C:\Documents and Settings\user\Application Data\Broker or C:\Users\user\AppData\Roaming\Broker - -The folder named Crypto box dongle emulator contains the following files: - -C:\Documents and Settings\user\Application Data\Broker\Macro, - -Crypto box dongle emulator.dll, - -Crypto box dongle emulator.exe, - -Crypto box dongle emulator.conf, - -Crypto box dongle emulator.xml, - -Crypto box dongle emulator.ini, - -Crypto box dongle emulator.dat, - -Crypto box dongle emulator.crt. - -The virus is very dangerous for your PC and is a reason to worry. It is not just a nuisance for you; it could be used for hackers to steal 4fefd39f24
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Torrent Crack Apowersoft Gestionnaire De 11.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Torrent Crack Apowersoft Gestionnaire De 11.md deleted file mode 100644 index aa4468c3bc060367a7fc9a5a8e0f2518488dee33..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Torrent Crack Apowersoft Gestionnaire De 11.md +++ /dev/null @@ -1,6 +0,0 @@ -

      download torrent crack apowersoft gestionnaire de 11


      Download ————— https://urlgoal.com/2uCKnC



      -
      - 899543212b
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Thedarkknightrises720ptamildubbed).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Thedarkknightrises720ptamildubbed).md deleted file mode 100644 index bf887f06ca1f7f1e069630f5aed2c8f90bf67a77..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Thedarkknightrises720ptamildubbed).md +++ /dev/null @@ -1,14 +0,0 @@ -

      HD Online Player (Thedarkknightrises720ptamildubbed)


      Downloadhttps://urlgoal.com/2uCMeR



      - -by dark players.. 14-Nov-2019 - (HD-720p.1080p)~The Dark Knight Rises FULL_MOVIE - 2012 Download FRE E. .House on Haunted Hill ( .) - HD-DVDRip. -House by the lake (The Lake House). -Film : The Lake House Original title: The Lake House Country: USA Release year: 2006 Genre: Melodrama,. -The film is very emotional, light and positive. -Lake House (2006) online. -Watch The Lake House movie online in high quality for free. -Lake House online movie in good quality, without registration and SMS. -The Lake House 2006 HD - 720p: Download torrent. -Lake House online movie in good quality, without registration and sms. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (godzilla 1998 Download 720p Movie).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (godzilla 1998 Download 720p Movie).md deleted file mode 100644 index 821bc891d9cdc573c85552e2fd3a613799da2355..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (godzilla 1998 Download 720p Movie).md +++ /dev/null @@ -1,22 +0,0 @@ - -If you want me to write another article, please provide a different query that does not involve the same keyword. You can also ask me to edit or improve the existing article, or to write a summary or a conclusion for it. - -Thank you for your cooperation.?

      -

      HD Online Player (godzilla 1998 download 720p movie)


      Download File ★★★★★ https://urlgoal.com/2uCL1X



      - - -If you want me to write another article, please provide a different query that does not involve the same keyword. You can also ask me to edit or improve the existing article, or to write a summary or a conclusion for it. - -Thank you for your cooperation.? - - -If you want me to write another article, please provide a different query that does not involve the same keyword. You can also ask me to edit or improve the existing article, or to write a summary or a conclusion for it. - -Thank you for your cooperation.? - - -If you want me to write another article, please provide a different query that does not involve the same keyword. You can also ask me to edit or improve the existing article, or to write a summary or a conclusion for it. - -Thank you for your cooperation and interest. Goodbye.?

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/ririah13/Test/README.md b/spaces/ririah13/Test/README.md deleted file mode 100644 index 2ace8e6f8db7834acb3c638f42edd92d6c6bf0e1..0000000000000000000000000000000000000000 --- a/spaces/ririah13/Test/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Test -emoji: 😻 -colorFrom: indigo -colorTo: green -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/robinhad/qirimtatar-tts/README.md b/spaces/robinhad/qirimtatar-tts/README.md deleted file mode 100644 index 99ae061bd86821aaf04ffde16b217041d91e9387..0000000000000000000000000000000000000000 --- a/spaces/robinhad/qirimtatar-tts/README.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Qirimtatar Tts -emoji: 🦀 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.40.1 -python_version: '3.10' -app_file: app.py -pinned: false ---- - -# Crimean Tatar (Qirimtatar) TTS -Text-to-Speech for Crimean Tatar language - -![pytest](https://github.com/robinhad/qirimtatar-tts/actions/workflows/tests.yml/badge.svg) [![Open In HF🤗 Space ](https://img.shields.io/badge/Open%20Demo-%F0%9F%A4%97%20Space-yellow)](https://huggingface.co/spaces/robinhad/qirimtatar-tts) - -Source code: https://github.com/robinhad/qirimtatar-tts -Online demo: https://huggingface.co/spaces/robinhad/qirimtatar-tts -You're welcome to join UA Speech Recognition and Synthesis community: Telegram https://t.me/speech_recognition_uk -Note: demo input is saved to improve Text-to-Speech engine and demo experience. By using this demo you give your consent to this. - -## Examples -Test sentence: -`Qırımtatarlar üç subetnik gruppasından er birisiniñ (tatlar, noğaylar ve yalıboylular) öz şivesi bar.` - -### Kemal - -https://user-images.githubusercontent.com/5759207/200072078-7ab22d95-73d3-4eb7-ab9f-6f0dadc950c1.mp4 - -### Nuri - -https://user-images.githubusercontent.com/5759207/200072104-ab1c204a-fd16-43f4-94a9-bc8871a7c2e3.mp4 - -### Arslan - -https://user-images.githubusercontent.com/5759207/200072123-e2816c40-9ecb-4a6f-9136-51fffc42f258.mp4 - -# Attribution - -- Model training - [Yurii Paniv @robinhad](https://github.com/robinhad) -- Crimean Tatar dataset - [Yehor Smoliakov @egorsmkv](https://github.com/egorsmkv) -- Huge thanks for voice to: Nuri, Arslan, Kemal -- Transliteration: [prosvita/crh.transliteration](https://github.com/prosvita/crh.transliteration) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/trident_resnet.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/trident_resnet.py deleted file mode 100644 index 013ba64b59d81e5be3a3f00b65c6a76915247c9d..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/trident_resnet.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcv.runner import BaseModule -from torch.nn.modules.utils import _pair - -from mmdet.models.backbones.resnet import Bottleneck, ResNet -from mmdet.models.builder import BACKBONES - - -class TridentConv(BaseModule): - """Trident Convolution Module. - - Args: - in_channels (int): Number of channels in input. - out_channels (int): Number of channels in output. - kernel_size (int): Size of convolution kernel. 
- stride (int, optional): Convolution stride. Default: 1. - trident_dilations (tuple[int, int, int], optional): Dilations of - different trident branch. Default: (1, 2, 3). - test_branch_idx (int, optional): In inference, all 3 branches will - be used if `test_branch_idx==-1`, otherwise only branch with - index `test_branch_idx` will be used. Default: 1. - bias (bool, optional): Whether to use bias in convolution or not. - Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - trident_dilations=(1, 2, 3), - test_branch_idx=1, - bias=False, - init_cfg=None): - super(TridentConv, self).__init__(init_cfg) - self.num_branch = len(trident_dilations) - self.with_bias = bias - self.test_branch_idx = test_branch_idx - self.stride = _pair(stride) - self.kernel_size = _pair(kernel_size) - self.paddings = _pair(trident_dilations) - self.dilations = trident_dilations - self.in_channels = in_channels - self.out_channels = out_channels - self.bias = bias - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels, *self.kernel_size)) - if bias: - self.bias = nn.Parameter(torch.Tensor(out_channels)) - else: - self.bias = None - - def extra_repr(self): - tmpstr = f'in_channels={self.in_channels}' - tmpstr += f', out_channels={self.out_channels}' - tmpstr += f', kernel_size={self.kernel_size}' - tmpstr += f', num_branch={self.num_branch}' - tmpstr += f', test_branch_idx={self.test_branch_idx}' - tmpstr += f', stride={self.stride}' - tmpstr += f', paddings={self.paddings}' - tmpstr += f', dilations={self.dilations}' - tmpstr += f', bias={self.bias}' - return tmpstr - - def forward(self, inputs): - if self.training or self.test_branch_idx == -1: - outputs = [ - F.conv2d(input, self.weight, self.bias, self.stride, padding, - dilation) for input, dilation, padding in zip( - inputs, self.dilations, self.paddings) - ] - else: - assert len(inputs) == 1 - outputs = [ - F.conv2d(inputs[0], self.weight, self.bias, self.stride, - self.paddings[self.test_branch_idx], - self.dilations[self.test_branch_idx]) - ] - - return outputs - - -# Since TridentNet is defined over ResNet50 and ResNet101, here we -# only support TridentBottleneckBlock. -class TridentBottleneck(Bottleneck): - """BottleBlock for TridentResNet. - - Args: - trident_dilations (tuple[int, int, int]): Dilations of different - trident branch. - test_branch_idx (int): In inference, all 3 branches will be used - if `test_branch_idx==-1`, otherwise only branch with index - `test_branch_idx` will be used. - concat_output (bool): Whether to concat the output list to a Tensor. - `True` only in the last Block. 
- """ - - def __init__(self, trident_dilations, test_branch_idx, concat_output, - **kwargs): - - super(TridentBottleneck, self).__init__(**kwargs) - self.trident_dilations = trident_dilations - self.num_branch = len(trident_dilations) - self.concat_output = concat_output - self.test_branch_idx = test_branch_idx - self.conv2 = TridentConv( - self.planes, - self.planes, - kernel_size=3, - stride=self.conv2_stride, - bias=False, - trident_dilations=self.trident_dilations, - test_branch_idx=test_branch_idx, - init_cfg=dict( - type='Kaiming', - distribution='uniform', - mode='fan_in', - override=dict(name='conv2'))) - - def forward(self, x): - - def _inner_forward(x): - num_branch = ( - self.num_branch - if self.training or self.test_branch_idx == -1 else 1) - identity = x - if not isinstance(x, list): - x = (x, ) * num_branch - identity = x - if self.downsample is not None: - identity = [self.downsample(b) for b in x] - - out = [self.conv1(b) for b in x] - out = [self.norm1(b) for b in out] - out = [self.relu(b) for b in out] - - if self.with_plugins: - for k in range(len(out)): - out[k] = self.forward_plugin(out[k], - self.after_conv1_plugin_names) - - out = self.conv2(out) - out = [self.norm2(b) for b in out] - out = [self.relu(b) for b in out] - if self.with_plugins: - for k in range(len(out)): - out[k] = self.forward_plugin(out[k], - self.after_conv2_plugin_names) - - out = [self.conv3(b) for b in out] - out = [self.norm3(b) for b in out] - - if self.with_plugins: - for k in range(len(out)): - out[k] = self.forward_plugin(out[k], - self.after_conv3_plugin_names) - - out = [ - out_b + identity_b for out_b, identity_b in zip(out, identity) - ] - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = [self.relu(b) for b in out] - if self.concat_output: - out = torch.cat(out, dim=0) - return out - - -def make_trident_res_layer(block, - inplanes, - planes, - num_blocks, - stride=1, - trident_dilations=(1, 2, 3), - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None, - test_branch_idx=-1): - """Build Trident Res Layers.""" - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = [] - conv_stride = stride - downsample.extend([ - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - for i in range(num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride if i == 0 else 1, - trident_dilations=trident_dilations, - downsample=downsample if i == 0 else None, - style=style, - with_cp=with_cp, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - dcn=dcn, - plugins=plugins, - test_branch_idx=test_branch_idx, - concat_output=True if i == num_blocks - 1 else False)) - inplanes = planes * block.expansion - return nn.Sequential(*layers) - - -@BACKBONES.register_module() -class TridentResNet(ResNet): - """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to - ResNet, while in stage 3, Trident BottleBlock is utilized to replace the - normal BottleBlock to yield trident output. Different branch shares the - convolution weight but uses different dilations to achieve multi-scale - output. 
- - / stage3(b0) \ - x - stem - stage1 - stage2 - stage3(b1) - output - \ stage3(b2) / - - Args: - depth (int): Depth of resnet, from {50, 101, 152}. - num_branch (int): Number of branches in TridentNet. - test_branch_idx (int): In inference, all 3 branches will be used - if `test_branch_idx==-1`, otherwise only branch with index - `test_branch_idx` will be used. - trident_dilations (tuple[int]): Dilations of different trident branch. - len(trident_dilations) should be equal to num_branch. - """ # noqa - - def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, - **kwargs): - - assert num_branch == len(trident_dilations) - assert depth in (50, 101, 152) - super(TridentResNet, self).__init__(depth, **kwargs) - assert self.num_stages == 3 - self.test_branch_idx = test_branch_idx - self.num_branch = num_branch - - last_stage_idx = self.num_stages - 1 - stride = self.strides[last_stage_idx] - dilation = trident_dilations - dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None - if self.plugins is not None: - stage_plugins = self.make_stage_plugins(self.plugins, - last_stage_idx) - else: - stage_plugins = None - planes = self.base_channels * 2**last_stage_idx - res_layer = make_trident_res_layer( - TridentBottleneck, - inplanes=(self.block.expansion * self.base_channels * - 2**(last_stage_idx - 1)), - planes=planes, - num_blocks=self.stage_blocks[last_stage_idx], - stride=stride, - trident_dilations=dilation, - style=self.style, - with_cp=self.with_cp, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - dcn=dcn, - plugins=stage_plugins, - test_branch_idx=self.test_branch_idx) - - layer_name = f'layer{last_stage_idx + 1}' - - self.__setattr__(layer_name, res_layer) - self.res_layers.pop(last_stage_idx) - self.res_layers.insert(last_stage_idx, layer_name) - - self._freeze_stages() diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/maskformer_head.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/maskformer_head.py deleted file mode 100644 index 566dc074059ef770892d2916e7c44fa54b0f8758..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/maskformer_head.py +++ /dev/null @@ -1,556 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init -from mmcv.cnn.bricks.transformer import (build_positional_encoding, - build_transformer_layer_sequence) -from mmcv.runner import force_fp32 - -from mmdet.core import build_assigner, build_sampler, multi_apply, reduce_mean -from mmdet.models.utils import preprocess_panoptic_gt -from ..builder import HEADS, build_loss -from .anchor_free_head import AnchorFreeHead - - -@HEADS.register_module() -class MaskFormerHead(AnchorFreeHead): - """Implements the MaskFormer head. - - See `Per-Pixel Classification is Not All You Need for Semantic - Segmentation `_ for details. - - Args: - in_channels (list[int]): Number of channels in the input feature map. - feat_channels (int): Number of channels for feature. - out_channels (int): Number of channels for output. - num_things_classes (int): Number of things. - num_stuff_classes (int): Number of stuff. - num_queries (int): Number of query in Transformer. - pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel - decoder. Defaults to None. 
- enforce_decoder_input_project (bool, optional): Whether to add a layer - to change the embed_dim of tranformer encoder in pixel decoder to - the embed_dim of transformer decoder. Defaults to False. - transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for - transformer decoder. Defaults to None. - positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for - transformer decoder position encoding. Defaults to None. - loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification - loss. Defaults to `CrossEntropyLoss`. - loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss. - Defaults to `FocalLoss`. - loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss. - Defaults to `DiceLoss`. - train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of - Maskformer head. - test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of Maskformer - head. - init_cfg (dict or list[dict], optional): Initialization config dict. - Defaults to None. - """ - - def __init__(self, - in_channels, - feat_channels, - out_channels, - num_things_classes=80, - num_stuff_classes=53, - num_queries=100, - pixel_decoder=None, - enforce_decoder_input_project=False, - transformer_decoder=None, - positional_encoding=None, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0, - class_weight=[1.0] * 133 + [0.1]), - loss_mask=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=20.0), - loss_dice=dict( - type='DiceLoss', - use_sigmoid=True, - activate=True, - naive_dice=True, - loss_weight=1.0), - train_cfg=None, - test_cfg=None, - init_cfg=None, - **kwargs): - super(AnchorFreeHead, self).__init__(init_cfg) - self.num_things_classes = num_things_classes - self.num_stuff_classes = num_stuff_classes - self.num_classes = self.num_things_classes + self.num_stuff_classes - self.num_queries = num_queries - - pixel_decoder.update( - in_channels=in_channels, - feat_channels=feat_channels, - out_channels=out_channels) - self.pixel_decoder = build_plugin_layer(pixel_decoder)[1] - self.transformer_decoder = build_transformer_layer_sequence( - transformer_decoder) - self.decoder_embed_dims = self.transformer_decoder.embed_dims - pixel_decoder_type = pixel_decoder.get('type') - if pixel_decoder_type == 'PixelDecoder' and ( - self.decoder_embed_dims != in_channels[-1] - or enforce_decoder_input_project): - self.decoder_input_proj = Conv2d( - in_channels[-1], self.decoder_embed_dims, kernel_size=1) - else: - self.decoder_input_proj = nn.Identity() - self.decoder_pe = build_positional_encoding(positional_encoding) - self.query_embed = nn.Embedding(self.num_queries, out_channels) - - self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) - self.mask_embed = nn.Sequential( - nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), - nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), - nn.Linear(feat_channels, out_channels)) - - self.test_cfg = test_cfg - self.train_cfg = train_cfg - if train_cfg: - self.assigner = build_assigner(train_cfg.get('assigner', None)) - self.sampler = build_sampler( - train_cfg.get('sampler', None), context=self) - - self.class_weight = loss_cls.get('class_weight', None) - self.loss_cls = build_loss(loss_cls) - self.loss_mask = build_loss(loss_mask) - self.loss_dice = build_loss(loss_dice) - - def init_weights(self): - if isinstance(self.decoder_input_proj, Conv2d): - caffe2_xavier_init(self.decoder_input_proj, bias=0) - - self.pixel_decoder.init_weights() - - for p in 
self.transformer_decoder.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs, - img_metas): - """Preprocess the ground truth for all images. - - Args: - gt_labels_list (list[Tensor]): Each is ground truth - labels of each bbox, with shape (num_gts, ). - gt_masks_list (list[BitmapMasks]): Each is ground truth - masks of each instances of a image, shape - (num_gts, h, w). - gt_semantic_seg (Tensor | None): Ground truth of semantic - segmentation with the shape (batch_size, n, h, w). - [0, num_thing_class - 1] means things, - [num_thing_class, num_class-1] means stuff, - 255 means VOID. It's None when training instance segmentation. - img_metas (list[dict]): List of image meta information. - - Returns: - tuple: a tuple containing the following targets. - - labels (list[Tensor]): Ground truth class indices\ - for all images. Each with shape (n, ), n is the sum of\ - number of stuff type and number of instance in a image. - - masks (list[Tensor]): Ground truth mask for each\ - image, each with shape (n, h, w). - """ - num_things_list = [self.num_things_classes] * len(gt_labels_list) - num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list) - if gt_semantic_segs is None: - gt_semantic_segs = [None] * len(gt_labels_list) - - targets = multi_apply(preprocess_panoptic_gt, gt_labels_list, - gt_masks_list, gt_semantic_segs, num_things_list, - num_stuff_list, img_metas) - labels, masks = targets - return labels, masks - - def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list, - gt_masks_list, img_metas): - """Compute classification and mask targets for all images for a decoder - layer. - - Args: - cls_scores_list (list[Tensor]): Mask score logits from a single - decoder layer for all images. Each with shape (num_queries, - cls_out_channels). - mask_preds_list (list[Tensor]): Mask logits from a single decoder - layer for all images. Each with shape (num_queries, h, w). - gt_labels_list (list[Tensor]): Ground truth class indices for all - images. Each with shape (n, ), n is the sum of number of stuff - type and number of instance in a image. - gt_masks_list (list[Tensor]): Ground truth mask for each image, - each with shape (n, h, w). - img_metas (list[dict]): List of image meta information. - - Returns: - tuple[list[Tensor]]: a tuple containing the following targets. - - labels_list (list[Tensor]): Labels of all images.\ - Each with shape (num_queries, ). - - label_weights_list (list[Tensor]): Label weights\ - of all images. Each with shape (num_queries, ). - - mask_targets_list (list[Tensor]): Mask targets of\ - all images. Each with shape (num_queries, h, w). - - mask_weights_list (list[Tensor]): Mask weights of\ - all images. Each with shape (num_queries, ). - - num_total_pos (int): Number of positive samples in\ - all images. - - num_total_neg (int): Number of negative samples in\ - all images. 
- """ - (labels_list, label_weights_list, mask_targets_list, mask_weights_list, - pos_inds_list, - neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list, - mask_preds_list, gt_labels_list, - gt_masks_list, img_metas) - - num_total_pos = sum((inds.numel() for inds in pos_inds_list)) - num_total_neg = sum((inds.numel() for inds in neg_inds_list)) - return (labels_list, label_weights_list, mask_targets_list, - mask_weights_list, num_total_pos, num_total_neg) - - def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, - img_metas): - """Compute classification and mask targets for one image. - - Args: - cls_score (Tensor): Mask score logits from a single decoder layer - for one image. Shape (num_queries, cls_out_channels). - mask_pred (Tensor): Mask logits for a single decoder layer for one - image. Shape (num_queries, h, w). - gt_labels (Tensor): Ground truth class indices for one image with - shape (n, ). n is the sum of number of stuff type and number - of instance in a image. - gt_masks (Tensor): Ground truth mask for each image, each with - shape (n, h, w). - img_metas (dict): Image informtation. - - Returns: - tuple[Tensor]: a tuple containing the following for one image. - - labels (Tensor): Labels of each image. - shape (num_queries, ). - - label_weights (Tensor): Label weights of each image. - shape (num_queries, ). - - mask_targets (Tensor): Mask targets of each image. - shape (num_queries, h, w). - - mask_weights (Tensor): Mask weights of each image. - shape (num_queries, ). - - pos_inds (Tensor): Sampled positive indices for each image. - - neg_inds (Tensor): Sampled negative indices for each image. - """ - target_shape = mask_pred.shape[-2:] - if gt_masks.shape[0] > 0: - gt_masks_downsampled = F.interpolate( - gt_masks.unsqueeze(1).float(), target_shape, - mode='nearest').squeeze(1).long() - else: - gt_masks_downsampled = gt_masks - - # assign and sample - assign_result = self.assigner.assign(cls_score, mask_pred, gt_labels, - gt_masks_downsampled, img_metas) - sampling_result = self.sampler.sample(assign_result, mask_pred, - gt_masks) - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - - # label target - labels = gt_labels.new_full((self.num_queries, ), - self.num_classes, - dtype=torch.long) - labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] - label_weights = gt_labels.new_ones(self.num_queries) - - # mask target - mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] - mask_weights = mask_pred.new_zeros((self.num_queries, )) - mask_weights[pos_inds] = 1.0 - - return (labels, label_weights, mask_targets, mask_weights, pos_inds, - neg_inds) - - @force_fp32(apply_to=('all_cls_scores', 'all_mask_preds')) - def loss(self, all_cls_scores, all_mask_preds, gt_labels_list, - gt_masks_list, img_metas): - """Loss function. - - Args: - all_cls_scores (Tensor): Classification scores for all decoder - layers with shape (num_decoder, batch_size, num_queries, - cls_out_channels). Note `cls_out_channels` should includes - background. - all_mask_preds (Tensor): Mask scores for all decoder layers with - shape (num_decoder, batch_size, num_queries, h, w). - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (n, ). n is the sum of number of stuff type - and number of instance in a image. - gt_masks_list (list[Tensor]): Ground truth mask for each image with - shape (n, h, w). - img_metas (list[dict]): List of image meta information. 
- - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - num_dec_layers = len(all_cls_scores) - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - losses_cls, losses_mask, losses_dice = multi_apply( - self.loss_single, all_cls_scores, all_mask_preds, - all_gt_labels_list, all_gt_masks_list, img_metas_list) - - loss_dict = dict() - # loss from the last decoder layer - loss_dict['loss_cls'] = losses_cls[-1] - loss_dict['loss_mask'] = losses_mask[-1] - loss_dict['loss_dice'] = losses_dice[-1] - # loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_mask_i, loss_dice_i in zip( - losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i - loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i - num_dec_layer += 1 - return loss_dict - - def loss_single(self, cls_scores, mask_preds, gt_labels_list, - gt_masks_list, img_metas): - """Loss function for outputs from a single decoder layer. - - Args: - cls_scores (Tensor): Mask score logits from a single decoder layer - for all images. Shape (batch_size, num_queries, - cls_out_channels). Note `cls_out_channels` should include - background. - mask_preds (Tensor): Mask logits for a pixel decoder for all - images. Shape (batch_size, num_queries, h, w). - gt_labels_list (list[Tensor]): Ground truth class indices for each - image, each with shape (n, ). n is the sum of number of stuff - types and number of instances in an image. - gt_masks_list (list[Tensor]): Ground truth mask for each image, - each with shape (n, h, w). - img_metas (list[dict]): List of image meta information. - - Returns: - tuple[Tensor]: Loss components for outputs from a single decoder\ - layer.
- """ - num_imgs = cls_scores.size(0) - cls_scores_list = [cls_scores[i] for i in range(num_imgs)] - mask_preds_list = [mask_preds[i] for i in range(num_imgs)] - - (labels_list, label_weights_list, mask_targets_list, mask_weights_list, - num_total_pos, - num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, - gt_labels_list, gt_masks_list, - img_metas) - # shape (batch_size, num_queries) - labels = torch.stack(labels_list, dim=0) - # shape (batch_size, num_queries) - label_weights = torch.stack(label_weights_list, dim=0) - # shape (num_total_gts, h, w) - mask_targets = torch.cat(mask_targets_list, dim=0) - # shape (batch_size, num_queries) - mask_weights = torch.stack(mask_weights_list, dim=0) - - # classfication loss - # shape (batch_size * num_queries, ) - cls_scores = cls_scores.flatten(0, 1) - labels = labels.flatten(0, 1) - label_weights = label_weights.flatten(0, 1) - - class_weight = cls_scores.new_tensor(self.class_weight) - loss_cls = self.loss_cls( - cls_scores, - labels, - label_weights, - avg_factor=class_weight[labels].sum()) - - num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) - num_total_masks = max(num_total_masks, 1) - - # extract positive ones - # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) - mask_preds = mask_preds[mask_weights > 0] - target_shape = mask_targets.shape[-2:] - - if mask_targets.shape[0] == 0: - # zero match - loss_dice = mask_preds.sum() - loss_mask = mask_preds.sum() - return loss_cls, loss_mask, loss_dice - - # upsample to shape of target - # shape (num_total_gts, h, w) - mask_preds = F.interpolate( - mask_preds.unsqueeze(1), - target_shape, - mode='bilinear', - align_corners=False).squeeze(1) - - # dice loss - loss_dice = self.loss_dice( - mask_preds, mask_targets, avg_factor=num_total_masks) - - # mask loss - # FocalLoss support input of shape (n, num_class) - h, w = mask_preds.shape[-2:] - # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1) - mask_preds = mask_preds.reshape(-1, 1) - # shape (num_total_gts, h, w) -> (num_total_gts * h * w) - mask_targets = mask_targets.reshape(-1) - # target is (1 - mask_targets) !!! - loss_mask = self.loss_mask( - mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w) - - return loss_cls, loss_mask, loss_dice - - def forward(self, feats, img_metas): - """Forward function. - - Args: - feats (list[Tensor]): Features from the upstream network, each - is a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple: a tuple contains two elements. - - all_cls_scores (Tensor): Classification scores for each\ - scale level. Each is a 4D-tensor with shape\ - (num_decoder, batch_size, num_queries, cls_out_channels).\ - Note `cls_out_channels` should includes background. - - all_mask_preds (Tensor): Mask scores for each decoder\ - layer. Each with shape (num_decoder, batch_size,\ - num_queries, h, w). - """ - batch_size = len(img_metas) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - padding_mask = feats[-1].new_ones( - (batch_size, input_img_h, input_img_w), dtype=torch.float32) - for i in range(batch_size): - img_h, img_w, _ = img_metas[i]['img_shape'] - padding_mask[i, :img_h, :img_w] = 0 - padding_mask = F.interpolate( - padding_mask.unsqueeze(1), - size=feats[-1].shape[-2:], - mode='nearest').to(torch.bool).squeeze(1) - # when backbone is swin, memory is output of last stage of swin. - # when backbone is r50, memory is output of tranformer encoder. 
- mask_features, memory = self.pixel_decoder(feats, img_metas) - pos_embed = self.decoder_pe(padding_mask) - memory = self.decoder_input_proj(memory) - # shape (batch_size, c, h, w) -> (h*w, batch_size, c) - memory = memory.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1) - # shape (batch_size, h * w) - padding_mask = padding_mask.flatten(1) - # shape = (num_queries, embed_dims) - query_embed = self.query_embed.weight - # shape = (num_queries, batch_size, embed_dims) - query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1) - target = torch.zeros_like(query_embed) - # shape (num_decoder, num_queries, batch_size, embed_dims) - out_dec = self.transformer_decoder( - query=target, - key=memory, - value=memory, - key_pos=pos_embed, - query_pos=query_embed, - key_padding_mask=padding_mask) - # shape (num_decoder, batch_size, num_queries, embed_dims) - out_dec = out_dec.transpose(1, 2) - - # cls_scores - all_cls_scores = self.cls_embed(out_dec) - - # mask_preds - mask_embed = self.mask_embed(out_dec) - all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, - mask_features) - - return all_cls_scores, all_mask_preds - - def forward_train(self, - feats, - img_metas, - gt_bboxes, - gt_labels, - gt_masks, - gt_semantic_seg, - gt_bboxes_ignore=None): - """Forward function for training mode. - - Args: - feats (list[Tensor]): Multi-level features from the upstream - network, each is a 4D-tensor. - img_metas (list[Dict]): List of image information. - gt_bboxes (list[Tensor]): Each element is ground truth bboxes of - the image, shape (num_gts, 4). Not used here. - gt_labels (list[Tensor]): Each element is ground truth labels of - each box, shape (num_gts,). - gt_masks (list[BitmapMasks]): Each element is masks of instances - of a image, shape (num_gts, h, w). - gt_semantic_seg (list[tensor] | None): Each element is the ground - truth of semantic segmentation with the shape (N, H, W). - [0, num_thing_class - 1] means things, - [num_thing_class, num_class-1] means stuff, - 255 means VOID. It's None when training instance segmentation. - gt_bboxes_ignore (list[Tensor]): Ground truth bboxes to be - ignored. Defaults to None. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # not consider ignoring bboxes - assert gt_bboxes_ignore is None - - # forward - all_cls_scores, all_mask_preds = self(feats, img_metas) - - # preprocess ground truth - gt_labels, gt_masks = self.preprocess_gt(gt_labels, gt_masks, - gt_semantic_seg, img_metas) - - # loss - losses = self.loss(all_cls_scores, all_mask_preds, gt_labels, gt_masks, - img_metas) - - return losses - - def simple_test(self, feats, img_metas, **kwargs): - """Test without augmentaton. - - Args: - feats (list[Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple: A tuple contains two tensors. - - - mask_cls_results (Tensor): Mask classification logits,\ - shape (batch_size, num_queries, cls_out_channels). - Note `cls_out_channels` should includes background. - - mask_pred_results (Tensor): Mask logits, shape \ - (batch_size, num_queries, h, w). 
- """ - all_cls_scores, all_mask_preds = self(feats, img_metas) - mask_cls_results = all_cls_scores[-1] - mask_pred_results = all_mask_preds[-1] - - # upsample masks - img_shape = img_metas[0]['batch_input_shape'] - mask_pred_results = F.interpolate( - mask_pred_results, - size=(img_shape[0], img_shape[1]), - mode='bilinear', - align_corners=False) - - return mask_cls_results, mask_pred_results diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/__init__.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/__init__.py deleted file mode 100644 index 6f2fa823fb35fdd90c07065cc93238d08385ce8b..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .bfp import BFP -from .channel_mapper import ChannelMapper -from .ct_resnet_neck import CTResNetNeck -from .dilated_encoder import DilatedEncoder -from .dyhead import DyHead -from .fpg import FPG -from .fpn import FPN -from .fpn_carafe import FPN_CARAFE -from .hrfpn import HRFPN -from .nas_fpn import NASFPN -from .nasfcos_fpn import NASFCOS_FPN -from .pafpn import PAFPN -from .rfp import RFP -from .ssd_neck import SSDNeck -from .yolo_neck import YOLOV3Neck -from .yolox_pafpn import YOLOXPAFPN - -__all__ = [ - 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', - 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder', - 'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead' -] diff --git a/spaces/ronvolutional/sk-node/app/README.md b/spaces/ronvolutional/sk-node/app/README.md deleted file mode 100644 index 5c91169b0ca6508bb24301c957a9edea5abf2b01..0000000000000000000000000000000000000000 --- a/spaces/ronvolutional/sk-node/app/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# create-svelte - -Everything you need to build a Svelte project, powered by [`create-svelte`](https://github.com/sveltejs/kit/tree/master/packages/create-svelte). - -## Creating a project - -If you're seeing this, you've probably already done this step. Congrats! - -```bash -# create a new project in the current directory -npm create svelte@latest - -# create a new project in my-app -npm create svelte@latest my-app -``` - -## Developing - -Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server: - -```bash -npm run dev - -# or start the server and open the app in a new browser tab -npm run dev -- --open -``` - -## Building - -To create a production version of your app: - -```bash -npm run build -``` - -You can preview the production build with `npm run preview`. - -> To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment. diff --git a/spaces/ronvolutional/sk-node/app/src/app.html b/spaces/ronvolutional/sk-node/app/src/app.html deleted file mode 100644 index b555f2469a3d6e76d93e70e96ed3fbcf9c470480..0000000000000000000000000000000000000000 --- a/spaces/ronvolutional/sk-node/app/src/app.html +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - %sveltekit.head% - - -
      %sveltekit.body%
      - - diff --git a/spaces/ruslanmv/Clone-Your-Voice/vocoder/hparams.py b/spaces/ruslanmv/Clone-Your-Voice/vocoder/hparams.py deleted file mode 100644 index c1de9f7dcc2926735b80a28ed1226ff1b5824753..0000000000000000000000000000000000000000 --- a/spaces/ruslanmv/Clone-Your-Voice/vocoder/hparams.py +++ /dev/null @@ -1,44 +0,0 @@ -from synthesizer.hparams import hparams as _syn_hp - - -# Audio settings------------------------------------------------------------------------ -# Match the values of the synthesizer -sample_rate = _syn_hp.sample_rate -n_fft = _syn_hp.n_fft -num_mels = _syn_hp.num_mels -hop_length = _syn_hp.hop_size -win_length = _syn_hp.win_size -fmin = _syn_hp.fmin -min_level_db = _syn_hp.min_level_db -ref_level_db = _syn_hp.ref_level_db -mel_max_abs_value = _syn_hp.max_abs_value -preemphasis = _syn_hp.preemphasis -apply_preemphasis = _syn_hp.preemphasize - -bits = 9 # bit depth of signal -mu_law = True # Recommended to suppress noise if using raw bits in hp.voc_mode - # below - - -# WAVERNN / VOCODER -------------------------------------------------------------------------------- -voc_mode = 'RAW' # either 'RAW' (softmax on raw bits) or 'MOL' (sample from -# mixture of logistics) -voc_upsample_factors = (5, 5, 8) # NB - this needs to correctly factorise hop_length -voc_rnn_dims = 512 -voc_fc_dims = 512 -voc_compute_dims = 128 -voc_res_out_dims = 128 -voc_res_blocks = 10 - -# Training -voc_batch_size = 100 -voc_lr = 1e-4 -voc_gen_at_checkpoint = 5 # number of samples to generate at each checkpoint -voc_pad = 2 # this will pad the input so that the resnet can 'see' wider - # than input length -voc_seq_len = hop_length * 5 # must be a multiple of hop_length - -# Generating / Synthesizing -voc_gen_batched = True # very fast (realtime+) single utterance batched generation -voc_target = 8000 # target number of samples to be generated in each batch entry -voc_overlap = 400 # number of samples for crossfading between batches diff --git a/spaces/sahshd/ChuanhuChatGPT/Dockerfile b/spaces/sahshd/ChuanhuChatGPT/Dockerfile deleted file mode 100644 index 335c2dba28ba8c365de9306858462a59dea25f28..0000000000000000000000000000000000000000 --- a/spaces/sahshd/ChuanhuChatGPT/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.9 as builder -RUN apt-get update && apt-get install -y build-essential -COPY requirements.txt . -COPY requirements_advanced.txt . -RUN pip install --user -r requirements.txt -# RUN pip install --user -r requirements_advanced.txt - -FROM python:3.9 -MAINTAINER iskoldt -COPY --from=builder /root/.local /root/.local -ENV PATH=/root/.local/bin:$PATH -COPY . 
/app -WORKDIR /app -ENV dockerrun yes -CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"] diff --git a/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/app.py b/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/app.py deleted file mode 100644 index 7163191e31b990134dcf6c397ddb71a88d30cdfc..0000000000000000000000000000000000000000 --- a/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/app.py +++ /dev/null @@ -1,123 +0,0 @@ -import gradio as gr -import os -import numpy as np -os.system("pip install pdfminer.six rank_bm25 torch transformers") - -from gradio.mix import Series -#import re -from rank_bm25 import BM25Okapi -import string -import torch -from transformers import pipeline -import pdfminer -from pdfminer.high_level import extract_text - -len_doc = 500 -overlap = 15 -param_top_k_retriver = 15 -param_top_k_ranker = 3 - -def read_pdf(file): - text = extract_text(file.name) - # Split text into smaller docs - docs = [] - - i = 0 - while i < len(text): - docs.append(text[i:i+len_doc]) - i = i + len_doc - overlap - return docs - - # We use BM25 as retriver which will do 1st round of candidate filtering based on word based matching - -def bm25_tokenizer(text): - stop_w = ['a', 'the', 'am', 'is' , 'are', 'who', 'how', 'where', 'when', 'why', 'what'] - tokenized_doc = [] - for token in text.lower().split(): - token = token.strip(string.punctuation) - - if len(token) > 0 and token not in stop_w: - tokenized_doc.append(token) - return tokenized_doc - -def retrieval(query, top_k_retriver, docs, bm25_): - - bm25_scores = bm25_.get_scores(bm25_tokenizer(query)) - top_n = np.argsort(bm25_scores)[::-1][:top_k_retriver] - bm25_hits = [{'corpus_id': idx, - 'score': bm25_scores[idx], - 'docs':docs[idx]} for idx in top_n if bm25_scores[idx] > 0] - bm25_hits = sorted(bm25_hits, key=lambda x: x['score'], reverse=True) - - return bm25_hits - -def qa_ranker(query, docs_, top_k_ranker, qa_model): - ans = [] - for doc in docs_: - answer = qa_model(question = query, - context = doc) - answer['doc'] = doc - ans.append(answer) - return sorted(ans, key=lambda x: x['score'], reverse=True)[:top_k_ranker] - -def cstr(s, color='black'): - return "{}".format(color, s) -def cstr_bold(s, color='black'): - return "{}".format(color, s) -def cstr_break(s, color='black'): - return "
      {}
      ".format(color, s) - -def print_colored(text, start_idx, end_idx, confidence): - conf_str = '- Confidence: ' + confidence - a = cstr(' '.join([text[:start_idx], \ - cstr_bold(text[start_idx:end_idx], color='blue'), \ - text[end_idx:], \ - cstr_break(conf_str, color='grey')]), color='black') - return a - -def final_qa_pipeline(file, query, model_nm): - docs = read_pdf(file) - tokenized_corpus = [] - for doc in docs: - tokenized_corpus.append(bm25_tokenizer(doc)) - - bm25 = BM25Okapi(tokenized_corpus) - - top_k_retriver, top_k_ranker = param_top_k_retriver, param_top_k_ranker - lvl1 = retrieval(query, top_k_retriver, docs, bm25) - - qa_model = pipeline("question-answering", - #model = "deepset/minilm-uncased-squad2") - model = "deepset/"+ str(model_nm)) - - if len(lvl1) > 0: - fnl_rank = qa_ranker(query, [l["docs"] for l in lvl1], top_k_ranker,qa_model) - top1 = print_colored(fnl_rank[0]['doc'], fnl_rank[0]['start'], fnl_rank[0]['end'], str(np.round(100*fnl_rank[0]["score"],1))+"%") - if len(lvl1)>1: - top2 = print_colored(fnl_rank[1]['doc'], fnl_rank[1]['start'], fnl_rank[1]['end'], str(np.round(100*fnl_rank[1]["score"],1))+"%") - else: - top2 = "None" - return (top1, top2) - else: - return ("No match","No match") - -examples = [ - [os.path.abspath("dbs-annual-report-2020.pdf"), "how many times has DBS won Best bank in the world ?","minilm-uncased-squad2"], - [os.path.abspath("dbs-annual-report-2020.pdf"), "how much dividend was paid to shareholders ?","minilm-uncased-squad2"], - [os.path.abspath("dbs-annual-report-2020.pdf"), "what is the sustainability focus ?","minilm-uncased-squad2"], - [os.path.abspath("NASDAQ_AAPL_2020.pdf"), "how much are the outstanding shares ?","minilm-uncased-squad2"], - [os.path.abspath("NASDAQ_AAPL_2020.pdf"), "what is competitors strategy ?","minilm-uncased-squad2"], - [os.path.abspath("NASDAQ_AAPL_2020.pdf"), "who is the chief executive officer ?","minilm-uncased-squad2"], - [os.path.abspath("NASDAQ_MSFT_2020.pdf"), "How much is the guided revenue for next quarter?","minilm-uncased-squad2"], -] - -iface = gr.Interface( - fn = final_qa_pipeline, - inputs = [gr.inputs.File(label="input pdf file"), gr.inputs.Textbox(label="Question:"), gr.inputs.Dropdown(choices=["minilm-uncased-squad2","roberta-base-squad2"],label="Model")], - outputs = [gr.outputs.HTML(label="Top 1 answer"), gr.outputs.HTML(label="Top 2 answer")], - examples=examples, - theme = "grass", - title = "Question Answering on annual reports", - description = "Navigate long annual reports by using Machine learning to answer your questions. \nSimply upload any annual report pdf you are interested in and ask model a question OR load an example from below." - ) -iface.launch(enable_queue = True) \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/21 Jump Street 720p Yify 208 UPD.md b/spaces/scedlatioru/img-to-music/example/21 Jump Street 720p Yify 208 UPD.md deleted file mode 100644 index 37ee8e323ecd7aafa81a3bc87a49cc0ca3db8faf..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/21 Jump Street 720p Yify 208 UPD.md +++ /dev/null @@ -1,6 +0,0 @@ -

      21 jump street 720p yify 208


      Download ☆☆☆☆☆ https://gohhs.com/2uEAtr



      -
      -Iron Man 3[720p]1.33GB, 8 years, Movie, 5, 1.33 GB, 0, 0. Magnet Link · Iron Man Trilogy(2008-2013) 720p BRRiP X264 AAC 5.1 [Team Nanban, 8 years, Movie ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/IntroduccionALaPsicologiaRobertFeldmanPdf.md b/spaces/scedlatioru/img-to-music/example/IntroduccionALaPsicologiaRobertFeldmanPdf.md deleted file mode 100644 index 3c2fc7b4b7b1333b6e926a10848140744793729f..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/IntroduccionALaPsicologiaRobertFeldmanPdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

      IntroduccionALaPsicologiaRobertFeldmanPdf


      Download Zip 🆗 https://gohhs.com/2uEABa



      -
      -INTRODUCCIÓN. A LA PSICOLOGÍA. Un enfoque ecosistémico. Rogelio Díaz-Guerrero. Rolando Díaz-Loving. ISBN 978-968-24-5406-6. CONCEPTOS. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/Microsoft Flight Simulator X CPY Crack High Quality Torrent Free Download [2020].md b/spaces/scedlatioru/img-to-music/example/Microsoft Flight Simulator X CPY Crack High Quality Torrent Free Download [2020].md deleted file mode 100644 index ae180abcd2452bdcf410e74231dd882097790dad..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Microsoft Flight Simulator X CPY Crack High Quality Torrent Free Download [2020].md +++ /dev/null @@ -1,178 +0,0 @@ - -

      Microsoft Flight Simulator X CPY Crack Torrent Free Download [2020]

      - -

      If you are a fan of flight simulation games, you might have heard of Microsoft Flight Simulator X, one of the most realistic and immersive flight simulators ever created. Microsoft Flight Simulator X is the tenth and final installment of the Microsoft Flight Simulator series, which was first launched in 1982. It was released in 2006 for Windows and is still popular among flight enthusiasts and gamers.

      - -

      Microsoft Flight Simulator X offers a wide range of features and options that make it a unique and enjoyable experience. You can choose from over 80 different aircraft, ranging from light planes to wide-body jets, and fly them in various weather conditions and scenarios. You can also create your own flight plan and fly anywhere on the planet, with over 24,000 airports and realistic scenery based on satellite imagery. You can also take on various missions and challenges, such as rescue operations, air races, combat scenarios, and more.

      -

      Microsoft Flight Simulator X CPY Crack Torrent Free Download [2020]


      Download Filehttps://gohhs.com/2uEzTe



      - -

      However, if you want to play Microsoft Flight Simulator X on your PC, you might face some difficulties. The game is not compatible with newer versions of Windows, such as Windows 10, and requires a lot of system resources to run smoothly. Moreover, the game is not available for free, and you need to purchase it from online stores or physical copies.

      - -

      How to Download Microsoft Flight Simulator X CPY Crack Torrent for Free?

      - -

      If you want to download Microsoft Flight Simulator X CPY Crack Torrent for free, you can follow these simple steps:

      - -
        -
      1. Visit a torrent website that offers Microsoft Flight Simulator X CPY Crack Torrent, such as SkidrowCPY.Games or CPYGames.Site.
      2. -
      3. Search for Microsoft Flight Simulator X CPY Crack Torrent using the search bar or browse the categories.
      4. -
      5. Select the torrent file that has the most seeders and leechers and click on the download link.
      6. -
      7. You will need a torrent client software to download the torrent file, such as uTorrent or BitTorrent.
      8. -
      9. Open the torrent file with your torrent client and choose a location to save the game files.
      10. -
      11. Wait for the download to complete. It might take some time depending on your internet speed and the size of the game files.
      12. -
      13. Once the download is finished, you will have a folder with the game files and a crack folder.
      14. -
      15. Copy the contents of the crack folder and paste them into the game folder, replacing the original files.
      16. -
      17. Run the game as administrator and enjoy!
      18. -
      - -

      What are the Benefits of Downloading Microsoft Flight Simulator X CPY Crack Torrent?

      - -

      By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can enjoy many benefits such as:

      - -
        -
      • You can play Microsoft Flight Simulator X on your PC without any compatibility issues or errors.
      • -
      • You can play Microsoft Flight Simulator X for free without spending any money or registering any account.
      • -
      • You can play Microsoft Flight Simulator X offline without any internet connection or online activation.
      • -
      • You can play Microsoft Flight Simulator X with all the features and options unlocked and available.
      • -
      • You can play Microsoft Flight Simulator X with high graphics quality and performance.
      • -
      - -

      Conclusion

      - -

      Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can play this amazing game on your PC for free and without any hassle. So what are you waiting for? Download Microsoft Flight Simulator X CPY Crack Torrent today and start your flight adventure!

      -

      What is CPY and How Does It Work?

      - -

      CPY is a group of hackers and crackers who are known for cracking and releasing various games for free. CPY is one of the most popular and respected groups in the scene, and they have cracked many games that were protected by strong anti-piracy measures, such as Denuvo.

      - -

      CPY works by reverse-engineering the game files and finding the encryption keys and algorithms that are used to protect the game from unauthorized copying and modification. They then create a crack file that bypasses or removes these protections and allows the game to run without any restrictions or limitations.

      -

      - -

      CPY releases their cracks and games on various torrent websites, where users can download them for free. However, CPY does not provide any support or updates for their cracks and games, and they are not responsible for any damages or issues that may arise from using them.

      - -

      What are the Risks of Downloading Microsoft Flight Simulator X CPY Crack Torrent?

      - -

      While downloading Microsoft Flight Simulator X CPY Crack Torrent may seem tempting and convenient, it also comes with some risks and disadvantages that you should be aware of. Some of the risks are:

      - -
        -
      • You may download a fake or malicious file that may harm your computer or steal your personal information.
      • -
      • You may violate the intellectual property rights of the game developers and publishers, and face legal consequences or penalties.
      • -
      • You may not be able to access the online features and updates of the game, such as multiplayer mode, patches, DLCs, etc.
      • -
      • You may encounter bugs, errors, crashes, or performance issues that may ruin your gaming experience.
      • -
      • You may miss out on the satisfaction and enjoyment of buying and playing a legitimate copy of the game.
      • -
      - -

      How to Buy and Play Microsoft Flight Simulator X Legally?

      - -

      If you want to buy and play Microsoft Flight Simulator X legally, you can follow these simple steps:

      - -
        -
      1. Visit an online store that sells Microsoft Flight Simulator X, such as Steam or Amazon.
      2. -
      3. Search for Microsoft Flight Simulator X using the search bar or browse the categories.
      4. -
      5. Select the edition and version of the game that you want to buy.
      6. -
      7. Add the game to your cart and proceed to checkout.
      8. -
      9. Enter your payment details and confirm your order.
      10. -
      11. You will receive an email with a download link or a product key for the game.
      12. -
      13. Download and install the game on your PC using the link or the key.
      14. -
      15. Launch the game and enjoy!
      16. -
      - -

      Conclusion

      - -

      Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By buying and playing Microsoft Flight Simulator X legally, you can support the game developers and publishers, and enjoy all the features and benefits of the game. So what are you waiting for? Buy Microsoft Flight Simulator X today and start your flight adventure!

      -

      What are the System Requirements for Microsoft Flight Simulator X?

      - -

      Microsoft Flight Simulator X is a demanding game that requires a powerful PC to run smoothly. Here are the minimum and recommended system requirements for Microsoft Flight Simulator X:

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      MinimumRecommended
      CPU: Intel Core 2 Duo E8400 or AMD Athlon X2 6000+CPU: Intel Core i5-3570K or AMD FX-8320
      RAM: 2 GBRAM: 8 GB
      OS: Windows XP SP2 or laterOS: Windows 10 64-bit
      GPU: Nvidia GeForce 8800 GT or AMD Radeon HD 5670GPU: Nvidia GeForce GTX 1050 Ti or AMD Radeon RX 560
      VRAM: 256 MBVRAM: 4 GB
      DirectX: Version 9.0cDirectX: Version 11
      HDD: 30 GBHDD: 30 GB
      Sound Card: DirectX Compatible Sound Card with latest driversSound Card: DirectX Compatible Sound Card with latest drivers
      - -

      You can check your PC specifications and compare them with the system requirements using various online tools, such as Can You Run It or PCGameBenchmark.

      - -

      How to Optimize Microsoft Flight Simulator X for Better Performance?

      - -

      If you are facing any issues or problems while playing Microsoft Flight Simulator X, such as low FPS, stuttering, lagging, crashing, etc., you can try some of these tips and tricks to optimize Microsoft Flight Simulator X for better performance:

      - -
        -
      • Update your graphics card drivers and DirectX to the latest versions.
      • -
      • Close any unnecessary background programs and processes that may consume your CPU, RAM, or bandwidth.
      • -
      • Adjust your graphics settings in the game options menu according to your PC specifications and preferences. You can lower some settings, such as resolution, anti-aliasing, texture quality, shadows, etc., to improve your FPS and reduce lag.
      • -
      • Clean your PC from any junk files, viruses, malware, etc., that may affect your system performance and stability.
      • -
      • Defragment your hard drive to improve your loading times and disk speed.
      • -
      • Use a game booster software, such as Razer Cortex or Wise Game Booster, to optimize your PC settings and resources for gaming.
      • -
      • Use a VPN service, such as NordVPN or ExpressVPN, to improve your online connection and reduce ping.
      • -
      • Contact Microsoft support or visit their official forums if you encounter any bugs, errors, or glitches that may prevent you from playing the game properly.
      • -
      - -

      Conclusion

      - -

      Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can play this amazing game on your PC for free and without any hassle. So what are you waiting for? Download Microsoft Flight Simulator X CPY Crack Torrent today and start your flight adventure!

      -

      How to Install Microsoft Flight Simulator X CPY Crack Torrent?

      - -

      After downloading Microsoft Flight Simulator X CPY Crack Torrent, you need to install it on your PC to play the game. Here are the steps to install Microsoft Flight Simulator X CPY Crack Torrent:

      - -
        -
      1. Extract the downloaded torrent file using a software such as WinRAR or 7-Zip.
      2. -
      3. Open the extracted folder and run the setup.exe file as administrator.
      4. -
      5. Follow the instructions on the screen and choose a location to install the game.
      6. -
      7. Wait for the installation to complete. It may take some time depending on your PC specifications and disk speed.
      8. -
      9. Copy the contents of the crack folder and paste them into the game installation folder, replacing the original files.
      10. -
      11. Run the game as administrator and enjoy!
      12. -
      - -

      How to Troubleshoot Microsoft Flight Simulator X CPY Crack Torrent?

      - -

      If you encounter any problems or errors while playing Microsoft Flight Simulator X CPY Crack Torrent, such as black screen, missing DLL files, activation error, etc., you can try some of these solutions to troubleshoot Microsoft Flight Simulator X CPY Crack Torrent:

      - -
        -
      • Make sure your PC meets the minimum or recommended system requirements for Microsoft Flight Simulator X.
      • -
      • Make sure your graphics card drivers and DirectX are updated to the latest versions.
      • -
      • Make sure you have installed Microsoft Flight Simulator X CPY Crack Torrent correctly and copied the crack files properly.
      • -
      • Make sure you have disabled your antivirus or firewall software before running the game, as they may interfere with the crack or block the game files.
      • -
      • Make sure you have run the game as administrator and given it full permissions.
      • -
      • Make sure you have installed any required software or components for the game, such as Microsoft Visual C++, .NET Framework, etc.
      • -
      • Contact Microsoft support or visit their official forums if you encounter any bugs, glitches, or issues that may prevent you from playing the game properly.
      • -
      - -

      Conclusion

      - -

      Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can play this amazing game on your PC for free and without any hassle. So what are you waiting for? Download Microsoft Flight Simulator X CPY Crack Torrent today and start your flight adventure!

      -

      Conclusion

      - -

      Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can play this amazing game on your PC for free and without any hassle. So what are you waiting for? Download Microsoft Flight Simulator X CPY Crack Torrent today and start your flight adventure!

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Punto De Ventas Plus 5.95 ((INSTALL)) Keygen.md b/spaces/scedlatioru/img-to-music/example/Punto De Ventas Plus 5.95 ((INSTALL)) Keygen.md deleted file mode 100644 index faca091eeca3aec8e21bbce22e70ee629b562423..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Punto De Ventas Plus 5.95 ((INSTALL)) Keygen.md +++ /dev/null @@ -1,14 +0,0 @@ -

      punto de ventas plus 5.95 keygen


      Download Ziphttps://gohhs.com/2uEzk7



      -
      -pintalibro.es/project-waste-management/paint-factory/genvex-hardwares-2015-premier-series-of-collecting-impression-cret-creative-stop-by-bristol-2015-by-bristol-plaza/ It is apparent that you're excited by some sort of website. Its look is excellent and the content material on the website appears to be very useful. Is perhaps the information on this site that is a beginner? - -pintalibro.es/project-waste-management/paint-factory/genvex-hardwares-2015-premier-series-of-collecting-impression-cret-creative-stop-by-bristol-2015-by-bristol-plaza/ The very first thing you want to do is have a niche that you would like to participate in. Are you actually seeking to create a site about Grommets? Do you have any special objectives in mind? What is your mission for the site? You must answer these questions in your mind before you begin. - -pintalibro.es/project-waste-management/paint-factory/genvex-hardwares-2015-premier-series-of-collecting-impression-cret-creative-stop-by-bristol-2015-by-bristol-plaza/ There is not any doubt that you intend to do a website about Grommets. Your blog is for those who really would love to obtain more information about the subject. - -pintalibro.es/project-waste-management/paint-factory/genvex-hardwares-2015-premier-series-of-collecting-impression-cret-creative-stop-by-bristol-2015-by-bristol-plaza/ Do you want to really get the visitor traffic that you may benefit from? If yes, then a very good approach to go about this is to include a few popular topics on your site. If you can really keep up with the latest news, it is a superb means to provide your blog the interest that it needs and need to be known. - -pintalibro.es/project-waste-management/paint 4fefd39f24
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/Zooskool Carmen Nubian Petlove.md b/spaces/scedlatioru/img-to-music/example/Zooskool Carmen Nubian Petlove.md deleted file mode 100644 index 573e421bc432f55cbfde3890f8c4b92b0507b9cf..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Zooskool Carmen Nubian Petlove.md +++ /dev/null @@ -1,28 +0,0 @@ -

      Zooskool Carmen Nubian Petlove


      Downloadhttps://gohhs.com/2uEzoQ



      - -Monster truck porn - disney girl porn - -A very young boy wants to know if his father will be home soon. Carmen contida abriendose los pechos - Video de grupos de hombres enojados por complacer a su pareja. - -A young boy is wondering what his father is doing today. New York Lesbian – Lesbian Party · Lesbians are going to the gym. - -A very young boy wants to know if his father will be home soon. A young boy and a very young girl are busy talking. A young boy wants to know if his father will be home soon. A very young boy wants to know if his father will be home soon. A young boy and a very young girl are busy talking. A very young boy wants to know if his father will be home soon. A young boy and a very young girl are busy talking. A very young boy wants to know if his father will be home soon. A very young boy wants to know if his father will be home soon. A young boy and a very young girl are busy talking. A very young boy wants to know if his father will be home soon.Evaluation of an education and research alliance at a university cancer center. - -Developments in oncology practice have led to a change in expectations regarding the educational requirements of providers in clinical and research settings. In response, the authors developed and implemented a college-based training program for a newly established cancer center. The authors describe the development of the program, the reactions of participants, and the results of the program. The most significant effect of the program was the involvement of first-year medical students in basic science research. The program was also found to have positive effects on interprofessional education in the cancer center and on the development of junior faculty.We are Proud to be your local professional plumbing and water - -supply specialists - -We can take care of your needs! - -Whether it be a plumbing installation, repair or replacement, our technicians will provide you with friendly and professional service at a reasonable rate. - -Vital Services of Illinois - -When it comes to hiring a licensed plumber, you want to find someone who you can trust, as well as someone who you can rely on to get the job done right. - -Our company is proud to be an Illinois plumbing and water supply specialist. - -Our team of well trained technicians are well versed in both residential and commercial plumbing, as well as water systems. 4fefd39f24
      -
      -
      -

      diff --git a/spaces/sdeeas/ChuanhuChatGPT/modules/base_model.py b/spaces/sdeeas/ChuanhuChatGPT/modules/base_model.py deleted file mode 100644 index 2b55623f6b0989f60d818be6e0e77f5948484b82..0000000000000000000000000000000000000000 --- a/spaces/sdeeas/ChuanhuChatGPT/modules/base_model.py +++ /dev/null @@ -1,561 +0,0 @@ -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import commentjson as cjson -import os -import sys -import requests -import urllib3 -import traceback - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp -from enum import Enum - -from .presets import * -from .llama_func import * -from .utils import * -from . import shared -from .config import retrieve_proxy - - -class ModelType(Enum): - Unknown = -1 - OpenAI = 0 - ChatGLM = 1 - LLaMA = 2 - XMChat = 3 - - @classmethod - def get_type(cls, model_name: str): - model_type = None - model_name_lower = model_name.lower() - if "gpt" in model_name_lower: - model_type = ModelType.OpenAI - elif "chatglm" in model_name_lower: - model_type = ModelType.ChatGLM - elif "llama" in model_name_lower or "alpaca" in model_name_lower: - model_type = ModelType.LLaMA - elif "xmchat" in model_name_lower: - model_type = ModelType.XMChat - else: - model_type = ModelType.Unknown - return model_type - - -class BaseLLMModel: - def __init__( - self, - model_name, - system_prompt="", - temperature=1.0, - top_p=1.0, - n_choices=1, - stop=None, - max_generation_token=None, - presence_penalty=0, - frequency_penalty=0, - logit_bias=None, - user="", - ) -> None: - self.history = [] - self.all_token_counts = [] - self.model_name = model_name - self.model_type = ModelType.get_type(model_name) - try: - self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name] - except KeyError: - self.token_upper_limit = DEFAULT_TOKEN_LIMIT - self.interrupted = False - self.system_prompt = system_prompt - self.api_key = None - self.need_api_key = False - self.single_turn = False - - self.temperature = temperature - self.top_p = top_p - self.n_choices = n_choices - self.stop_sequence = stop - self.max_generation_token = None - self.presence_penalty = presence_penalty - self.frequency_penalty = frequency_penalty - self.logit_bias = logit_bias - self.user_identifier = user - - def get_answer_stream_iter(self): - """stream predict, need to be implemented - conversations are stored in self.history, with the most recent question, in OpenAI format - should return a generator, each time give the next word (str) in the answer - """ - logging.warning("stream predict not implemented, using at once predict instead") - response, _ = self.get_answer_at_once() - yield response - - def get_answer_at_once(self): - """predict at once, need to be implemented - conversations are stored in self.history, with the most recent question, in OpenAI format - Should return: - the answer (str) - total token count (int) - """ - logging.warning("at once predict not implemented, using stream predict instead") - response_iter = self.get_answer_stream_iter() - count = 0 - for response in response_iter: - count += 1 - return response, sum(self.all_token_counts) + count - - def billing_info(self): - """get billing infomation, inplement if needed""" - logging.warning("billing info not implemented, using default") - return BILLING_NOT_APPLICABLE_MSG - - def count_token(self, user_input): - """get token count from input, implement if needed""" - logging.warning("token count not implemented, using 
default") - return len(user_input) - - def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""): - def get_return_value(): - return chatbot, status_text - - status_text = i18n("开始实时传输回答……") - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - - user_token_count = self.count_token(inputs) - self.all_token_counts.append(user_token_count) - logging.debug(f"输入token计数: {user_token_count}") - - stream_iter = self.get_answer_stream_iter() - - for partial_text in stream_iter: - chatbot[-1] = (chatbot[-1][0], partial_text + display_append) - self.all_token_counts[-1] += 1 - status_text = self.token_message() - yield get_return_value() - if self.interrupted: - self.recover() - break - self.history.append(construct_assistant(partial_text)) - - def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""): - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - if fake_input is not None: - user_token_count = self.count_token(fake_input) - else: - user_token_count = self.count_token(inputs) - self.all_token_counts.append(user_token_count) - ai_reply, total_token_count = self.get_answer_at_once() - self.history.append(construct_assistant(ai_reply)) - if fake_input is not None: - self.history[-2] = construct_user(fake_input) - chatbot[-1] = (chatbot[-1][0], ai_reply + display_append) - if fake_input is not None: - self.all_token_counts[-1] += count_token(construct_assistant(ai_reply)) - else: - self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts) - status_text = self.token_message() - return chatbot, status_text - - def handle_file_upload(self, files, chatbot): - """if the model accepts multi modal input, implement this function""" - status = gr.Markdown.update() - if files: - construct_index(self.api_key, file_src=files) - status = "索引构建完成" - return gr.Files.update(), chatbot, status - - def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot): - fake_inputs = None - display_append = [] - limited_context = False - fake_inputs = real_inputs - if files: - from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery - from llama_index.indices.query.schema import QueryBundle - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - from langchain.chat_models import ChatOpenAI - from llama_index import ( - GPTSimpleVectorIndex, - ServiceContext, - LangchainEmbedding, - OpenAIEmbedding, - ) - limited_context = True - msg = "加载索引中……" - logging.info(msg) - # yield chatbot + [(inputs, "")], msg - index = construct_index(self.api_key, file_src=files) - assert index is not None, "获取索引失败" - msg = "索引获取成功,生成回答中……" - logging.info(msg) - if local_embedding or self.model_type != ModelType.OpenAI: - embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2")) - else: - embed_model = OpenAIEmbedding() - # yield chatbot + [(inputs, "")], msg - with retrieve_proxy(): - prompt_helper = PromptHelper( - max_input_size=4096, - num_output=5, - max_chunk_overlap=20, - chunk_size_limit=600, - ) - from llama_index import ServiceContext - - service_context = ServiceContext.from_defaults( - prompt_helper=prompt_helper, embed_model=embed_model - ) - query_object = GPTVectorStoreIndexQuery( - index.index_struct, - service_context=service_context, - similarity_top_k=5, - vector_store=index._vector_store, - docstore=index._docstore, - ) - query_bundle = 
QueryBundle(real_inputs) - nodes = query_object.retrieve(query_bundle) - reference_results = [n.node.text for n in nodes] - reference_results = add_source_numbers(reference_results, use_source=False) - display_append = add_details(reference_results) - display_append = "\n\n" + "".join(display_append) - real_inputs = ( - replace_today(PROMPT_TEMPLATE) - .replace("{query_str}", real_inputs) - .replace("{context_str}", "\n\n".join(reference_results)) - .replace("{reply_language}", reply_language) - ) - elif use_websearch: - limited_context = True - search_results = ddg(real_inputs, max_results=5) - reference_results = [] - for idx, result in enumerate(search_results): - logging.debug(f"搜索结果{idx + 1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - reference_results.append([result["body"], result["href"]]) - display_append.append( - # f"{idx+1}. [{domain_name}]({result['href']})\n" - f"
<li><a href=\"{result['href']}\" target=\"_blank\">{domain_name}</a></li>\n" - ) - reference_results = add_source_numbers(reference_results) - display_append = "<ol>\n\n" + "".join(display_append) + "</ol>
      " - real_inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", real_inputs) - .replace("{web_results}", "\n\n".join(reference_results)) - .replace("{reply_language}", reply_language) - ) - else: - display_append = "" - return limited_context, fake_inputs, display_append, real_inputs, chatbot - - def predict( - self, - inputs, - chatbot, - stream=False, - use_websearch=False, - files=None, - reply_language="中文", - should_check_token_count=True, - ): # repetition_penalty, top_k - - status_text = "开始生成回答……" - logging.info( - "输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL - ) - if should_check_token_count: - yield chatbot + [(inputs, "")], status_text - if reply_language == "跟随问题语言(不稳定)": - reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch." - - limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot) - yield chatbot + [(fake_inputs, "")], status_text - - if ( - self.need_api_key and - self.api_key is None - and not shared.state.multi_api_key - ): - status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG - logging.info(status_text) - chatbot.append((inputs, "")) - if len(self.history) == 0: - self.history.append(construct_user(inputs)) - self.history.append("") - self.all_token_counts.append(0) - else: - self.history[-2] = construct_user(inputs) - yield chatbot + [(inputs, "")], status_text - return - elif len(inputs.strip()) == 0: - status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG - logging.info(status_text) - yield chatbot + [(inputs, "")], status_text - return - - if self.single_turn: - self.history = [] - self.all_token_counts = [] - self.history.append(construct_user(inputs)) - - try: - if stream: - logging.debug("使用流式传输") - iter = self.stream_next_chatbot( - inputs, - chatbot, - fake_input=fake_inputs, - display_append=display_append, - ) - for chatbot, status_text in iter: - yield chatbot, status_text - else: - logging.debug("不使用流式传输") - chatbot, status_text = self.next_chatbot_at_once( - inputs, - chatbot, - fake_input=fake_inputs, - display_append=display_append, - ) - yield chatbot, status_text - except Exception as e: - traceback.print_exc() - status_text = STANDARD_ERROR_MSG + str(e) - yield chatbot, status_text - - if len(self.history) > 1 and self.history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{self.history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if limited_context: - # self.history = self.history[-4:] - # self.all_token_counts = self.all_token_counts[-2:] - self.history = [] - self.all_token_counts = [] - - max_token = self.token_upper_limit - TOKEN_OFFSET - - if sum(self.all_token_counts) > max_token and should_check_token_count: - count = 0 - while ( - sum(self.all_token_counts) - > self.token_upper_limit * REDUCE_TOKEN_FACTOR - and sum(self.all_token_counts) > 0 - ): - count += 1 - del self.all_token_counts[0] - del self.history[:2] - logging.info(status_text) - status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话" - yield chatbot, status_text - - def retry( - self, - chatbot, - stream=False, - use_websearch=False, - files=None, - reply_language="中文", - ): - logging.debug("重试中……") - if len(self.history) > 0: - inputs = self.history[-2]["content"] - del self.history[-2:] - self.all_token_counts.pop() - elif len(chatbot) > 0: - inputs = chatbot[-1][0] - else: - yield chatbot, 
f"{STANDARD_ERROR_MSG}上下文是空的" - return - - iter = self.predict( - inputs, - chatbot, - stream=stream, - use_websearch=use_websearch, - files=files, - reply_language=reply_language, - ) - for x in iter: - yield x - logging.debug("重试完毕") - - # def reduce_token_size(self, chatbot): - # logging.info("开始减少token数量……") - # chatbot, status_text = self.next_chatbot_at_once( - # summarize_prompt, - # chatbot - # ) - # max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR - # num_chat = find_n(self.all_token_counts, max_token_count) - # logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats") - # chatbot = chatbot[:-1] - # self.history = self.history[-2*num_chat:] if num_chat > 0 else [] - # self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else [] - # msg = f"保留了最近{num_chat}轮对话" - # logging.info(msg) - # logging.info("减少token数量完毕") - # return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0]) - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_token_upper_limit(self, new_upper_limit): - self.token_upper_limit = new_upper_limit - print(f"token上限设置为{new_upper_limit}") - - def set_temperature(self, new_temperature): - self.temperature = new_temperature - - def set_top_p(self, new_top_p): - self.top_p = new_top_p - - def set_n_choices(self, new_n_choices): - self.n_choices = new_n_choices - - def set_stop_sequence(self, new_stop_sequence: str): - new_stop_sequence = new_stop_sequence.split(",") - self.stop_sequence = new_stop_sequence - - def set_max_tokens(self, new_max_tokens): - self.max_generation_token = new_max_tokens - - def set_presence_penalty(self, new_presence_penalty): - self.presence_penalty = new_presence_penalty - - def set_frequency_penalty(self, new_frequency_penalty): - self.frequency_penalty = new_frequency_penalty - - def set_logit_bias(self, logit_bias): - logit_bias = logit_bias.split() - bias_map = {} - encoding = tiktoken.get_encoding("cl100k_base") - for line in logit_bias: - word, bias_amount = line.split(":") - if word: - for token in encoding.encode(word): - bias_map[token] = float(bias_amount) - self.logit_bias = bias_map - - def set_user_identifier(self, new_user_identifier): - self.user_identifier = new_user_identifier - - def set_system_prompt(self, new_system_prompt): - self.system_prompt = new_system_prompt - - def set_key(self, new_access_key): - self.api_key = new_access_key.strip() - msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key) - logging.info(msg) - return self.api_key, msg - - def set_single_turn(self, new_single_turn): - self.single_turn = new_single_turn - - def reset(self): - self.history = [] - self.all_token_counts = [] - self.interrupted = False - return [], self.token_message([0]) - - def delete_first_conversation(self): - if self.history: - del self.history[:2] - del self.all_token_counts[0] - return self.token_message() - - def delete_last_conversation(self, chatbot): - if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]: - msg = "由于包含报错信息,只删除chatbot记录" - chatbot.pop() - return chatbot, self.history - if len(self.history) > 0: - self.history.pop() - self.history.pop() - if len(chatbot) > 0: - msg = "删除了一组chatbot对话" - chatbot.pop() - if len(self.all_token_counts) > 0: - msg = "删除了一组对话的token计数记录" - self.all_token_counts.pop() - msg = "删除了一组对话" - return chatbot, msg - - def token_message(self, token_lst=None): - if token_lst is None: - token_lst = 
self.all_token_counts - token_sum = 0 - for i in range(len(token_lst)): - token_sum += sum(token_lst[: i + 1]) - return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens" - - def save_chat_history(self, filename, chatbot, user_name): - if filename == "": - return - if not filename.endswith(".json"): - filename += ".json" - return save_file(filename, self.system_prompt, self.history, chatbot, user_name) - - def export_markdown(self, filename, chatbot, user_name): - if filename == "": - return - if not filename.endswith(".md"): - filename += ".md" - return save_file(filename, self.system_prompt, self.history, chatbot, user_name) - - def load_chat_history(self, filename, chatbot, user_name): - logging.debug(f"{user_name} 加载对话历史中……") - if type(filename) != str: - filename = filename.name - try: - with open(os.path.join(HISTORY_DIR, user_name, filename), "r") as f: - json_s = json.load(f) - try: - if type(json_s["history"][0]) == str: - logging.info("历史记录格式为旧版,正在转换……") - new_history = [] - for index, item in enumerate(json_s["history"]): - if index % 2 == 0: - new_history.append(construct_user(item)) - else: - new_history.append(construct_assistant(item)) - json_s["history"] = new_history - logging.info(new_history) - except: - # 没有对话历史 - pass - logging.debug(f"{user_name} 加载对话历史完毕") - self.history = json_s["history"] - return filename, json_s["system"], json_s["chatbot"] - except FileNotFoundError: - logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作") - return filename, self.system_prompt, chatbot - - def like(self): - """like the last response, implement if needed - """ - return gr.update() - - def dislike(self): - """dislike the last response, implement if needed - """ - return gr.update() diff --git a/spaces/segments-tobias/conex/espnet/asr/__init__.py b/spaces/segments-tobias/conex/espnet/asr/__init__.py deleted file mode 100644 index b7f177368e62a5578b8706300e101f831a3972ac..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/asr/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Initialize sub package.""" diff --git a/spaces/shgao/MDT/diffusion/timestep_sampler.py b/spaces/shgao/MDT/diffusion/timestep_sampler.py deleted file mode 100644 index a3f369847677d8dbaaadb8297691b1be92cf189f..0000000000000000000000000000000000000000 --- a/spaces/shgao/MDT/diffusion/timestep_sampler.py +++ /dev/null @@ -1,150 +0,0 @@ -# Modified from OpenAI's diffusion repos -# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py -# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion -# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py - -from abc import ABC, abstractmethod - -import numpy as np -import torch as th -import torch.distributed as dist - - -def create_named_schedule_sampler(name, diffusion): - """ - Create a ScheduleSampler from a library of pre-defined samplers. - :param name: the name of the sampler. - :param diffusion: the diffusion object to sample for. - """ - if name == "uniform": - return UniformSampler(diffusion) - elif name == "loss-second-moment": - return LossSecondMomentResampler(diffusion) - else: - raise NotImplementedError(f"unknown schedule sampler: {name}") - - -class ScheduleSampler(ABC): - """ - A distribution over timesteps in the diffusion process, intended to reduce - variance of the objective. - By default, samplers perform unbiased importance sampling, in which the - objective's mean is unchanged. 
- However, subclasses may override sample() to change how the resampled - terms are reweighted, allowing for actual changes in the objective. - """ - - @abstractmethod - def weights(self): - """ - Get a numpy array of weights, one per diffusion step. - The weights needn't be normalized, but must be positive. - """ - - def sample(self, batch_size, device): - """ - Importance-sample timesteps for a batch. - :param batch_size: the number of timesteps. - :param device: the torch device to save to. - :return: a tuple (timesteps, weights): - - timesteps: a tensor of timestep indices. - - weights: a tensor of weights to scale the resulting losses. - """ - w = self.weights() - p = w / np.sum(w) - indices_np = np.random.choice(len(p), size=(batch_size,), p=p) - indices = th.from_numpy(indices_np).long().to(device) - weights_np = 1 / (len(p) * p[indices_np]) - weights = th.from_numpy(weights_np).float().to(device) - return indices, weights - - -class UniformSampler(ScheduleSampler): - def __init__(self, diffusion): - self.diffusion = diffusion - self._weights = np.ones([diffusion.num_timesteps]) - - def weights(self): - return self._weights - - -class LossAwareSampler(ScheduleSampler): - def update_with_local_losses(self, local_ts, local_losses): - """ - Update the reweighting using losses from a model. - Call this method from each rank with a batch of timesteps and the - corresponding losses for each of those timesteps. - This method will perform synchronization to make sure all of the ranks - maintain the exact same reweighting. - :param local_ts: an integer Tensor of timesteps. - :param local_losses: a 1D Tensor of losses. - """ - batch_sizes = [ - th.tensor([0], dtype=th.int32, device=local_ts.device) - for _ in range(dist.get_world_size()) - ] - dist.all_gather( - batch_sizes, - th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device), - ) - - # Pad all_gather batches to be the maximum batch size. - batch_sizes = [x.item() for x in batch_sizes] - max_bs = max(batch_sizes) - - timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes] - loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes] - dist.all_gather(timestep_batches, local_ts) - dist.all_gather(loss_batches, local_losses) - timesteps = [ - x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs] - ] - losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]] - self.update_with_all_losses(timesteps, losses) - - @abstractmethod - def update_with_all_losses(self, ts, losses): - """ - Update the reweighting using losses from a model. - Sub-classes should override this method to update the reweighting - using losses from the model. - This method directly updates the reweighting without synchronizing - between workers. It is called by update_with_local_losses from all - ranks with identical arguments. Thus, it should have deterministic - behavior to maintain state across workers. - :param ts: a list of int timesteps. - :param losses: a list of float losses, one per timestep. 
- """ - - -class LossSecondMomentResampler(LossAwareSampler): - def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001): - self.diffusion = diffusion - self.history_per_term = history_per_term - self.uniform_prob = uniform_prob - self._loss_history = np.zeros( - [diffusion.num_timesteps, history_per_term], dtype=np.float64 - ) - self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int) - - def weights(self): - if not self._warmed_up(): - return np.ones([self.diffusion.num_timesteps], dtype=np.float64) - weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1)) - weights /= np.sum(weights) - weights *= 1 - self.uniform_prob - weights += self.uniform_prob / len(weights) - return weights - - def update_with_all_losses(self, ts, losses): - for t, loss in zip(ts, losses): - if self._loss_counts[t] == self.history_per_term: - # Shift out the oldest loss term. - self._loss_history[t, :-1] = self._loss_history[t, 1:] - self._loss_history[t, -1] = loss - else: - self._loss_history[t, self._loss_counts[t]] = loss - self._loss_counts[t] += 1 - - def _warmed_up(self): - return (self._loss_counts == self.history_per_term).all() diff --git a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/dataloader.py b/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/dataloader.py deleted file mode 100644 index 039b9ec3645b2a4626ff47c221e372f32a6ad339..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/dataloader.py +++ /dev/null @@ -1,425 +0,0 @@ -import torch -import torch.multiprocessing as multiprocessing -from torch._C import _set_worker_signal_handlers, \ - _remove_worker_pids, _error_if_any_worker_fails -try: - from torch._C import _set_worker_pids -except: - from torch._C import _update_worker_pids as _set_worker_pids -from .sampler import SequentialSampler, RandomSampler, BatchSampler -import signal -import collections -import re -import sys -import threading -import traceback -from torch._six import string_classes, int_classes -import numpy as np - -if sys.version_info[0] == 2: - import Queue as queue -else: - import queue - - -class ExceptionWrapper(object): - r"Wraps an exception plus traceback to communicate across threads" - - def __init__(self, exc_info): - self.exc_type = exc_info[0] - self.exc_msg = "".join(traceback.format_exception(*exc_info)) - - -_use_shared_memory = False -"""Whether to use shared memory in default_collate""" - - -def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id): - global _use_shared_memory - _use_shared_memory = True - - # Intialize C side signal handlers for SIGBUS and SIGSEGV. Python signal - # module's handlers are executed after Python returns from C low-level - # handlers, likely when the same fatal signal happened again already. - # https://docs.python.org/3/library/signal.html Sec. 
18.8.1.1 - _set_worker_signal_handlers() - - torch.set_num_threads(1) - torch.manual_seed(seed) - np.random.seed(seed) - - if init_fn is not None: - init_fn(worker_id) - - while True: - r = index_queue.get() - if r is None: - break - idx, batch_indices = r - try: - samples = collate_fn([dataset[i] for i in batch_indices]) - except Exception: - data_queue.put((idx, ExceptionWrapper(sys.exc_info()))) - else: - data_queue.put((idx, samples)) - - -def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id): - if pin_memory: - torch.cuda.set_device(device_id) - - while True: - try: - r = in_queue.get() - except Exception: - if done_event.is_set(): - return - raise - if r is None: - break - if isinstance(r[1], ExceptionWrapper): - out_queue.put(r) - continue - idx, batch = r - try: - if pin_memory: - batch = pin_memory_batch(batch) - except Exception: - out_queue.put((idx, ExceptionWrapper(sys.exc_info()))) - else: - out_queue.put((idx, batch)) - -numpy_type_map = { - 'float64': torch.DoubleTensor, - 'float32': torch.FloatTensor, - 'float16': torch.HalfTensor, - 'int64': torch.LongTensor, - 'int32': torch.IntTensor, - 'int16': torch.ShortTensor, - 'int8': torch.CharTensor, - 'uint8': torch.ByteTensor, -} - - -def default_collate(batch): - "Puts each data field into a tensor with outer dimension batch size" - - error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" - elem_type = type(batch[0]) - if torch.is_tensor(batch[0]): - out = None - if _use_shared_memory: - # If we're in a background process, concatenate directly into a - # shared memory tensor to avoid an extra copy - numel = sum([x.numel() for x in batch]) - storage = batch[0].storage()._new_shared(numel) - out = batch[0].new(storage) - return torch.stack(batch, 0, out=out) - elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ - and elem_type.__name__ != 'string_': - elem = batch[0] - if elem_type.__name__ == 'ndarray': - # array of string classes and object - if re.search('[SaUO]', elem.dtype.str) is not None: - raise TypeError(error_msg.format(elem.dtype)) - - return torch.stack([torch.from_numpy(b) for b in batch], 0) - if elem.shape == (): # scalars - py_type = float if elem.dtype.name.startswith('float') else int - return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) - elif isinstance(batch[0], int_classes): - return torch.LongTensor(batch) - elif isinstance(batch[0], float): - return torch.DoubleTensor(batch) - elif isinstance(batch[0], string_classes): - return batch - elif isinstance(batch[0], collections.Mapping): - return {key: default_collate([d[key] for d in batch]) for key in batch[0]} - elif isinstance(batch[0], collections.Sequence): - transposed = zip(*batch) - return [default_collate(samples) for samples in transposed] - - raise TypeError((error_msg.format(type(batch[0])))) - - -def pin_memory_batch(batch): - if torch.is_tensor(batch): - return batch.pin_memory() - elif isinstance(batch, string_classes): - return batch - elif isinstance(batch, collections.Mapping): - return {k: pin_memory_batch(sample) for k, sample in batch.items()} - elif isinstance(batch, collections.Sequence): - return [pin_memory_batch(sample) for sample in batch] - else: - return batch - - -_SIGCHLD_handler_set = False -"""Whether SIGCHLD handler is set for DataLoader worker failures. 
Only one -handler needs to be set for all DataLoaders in a process.""" - - -def _set_SIGCHLD_handler(): - # Windows doesn't support SIGCHLD handler - if sys.platform == 'win32': - return - # can't set signal in child threads - if not isinstance(threading.current_thread(), threading._MainThread): - return - global _SIGCHLD_handler_set - if _SIGCHLD_handler_set: - return - previous_handler = signal.getsignal(signal.SIGCHLD) - if not callable(previous_handler): - previous_handler = None - - def handler(signum, frame): - # This following call uses `waitid` with WNOHANG from C side. Therefore, - # Python can still get and update the process status successfully. - _error_if_any_worker_fails() - if previous_handler is not None: - previous_handler(signum, frame) - - signal.signal(signal.SIGCHLD, handler) - _SIGCHLD_handler_set = True - - -class DataLoaderIter(object): - "Iterates once over the DataLoader's dataset, as specified by the sampler" - - def __init__(self, loader): - self.dataset = loader.dataset - self.collate_fn = loader.collate_fn - self.batch_sampler = loader.batch_sampler - self.num_workers = loader.num_workers - self.pin_memory = loader.pin_memory and torch.cuda.is_available() - self.timeout = loader.timeout - self.done_event = threading.Event() - - self.sample_iter = iter(self.batch_sampler) - - if self.num_workers > 0: - self.worker_init_fn = loader.worker_init_fn - self.index_queue = multiprocessing.SimpleQueue() - self.worker_result_queue = multiprocessing.SimpleQueue() - self.batches_outstanding = 0 - self.worker_pids_set = False - self.shutdown = False - self.send_idx = 0 - self.rcvd_idx = 0 - self.reorder_dict = {} - - base_seed = torch.LongTensor(1).random_(0, 2**31-1)[0] - self.workers = [ - multiprocessing.Process( - target=_worker_loop, - args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn, - base_seed + i, self.worker_init_fn, i)) - for i in range(self.num_workers)] - - if self.pin_memory or self.timeout > 0: - self.data_queue = queue.Queue() - if self.pin_memory: - maybe_device_id = torch.cuda.current_device() - else: - # do not initialize cuda context if not necessary - maybe_device_id = None - self.worker_manager_thread = threading.Thread( - target=_worker_manager_loop, - args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, - maybe_device_id)) - self.worker_manager_thread.daemon = True - self.worker_manager_thread.start() - else: - self.data_queue = self.worker_result_queue - - for w in self.workers: - w.daemon = True # ensure that the worker exits on process exit - w.start() - - _set_worker_pids(id(self), tuple(w.pid for w in self.workers)) - _set_SIGCHLD_handler() - self.worker_pids_set = True - - # prime the prefetch loop - for _ in range(2 * self.num_workers): - self._put_indices() - - def __len__(self): - return len(self.batch_sampler) - - def _get_batch(self): - if self.timeout > 0: - try: - return self.data_queue.get(timeout=self.timeout) - except queue.Empty: - raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout)) - else: - return self.data_queue.get() - - def __next__(self): - if self.num_workers == 0: # same-process loading - indices = next(self.sample_iter) # may raise StopIteration - batch = self.collate_fn([self.dataset[i] for i in indices]) - if self.pin_memory: - batch = pin_memory_batch(batch) - return batch - - # check if the next sample has already been generated - if self.rcvd_idx in self.reorder_dict: - batch = self.reorder_dict.pop(self.rcvd_idx) - return 
self._process_next_batch(batch) - - if self.batches_outstanding == 0: - self._shutdown_workers() - raise StopIteration - - while True: - assert (not self.shutdown and self.batches_outstanding > 0) - idx, batch = self._get_batch() - self.batches_outstanding -= 1 - if idx != self.rcvd_idx: - # store out-of-order samples - self.reorder_dict[idx] = batch - continue - return self._process_next_batch(batch) - - next = __next__ # Python 2 compatibility - - def __iter__(self): - return self - - def _put_indices(self): - assert self.batches_outstanding < 2 * self.num_workers - indices = next(self.sample_iter, None) - if indices is None: - return - self.index_queue.put((self.send_idx, indices)) - self.batches_outstanding += 1 - self.send_idx += 1 - - def _process_next_batch(self, batch): - self.rcvd_idx += 1 - self._put_indices() - if isinstance(batch, ExceptionWrapper): - raise batch.exc_type(batch.exc_msg) - return batch - - def __getstate__(self): - # TODO: add limited pickling support for sharing an iterator - # across multiple threads for HOGWILD. - # Probably the best way to do this is by moving the sample pushing - # to a separate thread and then just sharing the data queue - # but signalling the end is tricky without a non-blocking API - raise NotImplementedError("DataLoaderIterator cannot be pickled") - - def _shutdown_workers(self): - try: - if not self.shutdown: - self.shutdown = True - self.done_event.set() - # if worker_manager_thread is waiting to put - while not self.data_queue.empty(): - self.data_queue.get() - for _ in self.workers: - self.index_queue.put(None) - # done_event should be sufficient to exit worker_manager_thread, - # but be safe here and put another None - self.worker_result_queue.put(None) - finally: - # removes pids no matter what - if self.worker_pids_set: - _remove_worker_pids(id(self)) - self.worker_pids_set = False - - def __del__(self): - if self.num_workers > 0: - self._shutdown_workers() - - -class DataLoader(object): - """ - Data loader. Combines a dataset and a sampler, and provides - single- or multi-process iterators over the dataset. - - Arguments: - dataset (Dataset): dataset from which to load the data. - batch_size (int, optional): how many samples per batch to load - (default: 1). - shuffle (bool, optional): set to ``True`` to have the data reshuffled - at every epoch (default: False). - sampler (Sampler, optional): defines the strategy to draw samples from - the dataset. If specified, ``shuffle`` must be False. - batch_sampler (Sampler, optional): like sampler, but returns a batch of - indices at a time. Mutually exclusive with batch_size, shuffle, - sampler, and drop_last. - num_workers (int, optional): how many subprocesses to use for data - loading. 0 means that the data will be loaded in the main process. - (default: 0) - collate_fn (callable, optional): merges a list of samples to form a mini-batch. - pin_memory (bool, optional): If ``True``, the data loader will copy tensors - into CUDA pinned memory before returning them. - drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, - if the dataset size is not divisible by the batch size. If ``False`` and - the size of dataset is not divisible by the batch size, then the last batch - will be smaller. (default: False) - timeout (numeric, optional): if positive, the timeout value for collecting a batch - from workers. Should always be non-negative. 
(default: 0) - worker_init_fn (callable, optional): If not None, this will be called on each - worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as - input, after seeding and before data loading. (default: None) - - .. note:: By default, each worker will have its PyTorch seed set to - ``base_seed + worker_id``, where ``base_seed`` is a long generated - by main process using its RNG. You may use ``torch.initial_seed()`` to access - this value in :attr:`worker_init_fn`, which can be used to set other seeds - (e.g. NumPy) before data loading. - - .. warning:: If ``spawn'' start method is used, :attr:`worker_init_fn` cannot be an - unpicklable object, e.g., a lambda function. - """ - - def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, - num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False, - timeout=0, worker_init_fn=None): - self.dataset = dataset - self.batch_size = batch_size - self.num_workers = num_workers - self.collate_fn = collate_fn - self.pin_memory = pin_memory - self.drop_last = drop_last - self.timeout = timeout - self.worker_init_fn = worker_init_fn - - if timeout < 0: - raise ValueError('timeout option should be non-negative') - - if batch_sampler is not None: - if batch_size > 1 or shuffle or sampler is not None or drop_last: - raise ValueError('batch_sampler is mutually exclusive with ' - 'batch_size, shuffle, sampler, and drop_last') - - if sampler is not None and shuffle: - raise ValueError('sampler is mutually exclusive with shuffle') - - if self.num_workers < 0: - raise ValueError('num_workers cannot be negative; ' - 'use num_workers=0 to disable multiprocessing.') - - if batch_sampler is None: - if sampler is None: - if shuffle: - sampler = RandomSampler(dataset) - else: - sampler = SequentialSampler(dataset) - batch_sampler = BatchSampler(sampler, batch_size, drop_last) - - self.sampler = sampler - self.batch_sampler = batch_sampler - - def __iter__(self): - return DataLoaderIter(self) - - def __len__(self): - return len(self.batch_sampler) diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Animal Revolt Battle Simulator The Ultimate Animal Combat Game for PC.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Animal Revolt Battle Simulator The Ultimate Animal Combat Game for PC.md deleted file mode 100644 index 4b347817c8449c26331b90bf7649629370c39b5e..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Animal Revolt Battle Simulator The Ultimate Animal Combat Game for PC.md +++ /dev/null @@ -1,208 +0,0 @@ -
      -

      Animal Revolt Battle Simulator: A Physics-Based Sandbox Game

      -

        Do you love watching animals fight each other in a realistic and hilarious way? Do you want to create your own epic battles between different types of creatures, from dinosaurs to dragons, from sharks to goats, and even from Godzilla to T-Rex? Do you want to join the action yourself and blast away the enemy with some powerful guns? If you answered yes to any of these questions, then you should definitely check out Animal Revolt Battle Simulator, a physics-based sandbox game that lets you do all of these things and more!
        

      -

      animal revolt battle simulator free download 2022 pc


      DOWNLOAD ⚹⚹⚹ https://ssurll.com/2uNSAs



      -

      What is Animal Revolt Battle Simulator?

      -

      Animal Revolt Battle Simulator is a game developed by VDimension and published by VDimension and Yodo1 Ltd. It was released on Steam on April 15, 2022, and has received overwhelmingly positive reviews from players and critics alike. It is also available on Google Play and Nintendo Switch.

      -

      The main features of the game

      -

      Animal Revolt Battle Simulator is a game that offers you ultimate freedom and creativity in creating funny and chaotic battles between all sorts of ragdoll creatures. You can:

      -
        • Build your own maps or pick from a selection of ready-made ones.
        • Place up to seven opposing armies made of different types of beasts and watch them tear each other apart in an epic battle!
        • Create your own custom monsters by combining different body parts and weapons. You can attach as many body parts and weapons as you want, anywhere you want!
        • Download a vast selection of custom monsters, maps, and buildings created by other players from the Steam Workshop. You can also upload your own creations for other people to try out.
        • Test your tactical and strategic expertise in the campaign mode. Pick the right beasts, place them in the right place, and command them to defeat the enemy.
        • Join the fight yourself in the first-person mode and blow the enemy away with some powerful guns!
        

      How to play Animal Revolt Battle Simulator?

      -

      The game is very easy to play and has a simple interface. You can use your mouse to drag and drop units on the map, rotate them, scale them, clone them, delete them, etc. You can also use your keyboard to move around the map, zoom in and out, change the camera angle, etc. You can also use hotkeys to access different menus and options.

      -

        
      -

      Once you have placed your units on the map, you can press the start button to begin the battle. You can watch the battle unfold from different perspectives, such as top-down, side-view, or free camera. You can also pause, resume, slow down, or speed up the battle at any time. You can also switch to the first-person mode and join the battle yourself.

      -

      The game has a physics-based engine that makes the battles realistic and hilarious. You can see the limbs bending, necks twisting, bodies flying around, blood splattering, etc. The game also has ragdoll effects that make the creatures flop around when they die or get hit. The game also has sound effects that add to the immersion and humor of the game.

      -

      How to download Animal Revolt Battle Simulator for free on PC?

      -

      If you want to download Animal Revolt Battle Simulator for free on your PC, you need to follow some steps. However, before you do that, you need to make sure that your PC meets the minimum system requirements for the game. Here are the system requirements for Animal Revolt Battle Simulator:

      -

      The system requirements for the game

        | Minimum | Recommended |
        | --- | --- |
        | OS: Windows 7 or newer | OS: Windows 10 |
        | Processor: Intel Core i5-2400 @ 3.1 GHz or AMD FX-6300 @ 3.5 GHz or equivalent | Processor: Intel Core i7-4770 @ 3.4 GHz or AMD Ryzen 5 1600 @ 3.2 GHz or equivalent |
        | Memory: 8 GB RAM | Memory: 16 GB RAM |
        | Graphics: NVIDIA GeForce GTX 670 or AMD R9 270 (2GB VRAM with Shader Model 5.0 or better) | Graphics: NVIDIA GeForce GTX 970 or AMD R9 290X (4GB VRAM with Shader Model 5.0 or better) |
        | DirectX: Version 11 | DirectX: Version 11 |
        | Storage: 4 GB available space | Storage: 4 GB available space |
        | Sound Card: DirectX compatible sound card with latest drivers | Sound Card: DirectX compatible sound card with latest drivers |
        

      The steps to download and install the game

      -

      If your PC meets the system requirements, you can follow these steps to download and install Animal Revolt Battle Simulator for free on your PC:

      -
        1. Go to a trusted and reliable website that offers free downloads of PC games, such as [Ocean of Games] or [Steam Unlocked].
        2. Search for Animal Revolt Battle Simulator in the search bar and click on the game link.
        3. Read the description and instructions carefully and make sure you have enough space on your hard drive.
        4. Click on the download button and wait for the download to finish.
        5. Extract the zip file using WinRAR or 7-Zip and open the extracted folder.
        6. Run the setup.exe file as administrator and follow the installation steps.
        7. Copy the crack files from the crack folder and paste them into the game installation folder.
        8. Launch the game from the desktop shortcut or the game folder and enjoy!
        

        The benefits of downloading the game from a trusted source

        -

        There are many benefits of downloading Animal Revolt Battle Simulator from a trusted source, such as:

        -
        • You can get the game for free without paying any money.
        • You can get the latest version of the game with all the updates and bug fixes.
        • You can get a safe and virus-free download without any malware or spyware.
        • You can get a fast and smooth download without any interruptions or errors.
        • You can get a full and complete game without any missing files or features.
        • You can get a user-friendly and easy-to-use interface with clear instructions and support.
        • You can get access to a large community of gamers who share their creations, feedback, and tips.
        

        How to create your own custom creatures and maps in Animal Revolt Battle Simulator?

        -

        One of the most fun and creative aspects of Animal Revolt Battle Simulator is that you can create your own custom creatures and maps using the in-game tools. You can also download and use other players' creations from the Steam Workshop. Here is how you can do that:

        -

        The unit creator tool

        -

        The unit creator tool allows you to create your own custom monsters by combining different body parts and weapons. You can access this tool by clicking on the unit creator button on the main menu. You can then:

        -
        • Select a base body from a variety of animals, such as lions, bears, crocodiles, elephants, etc.
        • Add different body parts, such as heads, legs, wings, tails, horns, etc., from different animals. You can also add human body parts, such as arms, hands, feet, etc.
        • Add different weapons, such as swords, axes, guns, rockets, lasers, etc., to any body part. You can also add shields, armor, helmets, etc., for extra protection.
        • Adjust the size, position, rotation, color, texture, and transparency of each body part and weapon. You can also use the sliders to change the mass, health, damage, speed, and range of each unit.
        • Save your custom unit and give it a name and a description. You can also choose a category and a faction for your unit.
        • Use your custom unit in the sandbox mode or the campaign mode. You can also upload your custom unit to the Steam Workshop for other players to download and use.
        

        The map editor tool

        -

        The map editor tool allows you to create your own custom maps by placing different objects and terrain on a flat surface. You can access this tool by clicking on the map editor button on the main menu. You can then:

        -
        • Select a map size from small, medium, large, or huge.
        • Select a terrain type from grass, sand, snow, water, lava, etc.
        • Select a skybox from day, night, sunset, etc.
        • Place different objects on the map, such as buildings, trees, rocks, bridges, fences, etc. You can also place animals and humans as static or dynamic objects.
        • Adjust the size, position, rotation, color, texture, and transparency of each object. You can also use the sliders to change the gravity, wind, fog, etc., of the map.
        • Save your custom map and give it a name and a description. You can also upload your custom map to the Steam Workshop for other players to download and use.
        

        The workshop and community creations

        -

        The workshop is a feature that allows you to browse, download, and use other players' custom units and maps. You can access this feature by clicking on the workshop button on the main menu. You can then:

        -
        • Search for custom units and maps by name, category, rating, popularity, etc.
        • View the details and screenshots of each custom unit and map. You can also read the comments and reviews from other players.
        • Download the custom units and maps that you like and add them to your library.
        • Use the custom units and maps in the sandbox mode or the campaign mode. You can also rate and review them after using them.
        

        How to join the battles yourself in Animal Revolt Battle Simulator?

        -

        If you want to join the battles yourself and have some fun shooting at the enemy creatures, you can do that by switching to the first-person mode. Here is how you can do that:

        -

        The first-person mode

        -

        The first-person mode is a feature that allows you to control one of your units in the battle and see the action from their perspective. You can access this feature by pressing the F key on your keyboard during a battle. You can then:

        -
        • Move around using the WASD keys or the arrow keys.
        • Aim using your mouse cursor.
        • Shoot using the left mouse button.
        • Reload using the R key.
        • Switch weapons using the Q key or the mouse wheel.
        • Crouch using the C key.
        • Jump using the spacebar.
        

        The weapons and guns available

        -

        In the first-person mode, you can use different weapons and guns to shoot at the enemy creatures. Some of these weapons are:

        -
        • Pistol: A basic handgun that has low damage but high accuracy and fire rate.
        • Shotgun: A powerful shotgun that has high damage but low accuracy and fire rate.
        • Rifle: A semi-automatic rifle that has medium damage and accuracy but high fire rate.
        • Sniper: A long-range sniper rifle that has high damage and accuracy but low fire rate.
        • Rocket Launcher: A devastating rocket launcher that has very high damage but low accuracy and fire rate.
        • Laser Gun: A futuristic laser gun that has medium damage but high accuracy and fire rate.
        

        The tips and tricks for surviving the battles

        -

        If you want to survive the battles in the first-person mode, you need to follow some tips and tricks. Here are some of them:

        -
        • Pick a weapon that suits your playstyle and situation. For example, use a shotgun for close-range combat or a sniper for long-range combat.
        • Aim for the head or other weak spots of the enemy creatures to deal more damage and kill them faster.
        • Reload your weapon before you run out of ammo or when you are in cover.
        • Use cover to avoid getting hit by enemy fire or attacks. You can hide behind buildings, trees, rocks, fences, etc., and peek out to shoot.
        • Keep moving and don't stay in one place for too long. The enemy creatures will chase you and attack you from different directions.
        • Use the environment to your advantage. You can shoot explosive barrels, gas tanks, or vehicles to cause explosions and damage the enemy creatures.
        • Watch your health and stamina bars. You can heal yourself by picking up health packs or eating food. You can also restore your stamina by resting or drinking water.
        • Have fun and experiment with different combinations of units, weapons, and maps. You can create some hilarious and epic scenarios in the game!
        

        Conclusion

        -

        Animal Revolt Battle Simulator is a physics-based sandbox game that lets you create and watch funny and chaotic battles between all sorts of ragdoll creatures. You can also create your own custom monsters and maps using the in-game tools or download other players' creations from the Steam Workshop. You can also join the battles yourself in the first-person mode and shoot at the enemy creatures with some powerful guns. The game is easy to play and has a simple interface, but it also offers a lot of freedom and creativity for you to enjoy. The game is also realistic and hilarious, thanks to its physics-based engine, ragdoll effects, sound effects, etc. The game is available for free on PC, as well as on Google Play and Nintendo Switch. If you are looking for a fun and entertaining game that will make you laugh and keep you hooked for hours, then you should definitely download Animal Revolt Battle Simulator today!

        -

        FAQs

        -

        Here are some frequently asked questions about Animal Revolt Battle Simulator:

        -
        1. Q: How many units can I place on the map?
           A: You can place up to 1000 units on the map, depending on the map size and your PC performance.
        2. Q: How many custom units can I create?
           A: You can create up to 100 custom units using the unit creator tool.
        3. Q: How many custom maps can I create?
           A: You can create up to 100 custom maps using the map editor tool.
        4. Q: How can I share my custom units and maps with other players?
           A: You can share your custom units and maps with other players by uploading them to the Steam Workshop. You can also download other players' creations from there.
        5. Q: How can I contact the developers of the game?
           A: You can contact the developers of the game by sending them an email at vdimensiongames@gmail.com or by visiting their website at www.vdimensiongames.com.
        

        
        \ No newline at end of file diff --git a/spaces/snjyor/ChatGPT_demo/README.md b/spaces/snjyor/ChatGPT_demo/README.md deleted file mode 100644 index 6c426d5e206d23fcb9ee71737368ae031a64baf8..0000000000000000000000000000000000000000 --- a/spaces/snjyor/ChatGPT_demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ChatGPT_demo -emoji: 💻 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-large-patch14.md b/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-large-patch14.md deleted file mode 100644 index 11384803a049e29bf6f260fb0399a3bb808f4224..0000000000000000000000000000000000000000 --- a/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-large-patch14.md +++ /dev/null @@ -1,136 +0,0 @@ -# Model Card: CLIP - -Disclaimer: The model card is taken and modified from the official CLIP repository, it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md). - -## Model Details - -The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within. - -### Model Date - -January 2021 - -### Model Type - -The base model uses a ViT-L/14 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. - -The original implementation had two variants: one using a ResNet image encoder and the other using a Vision Transformer. This repository has the variant with the Vision Transformer. - - -### Documents - -- [Blog Post](https://openai.com/blog/clip/) -- [CLIP Paper](https://arxiv.org/abs/2103.00020) - - -### Use with Transformers - -```python -from PIL import Image -import requests - -from transformers import CLIPProcessor, CLIPModel - -model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14") -processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") - -url = "http://images.cocodataset.org/val2017/000000039769.jpg" -image = Image.open(requests.get(url, stream=True).raw) - -inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) - -outputs = model(**inputs) -logits_per_image = outputs.logits_per_image # this is the image-text similarity score -probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities -``` - - -## Model Use - -### Intended Use - -The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. 
- -#### Primary intended uses - -The primary intended users of these models are AI researchers. - -We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. - -### Out-of-Scope Use Cases - -**Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. - -Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. - -Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases. - - - -## Data - -The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet which tend to skew towards more developed nations, and younger, male users. - -### Data Mission Statement - -Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset. - - - -## Performance and Limitations - -### Performance - -We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision datasets such as OCR to texture recognition to fine-grained classification. The paper describes model performance on the following datasets: - -- Food101 -- CIFAR10 -- CIFAR100 -- Birdsnap -- SUN397 -- Stanford Cars -- FGVC Aircraft -- VOC2007 -- DTD -- Oxford-IIIT Pet dataset -- Caltech101 -- Flowers102 -- MNIST -- SVHN -- IIIT5K -- Hateful Memes -- SST-2 -- UCF101 -- Kinetics700 -- Country211 -- CLEVR Counting -- KITTI Distance -- STL-10 -- RareAct -- Flickr30 -- MSCOCO -- ImageNet -- ImageNet-A -- ImageNet-R -- ImageNet Sketch -- ObjectNet (ImageNet Overlap) -- Youtube-BB -- ImageNet-Vid - -## Limitations - -CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine grained classification and counting objects. 
CLIP also poses issues with regards to fairness and bias which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation- in many cases we have used linear probes to evaluate the performance of CLIP and there is evidence suggesting that linear probes can underestimate model performance. - -### Bias and Fairness - -We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper). - -We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks. - - - -## Feedback - -### Where to send questions or comments about the model - -Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9) \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py deleted file mode 100644 index 3279dae89a8bca95178bbe1285d3cb334890b12f..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import mmap -import os -import shutil -import struct -import typing as tp -from functools import lru_cache - -import numpy as np -import torch -from fairseq.data import indexed_dataset -from fairseq.data.huffman import HuffmanCoder -from fairseq.file_io import PathManager - - -class HuffmanMMapIndex: - """ - keep an index of the offsets in the huffman binary file. - First a header, then the list of sizes (num tokens) for each instance and finally - the addresses of each instance. 
- """ - - _HDR_MAGIC = b"HUFFIDX\x00\x00" - _VERSION = 1 - - @classmethod - def writer(cls, path: str, data_len: int): - class _Writer: - def __enter__(self): - self._file = open(path, "wb") - - # write header (magic + version) - self._file.write(cls._HDR_MAGIC) - self._file.write(struct.pack(" None: - self._path_prefix = path_prefix - self._coder = coder - self._sizes = [] - self._ptrs = [] - self._data_len = 0 - - def open(self): - self._coder.to_file(vocab_file_path(self._path_prefix)) - self._data_file = open(indexed_dataset.data_file_path(self._path_prefix), "wb") - - def __enter__(self) -> "HuffmanMMapIndexedDatasetBuilder": - self.open() - return self - - def add_item(self, tokens: tp.List[str]) -> None: - """ - add a list of tokens to the dataset, they will compressed with the - provided coder before being written to file. - """ - encoded = self._coder.encode(tokens) - code_len = len(encoded) - last_ptr = 0 - if len(self._ptrs) > 0: - last_ptr = self._ptrs[-1] - self._sizes.append(len(tokens)) - self._ptrs.append(last_ptr + code_len) - self._data_len += code_len - self._data_file.write(encoded) - - def append(self, other_dataset_path_prefix: str) -> None: - """ - append an existing dataset. - Beware, if it wasn't built with the same coder, you are in trouble. - """ - other_index = HuffmanMMapIndex( - indexed_dataset.index_file_path(other_dataset_path_prefix) - ) - for (ptr, size) in other_index: - self._ptrs.append(ptr + self._data_len) - self._sizes.append(size) - - # Concatenate data - with open(indexed_dataset.data_file_path(other_dataset_path_prefix), "rb") as f: - shutil.copyfileobj(f, self._data_file) - - self._data_len += other_index.data_len - - def close(self): - self._data_file.close() - with HuffmanMMapIndex.writer( - indexed_dataset.index_file_path(self._path_prefix), self._data_len - ) as index: - index.write(self._sizes, self._ptrs) - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - self.close() diff --git a/spaces/stable-diffusion-ai/upscaling/README.md b/spaces/stable-diffusion-ai/upscaling/README.md deleted file mode 100644 index 230b13526e18ad81e52a1b27331ddbce7a2b7918..0000000000000000000000000000000000000000 --- a/spaces/stable-diffusion-ai/upscaling/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Stable Diffusion - Image Upscaling -emoji: 🚀 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: ai-art/upscaling ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Astrological Charts Pro 9.0.8 APK [Paid] [Full] EXCLUSIVE.md b/spaces/stomexserde/gpt4-ui/Examples/Astrological Charts Pro 9.0.8 APK [Paid] [Full] EXCLUSIVE.md deleted file mode 100644 index 60d8eb6571e617732a4de054265d610b649b4ce9..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Astrological Charts Pro 9.0.8 APK [Paid] [Full] EXCLUSIVE.md +++ /dev/null @@ -1,21 +0,0 @@ - -

        Astrological Charts Pro 9.0.8 APK [Paid] [Full]: A Professional Astrology App for Android

        -

        If you are looking for a professional astrological program for Android, you might want to check out Astrological Charts Pro 9.0.8 APK [Paid] [Full]. This app is designed to provide you with 12 types of astrological charts that can include planets, asteroids, fictitious points, lots, aspects, houses, and more. You can also access interpretations of natal and transit charts, synastry and composite charts, progressions and directions, returns and lunar phases, and harmonics.

        -

        Astrological Charts Pro 9.0.8 APK [Paid] [Full]


        DOWNLOAD: https://urlgoal.com/2uI8lE



        -

        Astrological Charts Pro 9.0.8 APK [Paid] [Full] has a database of about 100,000 places with specified time zones, so you don't have to work out the offset from GMT yourself (a sketch of that conversion is shown below). You can also add new places if you want. The app calculates exact dates of triggering aspects, periods of aspects by orb, moments of sign changes, lunar phases, eclipses, void-of-course Moon, midpoints and planetary hours, and lists them in the menu of the main page. You can choose between the Tropical and Sidereal zodiac in the app.
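        To give a sense of the GMT-offset bookkeeping that the app's place database handles for you, here is a minimal Python sketch (not taken from the app; the place and birth time are made-up examples) that converts a local birth time to UTC with the standard-library `zoneinfo` module:

```python
from datetime import datetime
from zoneinfo import ZoneInfo  # standard library, Python 3.9+

# Hypothetical birth data: all that matters is a known IANA time zone name.
birth_local = datetime(1990, 7, 15, 14, 30, tzinfo=ZoneInfo("Europe/Paris"))

# Charts are normally computed from universal time, so convert to UTC (GMT).
birth_utc = birth_local.astimezone(ZoneInfo("UTC"))

print(birth_local.isoformat())  # 1990-07-15T14:30:00+02:00 (summer time)
print(birth_utc.isoformat())    # 1990-07-15T12:30:00+00:00
```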

        -

        Astrological Charts Pro 9.0.8 APK [Paid] [Full] is not only a longitude calculator; it also provides data such as latitude, declination and parallel aspects for 10 planets. You can customize the orbs and the house systems according to your preference. There are 11 house systems and 22 types of aspects available in the app.

        -

        If you want to download Astrological Charts Pro 9.0.8 APK [Paid] [Full], you can find it on Google Play Store[^1^], APKCombo[^2^], or Aptoide[^3^]. The app costs $19.99 and requires Android 4.4 or higher to run. It has a rating of 4.7 out of 5 stars on Google Play Store based on 566 reviews.

        -

        -

        Astrological Charts Pro 9.0.8 APK [Paid] [Full] is a must-have app for anyone who is interested in astrology and wants to have a professional tool at their fingertips. Whether you want to analyze your own chart, compare it with others, or explore different astrological techniques, this app will help you do it with ease and accuracy.

        Here are some of the features and benefits of Astrological Charts Pro 9.0.8 APK [Paid] [Full] that you can enjoy:

        -
          -
        • 12 types of astrological charts: You can generate and view different types of charts, such as natal, transit, synastry, composite, progressions, directions, profections, returns, lunar phases, and harmonics. You can also switch between single radix chart and dual radix chart modes.
        • -
        • 13 asteroids and 23 fictitious points: You can include more factors in your analysis, such as Chiron, Ceres, Pallas, Juno, Vesta, Lilith, Vertex, Part of Fortune, and more. You can also add or remove any asteroid or point from the chart.
        • -
        • Interpretations: You can read the meanings of natal planets in zodiac signs, in houses and in retrograde state, transit planets in natal houses, natal aspects, transit-to-natal aspects, synastry aspects, and the natal Ascendant and houses in signs. The app also provides a Google search option for any interpretation that is not available.
        • -
        • Customizable orbs and house systems: You can adjust the orbs for each aspect and planet according to your preference. You can also choose from 11 house systems, such as Placidus, Koch, Equal, Whole Sign, Campanus, Regiomontanus, Porphyry, Morinus, Alcabitius, Meridian and Axial Rotation System.
        • -
        • Data accuracy and convenience: You can rely on the app's database of about 100000 places with specified time zones to calculate the charts correctly. You can also add new places if you need to. The app also shows you the exact dates of triggering aspects, periods of aspects by orb, moments of sign changes, lunar phases, eclipses, void of course Moon, midpoints and planetary hours in the menu of the main page.
        • -
        • Tropical and Sidereal zodiac: You can switch between the two zodiac systems in the app, and choose from different ayanamsas for the Sidereal zodiac (a short sketch of this conversion appears after this list).
        • -
        • Data export and import: You can save your charts as PDF files or images and share them with others. You can also import data from other astrological programs or websites using CSV files.
        • -
        -
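        As a rough illustration of how the two zodiacs mentioned above relate (this is a generic sketch, not code from the app; the ayanamsa value is only an example), the sidereal longitude is the tropical longitude minus the chosen ayanamsa, wrapped to the 0-360 degree range:

```python
ZODIAC_SIGNS = ["Aries", "Taurus", "Gemini", "Cancer", "Leo", "Virgo",
                "Libra", "Scorpio", "Sagittarius", "Capricorn", "Aquarius", "Pisces"]


def tropical_to_sidereal(tropical_longitude: float, ayanamsa: float) -> float:
    """Shift a tropical ecliptic longitude (in degrees) into the sidereal zodiac."""
    return (tropical_longitude - ayanamsa) % 360.0


def sign_of(longitude: float) -> str:
    """Each zodiac sign spans 30 degrees of ecliptic longitude."""
    return ZODIAC_SIGNS[int(longitude // 30) % 12]


# Example: a planet at 15 degrees Aries (tropical) with an ayanamsa of about 24 degrees
sidereal = tropical_to_sidereal(15.0, 24.0)
print(sidereal, sign_of(sidereal))  # 351.0 Pisces
```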

        Astrological Charts Pro 9.0.8 APK [Paid] [Full] is a comprehensive and professional astrological program for Android that will satisfy your astrological needs. Download it today and discover the secrets of the stars!

        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Chick Corea A Work In Progress Pdf 24 [PATCHED].md b/spaces/stomexserde/gpt4-ui/Examples/Chick Corea A Work In Progress Pdf 24 [PATCHED].md deleted file mode 100644 index e3276d180274d57bda1b1ae32f39646f53ec872f..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Chick Corea A Work In Progress Pdf 24 [PATCHED].md +++ /dev/null @@ -1,23 +0,0 @@ -
        -

        Chick Corea's A Work in Progress: A Treasure of Musical Knowledge

        -

        Chick Corea was one of the most influential and prolific jazz pianists and composers of the 20th and 21st centuries. He left behind a legacy of musical innovation and creativity that spans genres and generations. He also left behind a treasure of musical knowledge: his book A Work in Progress ... On Being a Musician.

        -

        A Work in Progress is a collection of Chick Corea's notes and essays on various aspects of music and musicianship, such as creativity, practice, performance, communication, and aesthetics. He shares his insights and experiences from his long and successful career, as well as his personal philosophy and approach to music. He answers common questions that musicians face, such as:

        -

        chick corea a work in progress pdf 24


        Download ⚹⚹⚹ https://urlgoal.com/2uI89L



        -
          -
        • What is the single most important element in making good music?
        • -
        • How can one gain the ability to completely originate one's own music?
        • -
        • How much time and effort should go into getting a single musical product?
        • -
        • What's the best way to evaluate one's own live performance?
        • -
        • What can one do about a "difficult" audience?
        • -
        • Can others' opinions on your music serve some useful purpose?
        • -
        -

        The book is not a typical instructional manual or textbook. It is more like a conversation with a master musician who generously shares his wisdom and advice. It is also a work in progress, as Chick Corea intended to add more chapters and answer more questions from his fans and students. The book is available in English and Spanish-language editions, exclusively at Chick's official store.

        -

        One of the chapters that illustrates Chick Corea's musical mind is chapter 24, titled "Pulse and Time Flow". In this chapter, he explains how he imagines a pulse while laying phrases over it, how he creates tempo intentionally, how he uses different subdivisions of the beat to create rhythmic variety and interest, and how he relates to other musicians in a group setting. He also gives some practical exercises to improve one's sense of pulse and time flow.

        -

        A Work in Progress by Chick Corea is a valuable resource for any musician who wants to learn from one of the greatest artists of our time. It is also a testament to Chick Corea's love for music and his dedication to sharing it with others. As he writes in the introduction:

        -
        "I hope you can use some of it to your benefit and success in making music and being a musician."
        - -

        If you want to read A Work in Progress by Chick Corea, you can download it as a PDF file from his official website. You can also find some excerpts and samples of the book on Scribd and PDFSLIDE.NET. These websites allow you to preview some of the pages and chapters of the book before you buy it. You can also read some reviews and comments from other readers who have enjoyed the book.

        -

        One of the benefits of reading A Work in Progress by Chick Corea as a PDF file is that you can easily access it on your computer, tablet, or smartphone. You can also print it out if you prefer to read it on paper. You can also highlight, bookmark, and annotate the PDF file as you read it. This way, you can make notes of the parts that interest you or that you want to practice later.

        -

        A Work in Progress by Chick Corea is not only a book for jazz musicians or pianists. It is a book for anyone who loves music and wants to improve their musical skills and knowledge. It is a book that will inspire you, challenge you, and entertain you. It is a book that will make you appreciate Chick Corea's music even more.

        -
        -
        \ No newline at end of file diff --git a/spaces/stratussox/yolov5_inference/app.py b/spaces/stratussox/yolov5_inference/app.py deleted file mode 100644 index 320dbdf58d22accfca6a5ffc2faa02814212ad91..0000000000000000000000000000000000000000 --- a/spaces/stratussox/yolov5_inference/app.py +++ /dev/null @@ -1,59 +0,0 @@ -import json -from io import BytesIO -from PIL import Image -import os - -import streamlit as st -import pandas as pd -import numpy as np -import torch -import cv2 - -#from simple_detection import detect -from one_image_detection import detect - -if 'img_list' not in st.session_state: - st.session_state.img_list = [] - -st.title('Direct YoloV5 Inference') -instructions = """ - Upload your images and run the model to - test the basic YoloV5 model. - - Check the original YoloV5 repository in: https://github.com/ultralytics/yolov5 - """ -st.write(instructions) - -if st.button('Run Model'): - result = detect(st.session_state['img_list']) - st.session_state['img_list'] = [] - - for image in result: - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - st.image(image, caption='Result') - -file = st.file_uploader('Upload An Image') - -if file: # if user uploaded file - file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8) - img = cv2.imdecode(file_bytes, 1) - - # print(len(img_list)) - # img_list.append(img) - # print("Loaded images = ") - # print(len(img_list)) - - st.session_state['img_list'].append(img) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - st.title("Uploaded Image") - resized_image = cv2.resize(img, (256,256)) - st.image(resized_image) - - - - - - - - \ No newline at end of file diff --git a/spaces/sub314xxl/DualStyleGAN/app.py b/spaces/sub314xxl/DualStyleGAN/app.py deleted file mode 100644 index 13c0b5fd70358c15919c1c8ed9267af3cf0cc3db..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/DualStyleGAN/app.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import argparse -import pathlib - -import gradio as gr - -from dualstylegan import Model - -DESCRIPTION = '''# Portrait Style Transfer with DualStyleGAN - -overview -''' - - -def get_style_image_url(style_name: str) -> str: - base_url = 'https://raw.githubusercontent.com/williamyang1991/DualStyleGAN/main/doc_images' - filenames = { - 'cartoon': 'cartoon_overview.jpg', - 'caricature': 'caricature_overview.jpg', - 'anime': 'anime_overview.jpg', - 'arcane': 'Reconstruction_arcane_overview.jpg', - 'comic': 'Reconstruction_comic_overview.jpg', - 'pixar': 'Reconstruction_pixar_overview.jpg', - 'slamdunk': 'Reconstruction_slamdunk_overview.jpg', - } - return f'{base_url}/{filenames[style_name]}' - - -def get_style_image_markdown_text(style_name: str) -> str: - url = get_style_image_url(style_name) - return f'
        style image
        ' - - -def update_slider(choice: str) -> dict: - max_vals = { - 'cartoon': 316, - 'caricature': 198, - 'anime': 173, - 'arcane': 99, - 'comic': 100, - 'pixar': 121, - 'slamdunk': 119, - } - return gr.Slider.update(maximum=max_vals[choice]) - - -def update_style_image(style_name: str) -> dict: - text = get_style_image_markdown_text(style_name) - return gr.Markdown.update(value=text) - - -def set_example_image(example: list) -> dict: - return gr.Image.update(value=example[0]) - - -def set_example_styles(example: list) -> list[dict]: - return [ - gr.Radio.update(value=example[0]), - gr.Slider.update(value=example[1]), - ] - - -def set_example_weights(example: list) -> list[dict]: - return [ - gr.Slider.update(value=example[0]), - gr.Slider.update(value=example[1]), - ] - - -model = Model() - -with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - - with gr.Box(): - gr.Markdown('''## Step 1 (Preprocess Input Image) - -- Drop an image containing a near-frontal face to the **Input Image**. -- If there are multiple faces in the image, hit the Edit button in the upper right corner and crop the input image beforehand. -- Hit the **Detect & Align Face** button. -- Hit the **Reconstruct Face** button. -- The final result will be based on this **Reconstructed Face**. So, if the reconstructed image is not satisfactory, you may want to change the input image. -''') - with gr.Row(): - with gr.Column(): - with gr.Row(): - input_image = gr.Image(label='Input Image', - type='filepath') - with gr.Row(): - detect_button = gr.Button('Detect & Align Face') - with gr.Column(): - with gr.Row(): - aligned_face = gr.Image(label='Aligned Face', - type='numpy', - interactive=False) - with gr.Row(): - reconstruct_button = gr.Button('Reconstruct Face') - with gr.Column(): - reconstructed_face = gr.Image(label='Reconstructed Face', - type='numpy') - instyle = gr.Variable() - - with gr.Row(): - paths = sorted(pathlib.Path('images').glob('*.jpg')) - gr.Examples(examples=[[path.as_posix()] for path in paths], - inputs=input_image) - - with gr.Box(): - gr.Markdown('''## Step 2 (Select Style Image) - -- Select **Style Type**. -- Select **Style Image Index** from the image table below. -''') - with gr.Row(): - with gr.Column(): - style_type = gr.Radio(label='Style Type', - choices=model.style_types) - text = get_style_image_markdown_text('cartoon') - style_image = gr.Markdown(value=text) - style_index = gr.Slider(label='Style Image Index', - minimum=0, - maximum=316, - step=1, - value=26) - - with gr.Row(): - gr.Examples(examples=[ - ['cartoon', 26], - ['caricature', 65], - ['arcane', 63], - ['pixar', 80], - ], - inputs=[style_type, style_index]) - - with gr.Box(): - gr.Markdown('''## Step 3 (Generate Style Transferred Image) - -- Adjust **Structure Weight** and **Color Weight**. -- These are weights for the style image, so the larger the value, the closer the resulting image will be to the style image. -- Hit the **Generate** button. 
-''') - with gr.Row(): - with gr.Column(): - with gr.Row(): - structure_weight = gr.Slider(label='Structure Weight', - minimum=0, - maximum=1, - step=0.1, - value=0.6) - with gr.Row(): - color_weight = gr.Slider(label='Color Weight', - minimum=0, - maximum=1, - step=0.1, - value=1) - with gr.Row(): - structure_only = gr.Checkbox(label='Structure Only') - with gr.Row(): - generate_button = gr.Button('Generate') - - with gr.Column(): - result = gr.Image(label='Result') - - with gr.Row(): - gr.Examples(examples=[ - [0.6, 1.0], - [0.3, 1.0], - [0.0, 1.0], - [1.0, 0.0], - ], - inputs=[structure_weight, color_weight]) - - detect_button.click(fn=model.detect_and_align_face, - inputs=input_image, - outputs=aligned_face) - reconstruct_button.click(fn=model.reconstruct_face, - inputs=aligned_face, - outputs=[reconstructed_face, instyle]) - style_type.change(fn=update_slider, inputs=style_type, outputs=style_index) - style_type.change(fn=update_style_image, - inputs=style_type, - outputs=style_image) - generate_button.click(fn=model.generate, - inputs=[ - style_type, - style_index, - structure_weight, - color_weight, - structure_only, - instyle, - ], - outputs=result) -demo.queue(max_size=10).launch() diff --git a/spaces/sub314xxl/MetaGPT/metagpt/actions/skill_action.py b/spaces/sub314xxl/MetaGPT/metagpt/actions/skill_action.py deleted file mode 100644 index 758591fdd7838ba3d45efbe9dd40c0ce8508c93f..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/actions/skill_action.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/8/28 -@Author : mashenquan -@File : skill_action.py -@Desc : Call learned skill -""" -from __future__ import annotations - -import ast -import importlib -import traceback -from copy import deepcopy - -from metagpt.actions import Action, ActionOutput -from metagpt.learn.skill_loader import Skill -from metagpt.logs import logger - - -class ArgumentsParingAction(Action): - def __init__(self, last_talk: str, skill: Skill, context=None, llm=None, **kwargs): - super(ArgumentsParingAction, self).__init__(name="", context=context, llm=llm) - self.skill = skill - self.ask = last_talk - self.rsp = None - self.args = None - - @property - def prompt(self): - prompt = f"{self.skill.name} function parameters description:\n" - for k, v in self.skill.arguments.items(): - prompt += f"parameter `{k}`: {v}\n" - prompt += "\n" - prompt += "Examples:\n" - for e in self.skill.examples: - prompt += f"If want you to do `{e.ask}`, return `{e.answer}` brief and clear.\n" - prompt += f"\nNow I want you to do `{self.ask}`, return in examples format above, brief and clear." 
- return prompt - - async def run(self, *args, **kwargs) -> ActionOutput: - prompt = self.prompt - logger.info(prompt) - rsp = await self.llm.aask(msg=prompt, system_msgs=[]) - logger.info(rsp) - self.args = ArgumentsParingAction.parse_arguments(skill_name=self.skill.name, txt=rsp) - self.rsp = ActionOutput(content=rsp) - return self.rsp - - @staticmethod - def parse_arguments(skill_name, txt) -> dict: - prefix = skill_name + "(" - if prefix not in txt: - logger.error(f"{skill_name} not in {txt}") - return None - if ")" not in txt: - logger.error(f"')' not in {txt}") - return None - begin_ix = txt.find(prefix) - end_ix = txt.rfind(")") - args_txt = txt[begin_ix + len(prefix) : end_ix] - logger.info(args_txt) - fake_expression = f"dict({args_txt})" - parsed_expression = ast.parse(fake_expression, mode="eval") - args = {} - for keyword in parsed_expression.body.keywords: - key = keyword.arg - value = ast.literal_eval(keyword.value) - args[key] = value - return args - - -class SkillAction(Action): - def __init__(self, skill: Skill, args: dict, context=None, llm=None, **kwargs): - super(SkillAction, self).__init__(name="", context=context, llm=llm) - self._skill = skill - self._args = args - self.rsp = None - - async def run(self, *args, **kwargs) -> str | ActionOutput | None: - """Run action""" - options = deepcopy(kwargs) - if self._args: - for k in self._args.keys(): - if k in options: - options.pop(k) - try: - self.rsp = await self.find_and_call_function(self._skill.name, args=self._args, **options) - except Exception as e: - logger.exception(f"{e}, traceback:{traceback.format_exc()}") - self.rsp = f"Error: {e}" - return ActionOutput(content=self.rsp, instruct_content=self._skill.json()) - - @staticmethod - async def find_and_call_function(function_name, args, **kwargs): - try: - module = importlib.import_module("metagpt.learn") - function = getattr(module, function_name) - # Call the function and return the result - result = await function(**args, **kwargs) - return result - except (ModuleNotFoundError, AttributeError): - logger.error(f"{function_name} not found") - return None - - -if __name__ == "__main__": - ArgumentsParingAction.parse_arguments( - skill_name="text_to_image", txt='`text_to_image(text="Draw an apple", size_type="512x512")`' - ) diff --git a/spaces/sub314xxl/MetaGPT/metagpt/const.py b/spaces/sub314xxl/MetaGPT/metagpt/const.py deleted file mode 100644 index fbc2c928a14b0b5770b266dd79b02b2c7814880a..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/const.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/1 11:59 -@Author : alexanderwu -@File : const.py -@Modified By: mashenquan, 2023/8/28. Add 'OPTIONS', 'DEFAULT_LANGUAGE', 'DEFAULT_MAX_TOKENS'...
-""" -import contextvars -from pathlib import Path - - -def get_project_root(): - """逐级向上寻找项目根目录""" - current_path = Path.cwd() - while True: - if ( - (current_path / ".git").exists() - or (current_path / ".project_root").exists() - or (current_path / ".gitignore").exists() - ): - return current_path - parent_path = current_path.parent - if parent_path == current_path: - raise Exception("Project root not found.") - current_path = parent_path - - -PROJECT_ROOT = get_project_root() -DATA_PATH = PROJECT_ROOT / "data" -WORKSPACE_ROOT = PROJECT_ROOT / "workspace" -PROMPT_PATH = PROJECT_ROOT / "metagpt/prompts" -UT_PATH = PROJECT_ROOT / "data/ut" -SWAGGER_PATH = UT_PATH / "files/api/" -UT_PY_PATH = UT_PATH / "files/ut/" -API_QUESTIONS_PATH = UT_PATH / "files/question/" -YAPI_URL = "http://yapi.deepwisdomai.com/" -TMP = PROJECT_ROOT / "tmp" -RESEARCH_PATH = DATA_PATH / "research" - -MEM_TTL = 24 * 30 * 3600 - -OPTIONS = contextvars.ContextVar("OPTIONS") -DEFAULT_LANGUAGE = "English" -DEFAULT_MAX_TOKENS = 1500 -COMMAND_TOKENS = 500 -BRAIN_MEMORY = "BRAIN_MEMORY" -SKILL_PATH = "SKILL_PATH" -SERPER_API_KEY = "SERPER_API_KEY" - -# Key Definitions for MetaGPT LLM -METAGPT_API_MODEL = "METAGPT_API_MODEL" -METAGPT_API_KEY = "METAGPT_API_KEY" -METAGPT_API_BASE = "METAGPT_API_BASE" -METAGPT_API_TYPE = "METAGPT_API_TYPE" -METAGPT_API_VERSION = "METAGPT_API_VERSION" - -# format -BASE64_FORMAT = "base64" diff --git a/spaces/sub314xxl/MusicGen-Continuation/Makefile b/spaces/sub314xxl/MusicGen-Continuation/Makefile deleted file mode 100644 index 5bfd89dd833d7448b21073eb6ee7cfac1d5157dd..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MusicGen-Continuation/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -default: linter tests - -install: - pip install -U pip - pip install -U -e '.[dev]' - -linter: - flake8 audiocraft && mypy audiocraft - flake8 tests && mypy tests - -tests: - coverage run -m pytest tests - coverage report --include 'audiocraft/*' - -docs: - pdoc3 --html -o docs -f audiocraft - -dist: - python setup.py sdist - -.PHONY: linter tests docs dist diff --git a/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/position_encoding.py b/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/position_encoding.py deleted file mode 100644 index f60587a41d5f3b26c247ef569523ec4a595bd4b8..0000000000000000000000000000000000000000 --- a/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/position_encoding.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# # Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py -""" -Various positional encodings for the transformer. -""" -import math - -import torch -from torch import nn - - -class PositionEmbeddingSine(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. 
- """ - - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, x, mask=None): - if mask is None: - mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) - not_mask = ~mask - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.num_pos_feats) - - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack( - (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos_y = torch.stack( - (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 - ).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos diff --git a/spaces/sujitpal/clip-rsicd-demo/dashboard_featurefinder.py b/spaces/sujitpal/clip-rsicd-demo/dashboard_featurefinder.py deleted file mode 100644 index cc58012f6f173293732c6318a1b4fa6c6d82859a..0000000000000000000000000000000000000000 --- a/spaces/sujitpal/clip-rsicd-demo/dashboard_featurefinder.py +++ /dev/null @@ -1,160 +0,0 @@ -import jax -import flax -import matplotlib.pyplot as plt -import nmslib -import numpy as np -import os -import requests -import streamlit as st - -from tempfile import NamedTemporaryFile -from torchvision.transforms import Compose, Resize, ToPILImage -from transformers import CLIPProcessor, FlaxCLIPModel -from PIL import Image - -import utils - -BASELINE_MODEL = "openai/clip-vit-base-patch32" -MODEL_PATH = "flax-community/clip-rsicd-v2" - -IMAGE_VECTOR_FILE = "./vectors/test-bs128x8-lr5e-6-adam-ckpt-1.tsv" - -IMAGES_DIR = "./images" -DEMO_IMAGES_DIR = "./demo-images" - - -def split_image(X): - num_rows = X.shape[0] // 224 - num_cols = X.shape[1] // 224 - Xc = X[0 : num_rows * 224, 0 : num_cols * 224, :] - patches = [] - for j in range(num_rows): - for i in range(num_cols): - patches.append(Xc[j * 224 : (j + 1) * 224, - i * 224 : (i + 1) * 224, - :]) - return num_rows, num_cols, patches - - -def get_patch_probabilities(patches, searched_feature, - image_preprocesor, - model, processor): - images = [image_preprocesor(patch) for patch in patches] - text = "An aerial image of {:s}".format(searched_feature) - inputs = processor(images=images, - text=text, - return_tensors="jax", - padding=True) - outputs = model(**inputs) - probs = jax.nn.softmax(outputs.logits_per_text, axis=-1) - probs_np = np.asarray(probs)[0] - return probs_np - - -def get_image_ranks(probs): - temp = np.argsort(-probs) - ranks = np.empty_like(temp) - ranks[temp] = np.arange(len(probs)) - return ranks - - -def download_and_prepare_image(image_url): - """ - Take input image and resize it to 672x896 - """ - try: - image_raw = requests.get(image_url, stream=True,).raw - image = Image.open(image_raw).convert("RGB") - width, height = image.size - # print("WID,HGT:", width, height) - if width < 224 or height < 224: - return None - # take the short 
edge and reduce to 672 - if width < height: - resize_factor = 672 / width - image = image.resize((672, int(height * resize_factor))) - image = image.crop((0, 0, 672, 896)) - else: - resize_factor = 672 / height - image = image.resize((int(width * resize_factor), 896)) - image = image.crop((0, 0, 896, 672)) - return np.asarray(image) - except Exception as e: - # print(e) - return None - - - -def app(): - model, processor = utils.load_model(MODEL_PATH, BASELINE_MODEL) - - st.title("Find Features in Images") - st.markdown(""" - This demo shows the ability of the model to find specific features - (specified as text queries) in the image. As an example, say you wish to - find the parts of the following image that contain a `beach`, `houses`, - or `ships`. We partition the image into tiles of (224, 224) and report - how likely each of them are to contain each text features. - """) - st.image("demo-images/st_tropez_1.png") - st.image("demo-images/st_tropez_2.png") - st.markdown(""" - For this image and the queries listed above, our model reports that the - two left tiles are most likely to contain a `beach`, the two top right - tiles are most likely to contain `houses`, and the two bottom right tiles - are likely to contain `boats`. - - We have provided a few representative images from [Unsplash](https://unsplash.com/s/photos/aerial-view) - that you can experiment with. Use the image name to put in an initial feature - to look for, this will show the original image, and you will get more ideas - for features that you can ask the model to identify. - """) - image_file = st.selectbox( - "Sample Image File", - options=[ - "-- select one --", - "St-Tropez-Port.jpg", - "Acopulco-Bay.jpg", - "Highway-through-Forest.jpg", - "Forest-with-River.jpg", - "Eagle-Bay-Coastline.jpg", - "Multistoreyed-Buildings.jpg", - "Street-View-Malayasia.jpg", - ]) - image_url = st.text_input( - "OR provide an image URL", - value="https://static.eos.com/wp-content/uploads/2019/04/Main.jpg") - searched_feature = st.text_input("Feature to find", value="beach") - - if st.button("Find"): - if image_file.startswith("--"): - image = download_and_prepare_image(image_url) - else: - image = plt.imread(os.path.join("demo-images", image_file)) - - if image is None: - st.error("Image could not be downloaded, please try another one") - else: - st.image(image, caption="Input Image") - st.markdown("---") - num_rows, num_cols, patches = split_image(image) - image_preprocessor = Compose([ - ToPILImage(), - Resize(224) - ]) - num_rows, num_cols, patches = split_image(image) - patch_probs = get_patch_probabilities( - patches, - searched_feature, - image_preprocessor, - model, - processor) - patch_ranks = get_image_ranks(patch_probs) - pid = 0 - for i in range(num_rows): - cols = st.columns(num_cols) - for col in cols: - caption = "#{:d} p({:s})={:.3f}".format( - patch_ranks[pid] + 1, searched_feature, patch_probs[pid]) - col.image(patches[pid], caption=caption) - pid += 1 diff --git a/spaces/sunilbhatia/hackathon1/app/Hackathon_setup/exp_recognition.py b/spaces/sunilbhatia/hackathon1/app/Hackathon_setup/exp_recognition.py deleted file mode 100644 index ae4a578972eb2345c1ed8207946db8152dbe8c21..0000000000000000000000000000000000000000 --- a/spaces/sunilbhatia/hackathon1/app/Hackathon_setup/exp_recognition.py +++ /dev/null @@ -1,79 +0,0 @@ -import numpy as np -import cv2 -from matplotlib import pyplot as plt -import torch -# In the below line,remove '.' while working on your local system.However Make sure that '.' 
is present before face_recognition_model while uploading to the server, Do not remove it. -from .exp_recognition_model import * -from PIL import Image -import base64 -import io -import os -## Add more imports if required - -############################################################################################################################# -# Caution: Don't change any of the filenames, function names and definitions # -# Always use the current_path + file_name for refering any files, without it we cannot access files on the server # -############################################################################################################################# - -# Current_path stores absolute path of the file from where it runs. -current_path = os.path.dirname(os.path.abspath(__file__)) - - -#1) The below function is used to detect faces in the given image. -#2) It returns only one image which has maximum area out of all the detected faces in the photo. -#3) If no face is detected,then it returns zero(0). - -def detected_face(image): - eye_haar = current_path + '/haarcascade_eye.xml' - face_haar = current_path + '/haarcascade_frontalface_default.xml' - face_cascade = cv2.CascadeClassifier(face_haar) - eye_cascade = cv2.CascadeClassifier(eye_haar) - gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - faces = face_cascade.detectMultiScale(gray, 1.3, 5) - face_areas=[] - images = [] - required_image=0 - for i, (x,y,w,h) in enumerate(faces): - face_cropped = gray[y:y+h, x:x+w] - face_areas.append(w*h) - images.append(face_cropped) - required_image = images[np.argmax(face_areas)] - required_image = Image.fromarray(required_image) - return required_image - - -#1) Images captured from mobile is passed as parameter to the below function in the API call, It returns the Expression detected by your network. -#2) The image is passed to the function in base64 encoding, Code for decoding the image is provided within the function. -#3) Define an object to your network here in the function and load the weight from the trained network, set it in evaluation mode. 
-#4) Perform necessary transformations to the input(detected face using the above function), this should return the Expression in string form ex: "Anger" -#5) For loading your model use the current_path+'your model file name', anyhow detailed example is given in comments to the function -##Caution: Don't change the definition or function name; for loading the model use the current_path for path example is given in comments to the function -def get_expression(img): - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - ########################################################################################## - ##Example for loading a model using weight state dictionary: ## - ## face_det_net = facExpRec() #Example Network ## - ## model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device) ## - ## face_det_net.load_state_dict(model['net_dict']) ## - ## ## - ##current_path + '/' is path of the saved model if present in ## - ##the same path as this file, we recommend to put in the same directory ## - ########################################################################################## - ########################################################################################## - - face = detected_face(img) - if face==0: - face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)) - face = trnscm(face).unsqueeze(0) - - - # YOUR CODE HERE, load the model - expression_model = ExpressionCNN() - model = torch.load(current_path + '/expression_model_gpu.t7', map_location=device) - expression_model.load_state_dict(model['net_dict']) - - outputs = expression_model(face) - _, predicted = torch.max(outputs.data, 1) - - return classes[predicted.item()] diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/images.py b/spaces/supertori/files/stable-diffusion-webui/modules/images.py deleted file mode 100644 index 04b2727bd5c3cb6faeb6aa38c24d61f8f2f6ea4b..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/images.py +++ /dev/null @@ -1,669 +0,0 @@ -import datetime -import sys -import traceback - -import pytz -import io -import math -import os -from collections import namedtuple -import re - -import numpy as np -import piexif -import piexif.helper -from PIL import Image, ImageFont, ImageDraw, PngImagePlugin -from fonts.ttf import Roboto -import string -import json -import hashlib - -from modules import sd_samplers, shared, script_callbacks, errors -from modules.shared import opts, cmd_opts - -LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) - - -def image_grid(imgs, batch_size=1, rows=None): - if rows is None: - if opts.n_rows > 0: - rows = opts.n_rows - elif opts.n_rows == 0: - rows = batch_size - elif opts.grid_prevent_empty_spots: - rows = math.floor(math.sqrt(len(imgs))) - while len(imgs) % rows != 0: - rows -= 1 - else: - rows = math.sqrt(len(imgs)) - rows = round(rows) - if rows > len(imgs): - rows = len(imgs) - - cols = math.ceil(len(imgs) / rows) - - params = script_callbacks.ImageGridLoopParams(imgs, cols, rows) - script_callbacks.image_grid_callback(params) - - w, h = imgs[0].size - grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black') - - for i, img in enumerate(params.imgs): - grid.paste(img, box=(i % params.cols * w, i // params.cols * h)) - - return grid - - -Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"]) - - -def split_grid(image, tile_w=512, tile_h=512, overlap=64): - w = 
image.width - h = image.height - - non_overlap_width = tile_w - overlap - non_overlap_height = tile_h - overlap - - cols = math.ceil((w - overlap) / non_overlap_width) - rows = math.ceil((h - overlap) / non_overlap_height) - - dx = (w - tile_w) / (cols - 1) if cols > 1 else 0 - dy = (h - tile_h) / (rows - 1) if rows > 1 else 0 - - grid = Grid([], tile_w, tile_h, w, h, overlap) - for row in range(rows): - row_images = [] - - y = int(row * dy) - - if y + tile_h >= h: - y = h - tile_h - - for col in range(cols): - x = int(col * dx) - - if x + tile_w >= w: - x = w - tile_w - - tile = image.crop((x, y, x + tile_w, y + tile_h)) - - row_images.append([x, tile_w, tile]) - - grid.tiles.append([y, tile_h, row_images]) - - return grid - - -def combine_grid(grid): - def make_mask_image(r): - r = r * 255 / grid.overlap - r = r.astype(np.uint8) - return Image.fromarray(r, 'L') - - mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0)) - mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1)) - - combined_image = Image.new("RGB", (grid.image_w, grid.image_h)) - for y, h, row in grid.tiles: - combined_row = Image.new("RGB", (grid.image_w, h)) - for x, w, tile in row: - if x == 0: - combined_row.paste(tile, (0, 0)) - continue - - combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w) - combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0)) - - if y == 0: - combined_image.paste(combined_row, (0, 0)) - continue - - combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h) - combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap)) - - return combined_image - - -class GridAnnotation: - def __init__(self, text='', is_active=True): - self.text = text - self.is_active = is_active - self.size = None - - -def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0): - def wrap(drawing, text, font, line_length): - lines = [''] - for word in text.split(): - line = f'{lines[-1]} {word}'.strip() - if drawing.textlength(line, font=font) <= line_length: - lines[-1] = line - else: - lines.append(word) - return lines - - def get_font(fontsize): - try: - return ImageFont.truetype(opts.font or Roboto, fontsize) - except Exception: - return ImageFont.truetype(Roboto, fontsize) - - def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize): - for i, line in enumerate(lines): - fnt = initial_fnt - fontsize = initial_fontsize - while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0: - fontsize -= 1 - fnt = get_font(fontsize) - drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center") - - if not line.is_active: - drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4) - - draw_y += line.size[1] + line_spacing - - fontsize = (width + height) // 25 - line_spacing = fontsize // 2 - - fnt = get_font(fontsize) - - color_active = (0, 0, 0) - color_inactive = (153, 153, 153) - - pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4 - - cols = im.width // width - rows = im.height // height - - assert cols == len(hor_texts), f'bad 
number of horizontal texts: {len(hor_texts)}; must be {cols}' - assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}' - - calc_img = Image.new("RGB", (1, 1), "white") - calc_d = ImageDraw.Draw(calc_img) - - for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)): - items = [] + texts - texts.clear() - - for line in items: - wrapped = wrap(calc_d, line.text, fnt, allowed_width) - texts += [GridAnnotation(x, line.is_active) for x in wrapped] - - for line in texts: - bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt) - line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1]) - line.allowed_width = allowed_width - - hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts] - ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts] - - pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2 - - result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), "white") - - for row in range(rows): - for col in range(cols): - cell = im.crop((width * col, height * row, width * (col+1), height * (row+1))) - result.paste(cell, (pad_left + (width + margin) * col, pad_top + (height + margin) * row)) - - d = ImageDraw.Draw(result) - - for col in range(cols): - x = pad_left + (width + margin) * col + width / 2 - y = pad_top / 2 - hor_text_heights[col] / 2 - - draw_texts(d, x, y, hor_texts[col], fnt, fontsize) - - for row in range(rows): - x = pad_left / 2 - y = pad_top + (height + margin) * row + height / 2 - ver_text_heights[row] / 2 - - draw_texts(d, x, y, ver_texts[row], fnt, fontsize) - - return result - - -def draw_prompt_matrix(im, width, height, all_prompts, margin=0): - prompts = all_prompts[1:] - boundary = math.ceil(len(prompts) / 2) - - prompts_horiz = prompts[:boundary] - prompts_vert = prompts[boundary:] - - hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))] - ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))] - - return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin) - - -def resize_image(resize_mode, im, width, height, upscaler_name=None): - """ - Resizes an image with the specified resize_mode, width, and height. - - Args: - resize_mode: The mode to use when resizing the image. - 0: Resize the image to the specified width and height. - 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess. - 2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty with data from image. - im: The image to resize. - width: The width to resize the image to. - height: The height to resize the image to. - upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img. 
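 - Returns: - The resized image as a PIL Image with the requested width and height.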
- """ - - upscaler_name = upscaler_name or opts.upscaler_for_img2img - - def resize(im, w, h): - if upscaler_name is None or upscaler_name == "None" or im.mode == 'L': - return im.resize((w, h), resample=LANCZOS) - - scale = max(w / im.width, h / im.height) - - if scale > 1.0: - upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name] - assert len(upscalers) > 0, f"could not find upscaler named {upscaler_name}" - - upscaler = upscalers[0] - im = upscaler.scaler.upscale(im, scale, upscaler.data_path) - - if im.width != w or im.height != h: - im = im.resize((w, h), resample=LANCZOS) - - return im - - if resize_mode == 0: - res = resize(im, width, height) - - elif resize_mode == 1: - ratio = width / height - src_ratio = im.width / im.height - - src_w = width if ratio > src_ratio else im.width * height // im.height - src_h = height if ratio <= src_ratio else im.height * width // im.width - - resized = resize(im, src_w, src_h) - res = Image.new("RGB", (width, height)) - res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) - - else: - ratio = width / height - src_ratio = im.width / im.height - - src_w = width if ratio < src_ratio else im.width * height // im.height - src_h = height if ratio >= src_ratio else im.height * width // im.width - - resized = resize(im, src_w, src_h) - res = Image.new("RGB", (width, height)) - res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) - - if ratio < src_ratio: - fill_height = height // 2 - src_h // 2 - res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0)) - res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h)) - elif ratio > src_ratio: - fill_width = width // 2 - src_w // 2 - res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0)) - res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0)) - - return res - - -invalid_filename_chars = '<>:"/\\|?*\n' -invalid_filename_prefix = ' ' -invalid_filename_postfix = ' .' 
-re_nonletters = re.compile(r'[\s' + string.punctuation + ']+') -re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)") -re_pattern_arg = re.compile(r"(.*)<([^>]*)>$") -max_filename_part_length = 128 - - -def sanitize_filename_part(text, replace_spaces=True): - if text is None: - return None - - if replace_spaces: - text = text.replace(' ', '_') - - text = text.translate({ord(x): '_' for x in invalid_filename_chars}) - text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length] - text = text.rstrip(invalid_filename_postfix) - return text - - -class FilenameGenerator: - replacements = { - 'seed': lambda self: self.seed if self.seed is not None else '', - 'steps': lambda self: self.p and self.p.steps, - 'cfg': lambda self: self.p and self.p.cfg_scale, - 'width': lambda self: self.image.width, - 'height': lambda self: self.image.height, - 'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False), - 'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False), - 'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash), - 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.model_name, replace_spaces=False), - 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'), - 'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime], [datetime