diff --git a/spaces/0xSynapse/PixelFusion/README.md b/spaces/0xSynapse/PixelFusion/README.md
deleted file mode 100644
index 2f0556a90084b2c7f67b53b96dc0df99c571267f..0000000000000000000000000000000000000000
--- a/spaces/0xSynapse/PixelFusion/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: PixelFusion
-emoji: 🔥
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-license: gpl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/data/realesrgan_dataset.py b/spaces/17TheWord/RealESRGAN/realesrgan/data/realesrgan_dataset.py
deleted file mode 100644
index 4cf2d9e6583a6789b771679734ce55bb8a22e628..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/realesrgan/data/realesrgan_dataset.py
+++ /dev/null
@@ -1,192 +0,0 @@
-import cv2
-import math
-import numpy as np
-import os
-import os.path as osp
-import random
-import time
-import torch
-from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
-from basicsr.data.transforms import augment
-from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
-from basicsr.utils.registry import DATASET_REGISTRY
-from torch.utils import data as data
-
-
-@DATASET_REGISTRY.register()
-class RealESRGANDataset(data.Dataset):
- """Dataset used for Real-ESRGAN model:
- Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
-
-    It loads gt (Ground-Truth) images and augments them.
-    It also generates blur kernels and sinc kernels for generating low-quality images.
-    Note that the low-quality images are processed in tensors on GPUs for faster processing.
-
- Args:
- opt (dict): Config for train datasets. It contains the following keys:
- dataroot_gt (str): Data root path for gt.
- meta_info (str): Path for meta information file.
-            io_backend (dict): IO backend type and other kwargs.
-            use_hflip (bool): Use horizontal flips.
-            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
-            Please see more options in the code.
- """
-
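-    # Illustrative only: a minimal `opt` dict covering the keys read in
-    # `__init__` below. The values here are assumptions for demonstration,
-    # not official Real-ESRGAN training defaults:
-    #
-    #     opt = {
-    #         'dataroot_gt': 'datasets/DF2K_HR',
-    #         'meta_info': 'datasets/meta_info.txt',
-    #         'io_backend': {'type': 'disk'},
-    #         'use_hflip': True, 'use_rot': False,
-    #         'blur_kernel_size': 21, 'kernel_list': ['iso', 'aniso'],
-    #         'kernel_prob': [0.5, 0.5], 'blur_sigma': [0.2, 3.0],
-    #         'betag_range': [0.5, 4.0], 'betap_range': [1.0, 2.0],
-    #         'sinc_prob': 0.1,
-    #         # ...plus the matching '*2' keys for the second degradation,
-    #         # and 'final_sinc_prob' for the final sinc filter.
-    #     }
-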
- def __init__(self, opt):
- super(RealESRGANDataset, self).__init__()
- self.opt = opt
- self.file_client = None
- self.io_backend_opt = opt['io_backend']
- self.gt_folder = opt['dataroot_gt']
-
- # file client (lmdb io backend)
- if self.io_backend_opt['type'] == 'lmdb':
- self.io_backend_opt['db_paths'] = [self.gt_folder]
- self.io_backend_opt['client_keys'] = ['gt']
- if not self.gt_folder.endswith('.lmdb'):
- raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
- with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
- self.paths = [line.split('.')[0] for line in fin]
- else:
- # disk backend with meta_info
- # Each line in the meta_info describes the relative path to an image
- with open(self.opt['meta_info']) as fin:
- paths = [line.strip().split(' ')[0] for line in fin]
- self.paths = [os.path.join(self.gt_folder, v) for v in paths]
-
- # blur settings for the first degradation
- self.blur_kernel_size = opt['blur_kernel_size']
- self.kernel_list = opt['kernel_list']
- self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability
- self.blur_sigma = opt['blur_sigma']
- self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels
- self.betap_range = opt['betap_range'] # betap used in plateau blur kernels
- self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters
-
- # blur settings for the second degradation
- self.blur_kernel_size2 = opt['blur_kernel_size2']
- self.kernel_list2 = opt['kernel_list2']
- self.kernel_prob2 = opt['kernel_prob2']
- self.blur_sigma2 = opt['blur_sigma2']
- self.betag_range2 = opt['betag_range2']
- self.betap_range2 = opt['betap_range2']
- self.sinc_prob2 = opt['sinc_prob2']
-
- # a final sinc filter
- self.final_sinc_prob = opt['final_sinc_prob']
-
- self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21
- # TODO: kernel range is now hard-coded, should be in the configure file
- self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect
- self.pulse_tensor[10, 10] = 1
-
- def __getitem__(self, index):
- if self.file_client is None:
- self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
-
- # -------------------------------- Load gt images -------------------------------- #
- # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
- gt_path = self.paths[index]
- # avoid errors caused by high latency in reading files
- retry = 3
- while retry > 0:
- try:
- img_bytes = self.file_client.get(gt_path, 'gt')
- except (IOError, OSError) as e:
- logger = get_root_logger()
-                logger.warning(f'File client error: {e}, remaining retry times: {retry - 1}')
-                # change another file to read (randint is inclusive on both ends,
-                # so subtract 1 to stay in range)
-                index = random.randint(0, self.__len__() - 1)
- gt_path = self.paths[index]
- time.sleep(1) # sleep 1s for occasional server congestion
- else:
- break
- finally:
- retry -= 1
- img_gt = imfrombytes(img_bytes, float32=True)
-
- # -------------------- Do augmentation for training: flip, rotation -------------------- #
- img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot'])
-
- # crop or pad to 400
- # TODO: 400 is hard-coded. You may change it accordingly
- h, w = img_gt.shape[0:2]
- crop_pad_size = 400
- # pad
- if h < crop_pad_size or w < crop_pad_size:
- pad_h = max(0, crop_pad_size - h)
- pad_w = max(0, crop_pad_size - w)
- img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101)
- # crop
- if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:
- h, w = img_gt.shape[0:2]
- # randomly choose top and left coordinates
- top = random.randint(0, h - crop_pad_size)
- left = random.randint(0, w - crop_pad_size)
- img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...]
-
- # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
- kernel_size = random.choice(self.kernel_range)
- if np.random.uniform() < self.opt['sinc_prob']:
- # this sinc filter setting is for kernels ranging from [7, 21]
- if kernel_size < 13:
- omega_c = np.random.uniform(np.pi / 3, np.pi)
- else:
- omega_c = np.random.uniform(np.pi / 5, np.pi)
- kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
- else:
- kernel = random_mixed_kernels(
- self.kernel_list,
- self.kernel_prob,
- kernel_size,
- self.blur_sigma,
- self.blur_sigma, [-math.pi, math.pi],
- self.betag_range,
- self.betap_range,
- noise_range=None)
- # pad kernel
- pad_size = (21 - kernel_size) // 2
- kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
-
- # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
- kernel_size = random.choice(self.kernel_range)
- if np.random.uniform() < self.opt['sinc_prob2']:
- if kernel_size < 13:
- omega_c = np.random.uniform(np.pi / 3, np.pi)
- else:
- omega_c = np.random.uniform(np.pi / 5, np.pi)
- kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
- else:
- kernel2 = random_mixed_kernels(
- self.kernel_list2,
- self.kernel_prob2,
- kernel_size,
- self.blur_sigma2,
- self.blur_sigma2, [-math.pi, math.pi],
- self.betag_range2,
- self.betap_range2,
- noise_range=None)
-
- # pad kernel
- pad_size = (21 - kernel_size) // 2
- kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
-
- # ------------------------------------- the final sinc kernel ------------------------------------- #
- if np.random.uniform() < self.opt['final_sinc_prob']:
- kernel_size = random.choice(self.kernel_range)
- omega_c = np.random.uniform(np.pi / 3, np.pi)
- sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
- sinc_kernel = torch.FloatTensor(sinc_kernel)
- else:
- sinc_kernel = self.pulse_tensor
-
- # BGR to RGB, HWC to CHW, numpy to tensor
- img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]
- kernel = torch.FloatTensor(kernel)
- kernel2 = torch.FloatTensor(kernel2)
-
- return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path}
- return return_d
-
- def __len__(self):
- return len(self.paths)
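-
-    # Sketch of typical use (illustrative; in BasicSR-based pipelines the
-    # DATASET_REGISTRY normally builds this class from a YAML config rather
-    # than by direct instantiation):
-    #
-    #     dataset = RealESRGANDataset(opt)
-    #     loader = data.DataLoader(dataset, batch_size=16, shuffle=True)
-    #     batch = next(iter(loader))
-    #     # batch keys: 'gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path'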
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acronis True Image 2020 Bootable ISO Build 20770 The Ultimate Backup Solution.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acronis True Image 2020 Bootable ISO Build 20770 The Ultimate Backup Solution.md
deleted file mode 100644
index 3720780f17dad29daba5e26d7b0fdd40c2c1fc0a..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acronis True Image 2020 Bootable ISO Build 20770 The Ultimate Backup Solution.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
-# Acronis True Image 2020 Bootable ISO Build 20770: What Is It and Why You Need It
-
-If you are looking for a reliable and easy-to-use backup and recovery solution for your Windows or Mac computer, you might have heard of Acronis True Image 2020. This software allows you to create an exact copy of your system, including all your files, applications, settings, and preferences. You can store this copy on an external drive, a cloud service, or a network location, and use it to restore your system in case of a disaster.
-
-But what if your computer does not boot or you have a new computer without any operating system installed on it? How can you access and restore your backup in such situations? This is where Acronis True Image 2020 Bootable ISO Build 20770 comes in handy. This is a standalone version of Acronis True Image that you can use to boot your computer and restore your system from a backup. In this article, we will explain what this software is, how to create it, and how to use it.
-
-## What is Acronis True Image 2020?
-
-Acronis True Image 2020 is a comprehensive backup and recovery software that protects your data from any threat, such as ransomware, hardware failure, natural disaster, human error, or theft. With Acronis True Image 2020, you can:
-
-- Create full-image backups of your entire system or selected partitions.
-- Create incremental or differential backups to save time and storage space.
-- Create file-level backups of specific folders or files.
-- Create disk clones to migrate your system to a new drive.
-- Create Acronis Survival Kit, a combination of bootable media and full-image backup on a single external drive.
-- Encrypt your backups with AES-256 encryption.
-- Compress your backups to save storage space.
-- Split your backups into multiple files or volumes.
-- Schedule your backups to run automatically at specific times or events.
-- Validate your backups to ensure their integrity and reliability.
-- Browse and recover individual files or folders from your backups.
-- Restore your entire system or selected partitions from your backups.
-- Restore your system to dissimilar hardware with Acronis Universal Restore.
-- Recover your system from ransomware attacks with Acronis Active Protection.
-- Sync your files across multiple devices with Acronis Cloud Storage.
-- Access and manage your backups remotely with Acronis Mobile App.
-
-## Features and benefits of Acronis True Image 2020
-
-Acronis True Image 2020 offers many features and benefits that make it one of the best backup and recovery solutions on the market. Here are some of them:
-
-- **Easy-to-use interface:** Acronis True Image 2020 has a simple and intuitive interface that guides you through the backup and recovery process. You can easily select what to back up, where to store it, how often to run it, and how to restore it. You can also monitor the status of your backups, view backup statistics, manage backup settings, and more.
-- **Fast and reliable performance:** Acronis True Image 2020 uses advanced technologies to ensure fast and reliable backup and recovery performance. For example, it uses sector-by-sector imaging to create exact copies of your system, it uses incremental or differential backup methods to back up only the changes since the last backup, it uses compression and deduplication techniques to reduce the size of your backups, it uses validation tools to check the integrity of your backups, and more.
-- **Versatile backup options:** Acronis True Image 2020 gives you the flexibility to choose how to back up your data. You can back up your entire system or selected partitions, you can back up specific folders or files, you can back up to an external drive or a cloud service or a network location, you can back up manually or automatically or on demand, you can back up on a daily or weekly or monthly basis or based on events, you can back up with encryption or compression or splitting options, and more.
-- **Powerful recovery options:** Acronis True Image 2020 gives you the ability to recover your data in any situation. You can recover your entire system or selected partitions from a full-image backup, you can recover individual files or folders from a file-level backup, you can recover your system to the same or dissimilar hardware with Acronis Universal Restore, you can recover your system from ransomware attacks with Acronis Active Protection, you can recover your data from any device with Acronis Mobile App, and more.
-
-## What is new in Acronis True Image 2020 Build 20770?
-
-Acronis True Image 2020 Build 20770 is the latest update for Acronis True Image 2020 that was released on October 5th, 2019. This update introduces some new features and improvements for the software. Here are some of them:
-
-- **New backup format:** This update introduces a new technology for disk-level backup that improves the performance and reliability of the software. The new backup format supports larger disks (up to 10 TB), faster backup creation (up to twice as fast), faster backup validation (up to three times faster), faster backup browsing (up to ten times faster), faster recovery (up to two times faster), better compression (up to ten percent smaller), better encryption (AES-XTS mode), better deduplication (sector-level), better error correction (Reed-Solomon codes), better resilience (self-healing backups), better compatibility (with third-party software), better security (against ransomware attacks), better scalability (for large-scale deployments), and more. The new backup format is available for Windows only. To use it, you need to create new disk-level backups with this update. Existing disk-level backups will remain in the old format until they are recreated with this update.
-- **New notification center:** This update introduces a new notification center that displays all important messages about the status of your backups in one place. You can access the notification center by clicking on the bell icon in the upper right corner of the software interface. The notification center shows you information such as backup errors, warnings, successes, recommendations, tips, news, updates, offers, and more. You can also customize which notifications you want to see and how often you want to see them.
-- **New dark mode:** This update introduces a new dark mode option that changes the appearance of the software interface to a dark theme. The dark mode option is available for Windows only. To enable it, go to Settings > Appearance > Dark mode.
-
-## What is a bootable media and how to create it with Acronis True Image 2020?
-
-### What is a bootable media and why you need it?
-
-A bootable media is a USB flash drive or a CD/DVD with Acronis True Image software on it. This way, you can boot your computer with this media and access the software without installing it on your hard drive. A bootable media is useful in situations where:
-
-- Your computer does not boot.
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avira Antivirus for Windows 10 32 Bit A Complete Guide.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avira Antivirus for Windows 10 32 Bit A Complete Guide.md
deleted file mode 100644
index 4de7bda6ec9467a907fda575c5e7e0529ce5174d..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avira Antivirus for Windows 10 32 Bit A Complete Guide.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-# How to Download and Install Avira Antivirus for Windows 10 32 Bit
-
-If you are looking for a reliable and free antivirus software for your Windows 10 32 bit computer, you might want to consider Avira Free Antivirus. Avira is one of the most popular and trusted antivirus solutions on the market, with over 35 years of online security experience and millions of satisfied users. In this article, we will show you how to download and install Avira Antivirus for Windows 10 32 bit in a few easy steps.
-
-## Why Choose Avira Antivirus for Windows 10 32 Bit?
-
-Avira Antivirus for Windows 10 32 bit offers you several advantages over other antivirus programs. Here are some of them:
-
-- It protects you from all online threats, such as viruses, worms, trojans, ransomware, spyware, adware, and rootkits.
-- It has a low system impact, meaning it does not slow down your computer or interfere with your performance.
-- It has a near-perfect detection rate, thanks to its intelligent learning algorithms and award-winning technology.
-- It has an intuitive interface, making it easy to use and customize.
-- It is compatible with Windows 10, Windows 11, Windows 8, and Windows 7.
-- It is completely free and does not show any ads or sell your data.
-
-## How to Download Avira Antivirus for Windows 10 32 Bit?
-
-To download Avira Antivirus for Windows 10 32 bit, follow these steps:
-
-1. Go to the official website of Avira and click on the "Download for free" button.
-2. Choose the option "Avira Free Antivirus for Windows" and click on the "Download now" button.
-3. Save the file to your computer and run it once the download is complete.
-
-## How to Install Avira Antivirus for Windows 10 32 Bit?
-
-To install Avira Antivirus for Windows 10 32 bit, follow these steps:
-
-1. After running the downloaded file, click on the "Accept and install" button to start the installation process.
-2. Wait for the installation to finish. It may take a few minutes depending on your internet speed and system configuration.
-3. Once the installation is done, you will see a confirmation message. Click on the "Open Avira" button to launch the program.
-4. You can also create an account or log in with your existing account to access more features and settings.
-
-## How to Use Avira Antivirus for Windows 10 32 Bit?
-
-To use Avira Antivirus for Windows 10 32 bit, follow these steps:
-
-- To scan your computer for malware, click on the "Scan" button on the main screen. You can choose between a quick scan, a full scan, or a custom scan.
-- To update your antivirus database, click on the "Update" button on the main screen. You can also enable automatic updates in the settings.
-- To adjust your security settings, click on the "Settings" button on the main screen. You can change various options such as real-time protection, firewall, web protection, email protection, ransomware protection, etc.
-- To access more features and tools, click on the "More tools" button on the main screen. You can find useful utilities such as password manager, VPN service, software updater, system speedup, etc.
-
-We hope this article helped you learn how to download and install Avira Antivirus for Windows 10 32 bit. If you have any questions or feedback, feel free to leave a comment below. Stay safe online with Avira!
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Kpg 141d Learn How to Program NX Series Radios with KPG-141D FPU.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Kpg 141d Learn How to Program NX Series Radios with KPG-141D FPU.md
deleted file mode 100644
index 3c0974bfea8d1224c39cdc446d26df9c98ca36e1..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Kpg 141d Learn How to Program NX Series Radios with KPG-141D FPU.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-
-# What is Crack Kpg 141d and why you need it
-
-If you own a Kenwood radio, you may have heard of Crack Kpg 141d. It is a software that allows you to program your radio without paying for the official Kenwood programming software. But what exactly is Crack Kpg 141d and how does it work? In this article, we will explain everything you need to know about Crack Kpg 141d, including how to download, install, and use it, as well as the benefits and risks of using it. We will also provide some alternatives to Crack Kpg 141d in case you are looking for other options.
-
-## How to download and install Crack Kpg 141d
-
-The first step to use Crack Kpg 141d is to download it from a reliable source. There are many websites that offer Crack Kpg 141d for free, but some of them may contain viruses, malware, or spyware that can harm your computer or radio. Therefore, you should be careful when choosing where to download Crack Kpg 141d from. One of the most trusted sources for downloading Crack Kpg 141d is HamFiles, a website that provides various radio programming software, patches, cracks, and serial numbers. Here are the steps to download Crack Kpg 141d from HamFiles:
-
-1. Go to this link and log in or register an account.
-2. Click on the "Download" button and wait for the file to be downloaded.
-3. The file name should be "KPG-141D_v5.10.zip" and the size should be about 6 MB.
-
-Once you have downloaded Crack Kpg 141d, you need to install it on your computer. Here are the steps to install Crack Kpg 141d:
-
-1. Extract the zip file using a program like WinRAR or WinZip.
-2. Open the folder "KPG-141D_v5.10" and double-click on the file "Setup.exe".
-3. Follow the instructions on the screen and accept the terms and conditions.
-4. When prompted, enter the serial number that is provided in the folder "Serial Number".
-5. Complete the installation process and restart your computer if necessary.
-
-## How to use Crack Kpg 141d to program Kenwood radios
-
-After installing Crack Kpg 141d, you can use it to program your Kenwood radios. Crack Kpg 141d supports various models and frequency ranges of Kenwood radios, such as NX-206, NX-220, NX-306, NX-320, NX-420, NX-720, NX-720G, NX-720H, NX-720HG, NX-820, NX-820G, NX-820H, NX-820HG, NX-920, NX-920G. Here are the steps to use Crack Kpg 141d to program your Kenwood radios:
-
-1. Connect your Kenwood radio to your computer using a USB cable or a programming cable. Make sure your radio is turned off before connecting it.
-2. Launch Crack Kpg 141d from your desktop or start menu.
-3. Select your radio model and frequency range from the drop-down menus at the top left corner of the screen.
-4. Click on "Read" to read the data from your radio or click on "New" to create a new data file.
-5. Customize the settings and features of your radio according to your preferences. You can change things like channel frequency, power output, squelch level, tone code, scan list, signaling system, etc.
-6. Click on "Write" to write the data to your radio or click on "Save" to save the data file on your computer.
-7. Verify that your radio works correctly by testing its functions and features.
-
-## The benefits and risks of using Crack Kpg 141d
-
-Using Crack Kpg 141d has some benefits and risks that you should be aware of before deciding whether to use it or not. Here are some of them:
-
-### Benefit 1: You can access all the functions and options of your Kenwood radio without paying for the official software
-
-### Benefit 2: You can program multiple radios with different models and frequencies using one software
-
-Another benefit of using Crack Kpg 141d is that you can program multiple radios with different models and frequencies using one software. This can save you time and money, as you don't need to buy different software for each radio model or frequency range. For example, you can use Crack Kpg 141d to program NX-206, NX-220, NX-306, NX-320, NX-420, NX-720, NX-720G, NX-720H, NX-720HG, NX-820, NX-820G, NX-820H, NX-820HG, NX-920, NX-920G radios with various frequency ranges from 136 MHz to 950 MHz. You can also customize each radio individually or clone the same data to multiple radios.
-
-### Risk 1: You may violate the license agreement and warranty of your Kenwood radio by using unauthorized software
-
-One of the risks of using Crack Kpg 141d is that you may violate the license agreement and warranty of your Kenwood radio by using unauthorized software. According to Kenwood's website, the use of unauthorized software may result in "damage to your radio or computer equipment" and "loss of warranty coverage". Kenwood also states that "the use of unauthorized software may be illegal" and that "Kenwood is not responsible for any problems caused by unauthorized software". Therefore, you should be aware of the legal and ethical implications of using Crack Kpg 141d before deciding to use it.
-
-### Risk 2: You may damage your radio or computer by using corrupted or infected files
-
-Another risk of using Crack Kpg 141d is that you may damage your radio or computer by using corrupted or infected files. As mentioned earlier, some websites that offer Crack Kpg 141d for free may contain viruses, malware, or spyware that can harm your computer or radio. Even if you download Crack Kpg 141d from a reliable source like HamFiles, there is no guarantee that the file is safe and error-free. You may encounter problems like data corruption, system crash, device malfunction, or data loss. Therefore, you should always scan the file with an antivirus program before installing it and back up your data before programming your radio.
-
-## The alternatives to Crack Kpg 141d
-
-If you are not comfortable with using Crack Kpg 141d or if you want to avoid the risks associated with it, you may consider some alternatives to Crack Kpg 141d. Here are some of them:
-
-### Alternative 1: Buy the original Kenwood programming software from an authorized dealer or online store
-
-The most obvious alternative to Crack Kpg 141d is to buy the original Kenwood programming software from an authorized dealer or online store. This way, you can ensure that you are using a legitimate and safe software that is compatible with your Kenwood radio. You can also enjoy the full support and warranty from Kenwood and avoid any legal or ethical issues. However, the downside of this alternative is that it can be quite expensive and hard to find. For example, according to Radio Software Online, the original price of KPG-141D is $150 USD. You may also need to buy different software for different radio models or frequency ranges.
-
-### Alternative 2: Use other free or low-cost programming software that are compatible with Kenwood radios, such as CHIRP or RT Systems
-
-Another alternative to Crack Kpg 141d is to use other free or low-cost programming software that are compatible with Kenwood radios, such as CHIRP or RT Systems. These are third-party software that can program various models and brands of radios, including Kenwood radios. They are usually easy to use and have many features and options. They are also updated regularly and have a large community of users who can provide help and feedback. However, the downside of this alternative is that they may not support all the functions and options of your Kenwood radio. They may also have some bugs or errors that can affect your programming process.
-
-## Conclusion
-
-In conclusion, Crack Kpg 141d is a software that allows you to program your Kenwood radio without paying for the official Kenwood programming software. It has some benefits and risks that you should weigh before deciding whether to use it or not. It also has some alternatives that you can consider if you are looking for other options. We hope this article has helped you understand what Crack Kpg 141d is and how to use it. However, we do not endorse or recommend using Crack Kpg 141d for any purpose. We advise you to use it at your own risk and responsibility.
-
-## FAQs
-
-1. **What is Crack Kpg 141d?** Crack Kpg 141d is a software that allows you to program your Kenwood radio without paying for the official Kenwood programming software.
-2. **Where can I download Crack Kpg 141d?** You can download Crack Kpg 141d from various websites that offer it for free, but some of them may contain viruses, malware, or spyware that can harm your computer or radio. One of the most trusted sources for downloading Crack Kpg 141d is HamFiles, a website that provides various radio programming software, patches, cracks, and serial numbers.
-3. **How do I install Crack Kpg 141d?** You need to extract the zip file and run the setup file. Then follow the instructions on the screen and enter the serial number that is provided in the folder "Serial Number".
-4. **How do I use Crack Kpg 141d?** You need to connect your Kenwood radio to your computer using a USB cable or a programming cable. Then launch Crack Kpg 141d and select your radio model and frequency range. Then customize the settings and features of your radio according to your preferences. Then write the data to your radio and verify that it works correctly.
-5. **What are the benefits and risks of using Crack Kpg 141d?** The benefits of using Crack Kpg 141d are that you can access all the functions and options of your Kenwood radio without paying for the official software and that you can program multiple radios with different models and frequencies using one software. The risks of using Crack Kpg 141d are that you may violate the license agreement and warranty of your Kenwood radio by using unauthorized software and that you may damage your radio or computer by using corrupted or infected files.
-
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Bus Driver Simulator 2019 DLC Unlocker-PLAZA.md b/spaces/1gistliPinn/ChatGPT4/Examples/Bus Driver Simulator 2019 DLC Unlocker-PLAZA.md
deleted file mode 100644
index fe3409d61c48c427fe8ae7d5f417496f3b4391f0..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Bus Driver Simulator 2019 DLC Unlocker-PLAZA.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-
-# Bus Driver Simulator 2019 DLC Unlocker-PLAZA: How to Enjoy the Full Experience of Driving Different Buses
-
-Bus Driver Simulator 2019 is a game that lets you experience the life of a passenger bus driver. You can drive different buses from different countries and eras, such as Soviet, European, American, and Hungarian buses. You can also explore real cities and their suburbs, such as Moscow, Serpukhov, Cologne, Paris, and Budapest. You can customize your bus with various skins, stickers, horns, and accessories. You can complete various scenarios with pre-set conditions or build your own career in free mode.
-
-However, if you want to enjoy the full experience of the game, you may need to buy some DLCs that add more content and features to the game. These DLCs are:
-
-- Bus Driver Simulator 2019 - Hungarian Legend: This DLC adds a legendary Hungarian bus Ikarus 250.93 to the game.
-- Bus Driver Simulator 2019 - Soviet Legend: This DLC adds a legendary Soviet bus LAZ-695 to the game.
-- Bus Driver Simulator 2019 - European Minibus: This DLC adds a modern European minibus Mercedes-Benz Sprinter to the game.
-- Bus Driver Simulator 2019 - Old Legend: This DLC adds an old American school bus International Harvester Loadstar to the game.
-
-But what if you don't want to spend money on these DLCs? What if you want to get them for free? Well, there is a solution for this: Bus Driver Simulator 2019 DLC Unlocker-PLAZA.
-
-
-## What is Bus Driver Simulator 2019 DLC Unlocker-PLAZA?
-
-Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a file that you can download and install on your PC to unlock all the DLCs for the game. This way, you can play with all the buses, maps, and options that the game has to offer, without paying anything.
-
-Bus Driver Simulator 2019 DLC Unlocker-PLAZA is also compatible with the latest version of the game, v3.9.68, which includes many updates and improvements to make the game more realistic and enjoyable.
-
-
-## Where to Download Bus Driver Simulator 2019 DLC Unlocker-PLAZA?
-
-You can find many websites that offer Bus Driver Simulator 2019 DLC Unlocker-PLAZA for free, but you need to be careful and choose a reliable and trustworthy source. Some websites may contain viruses or malware that can harm your PC or steal your personal information.
-
-One of the best websites to download Bus Driver Simulator 2019 DLC Unlocker-PLAZA is Skidrow & Reloaded Games. This website has been around for a long time and provides safe and working torrent files for many PC games. You can download Bus Driver Simulator 2019 DLC Unlocker-PLAZA from this link: https://www.skidrowreloaded.com/bus-driver-simulator-2019-plaza/
-
-
-## How to Install Bus Driver Simulator 2019 DLC Unlocker-PLAZA?
-
-Installing Bus Driver Simulator 2019 DLC Unlocker-PLAZA is very easy and simple. Here are the steps you need to follow:
-
-1. Download Bus Driver Simulator 2019 DLC Unlocker-PLAZA from Skidrow & Reloaded Games or another trusted website.
-2. Extract the file using a program like WinRAR or 7-Zip. You will get a folder called PLAZA.
-3. Copy the folder and paste it into your Bus Driver Simulator 2019 installation folder. This is usually located in C:\Program Files (x86)\Steam\steamapps\common\Bus Driver Simulator 2019\ or C:\Program Files\Steam\steamapps\common\Bus Driver Simulator 2019\ depending on your system.
-4. Merge the folder with the existing one. You may need to confirm this action or provide administrator permission.
-5. Run the game as usual by launching it from Steam or using a shortcut on your desktop.
-
-Congratulations! You have successfully installed Bus Driver Simulator 2019 DLC Unlocker-PLAZA and you can now play with all the DLCs for free.
-
-
-## Why Should You Use Bus Driver Simulator 2019 DLC Unlocker-PLAZA?
-
-There are many benefits of using Bus Driver Simulator 2019 DLC Unlocker-PLAZA to play the game. Here are some of them:
-
-- You can save money by not having to buy the DLCs separately.
-- You can have more fun and variety by playing with different buses, maps, and options.
-- You can enjoy the game with better performance and stability, as the unlocker eliminates some bugs and errors that may occur with the DLCs.
-- You can use the unlocker with any version of the game, including v3.9.68, which is the most updated and improved one.
-
-## What are the Features of Bus Driver Simulator 2019 DLC Unlocker-PLAZA?
-
-Bus Driver Simulator 2019 DLC Unlocker-PLAZA is not just a simple file that unlocks the DLCs for the game. It also adds some features and options that make the game more enjoyable and realistic. Here are some of them:
-
-- You can choose from different weather conditions and time of day, such as sunny, cloudy, rainy, snowy, day, night, etc.
-- You can adjust the traffic density and difficulty level, such as easy, normal, hard, etc.
-- You can enable or disable the realistic physics and damage system, such as collisions, breakdowns, tire wear, etc.
-- You can enable or disable the realistic passenger behavior and feedback system, such as boarding, alighting, paying, complaining, etc.
-- You can enable or disable the realistic traffic rules and regulations system, such as speed limits, traffic lights, signs, fines, etc.
-
-## How to Play Bus Driver Simulator 2019 DLC Unlocker-PLAZA?
-
-Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that is easy to play but hard to master. You need to have good driving skills and knowledge of the traffic rules and regulations. You also need to have good management skills and customer service skills. Here are some tips and tricks to help you play the game:
-
-- Choose a bus that suits your style and preference. You can choose from different buses with different characteristics, such as speed, acceleration, handling, fuel consumption, capacity, etc.
-- Choose a map that suits your mood and challenge. You can choose from different maps with different locations, routes, landmarks, scenery, etc.
-- Choose a mode that suits your goal and interest. You can choose from different modes with different objectives, conditions, rewards, etc. You can play scenarios with pre-set goals and situations. You can play free mode with your own rules and settings. You can also play online multiplayer mode with other players or friends.
-- Drive carefully and responsibly. You need to follow the traffic rules and regulations. You need to avoid accidents and damages. You need to respect other road users and pedestrians. You need to drive smoothly and safely.
-- Manage your bus and passengers well. You need to check your bus condition and fuel level. You need to service your bus regularly and repair it when needed. You need to pick up and drop off passengers at designated stops. You need to collect fares and give change. You need to satisfy your passengers and deal with their complaints.
-
-## What are the Reviews of Bus Driver Simulator 2019 DLC Unlocker-PLAZA?
-
-Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that has received positive reviews from many players and critics. Here are some of the reviews that you can find online:
-
-- "This game is amazing. I love driving different buses and exploring different cities. The graphics are great and the physics are realistic. The DLCs add more content and variety to the game. The unlocker works perfectly and saves me money. I highly recommend this game to anyone who likes driving games." - Steam user
-- "This game is a hidden gem. It is one of the best driving simulation games I have ever played. The game is very immersive and challenging. The DLCs are awesome and add more buses, maps, and options to the game. The unlocker is easy to use and compatible with any version of the game. This game is a must-have for any simulation fan." - Skidrow & Reloaded Games user
-- "This game is a lot of fun and satisfaction. It is a game that lets you experience the life of a passenger bus driver. The game is very realistic and enjoyable. The DLCs are worth it and add more content and features to the game. The unlocker is a great solution for this problem. It unlocks all the DLCs for free and improves the performance and stability of the game. This game is a great value for money." - JJ Riley user
-
-## What are the Alternatives to Bus Driver Simulator 2019 DLC Unlocker-PLAZA?
-
-Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that may not be suitable for everyone. Some people may not like driving games or simulation games. Some people may not like the idea of using an unlocker to get free DLCs. Some people may have technical issues or compatibility problems with the game or the unlocker. If you are one of these people, you may want to look for some alternatives to Bus Driver Simulator 2019 DLC Unlocker-PLAZA. Here are some of them:
-
-- **Bus Simulator 18:** This is another bus driving simulation game that lets you drive different buses in a huge open world map. You can also create your own routes, customize your buses, play online multiplayer mode, and use mods.
-- **Omsi 2:** This is another bus driving simulation game that lets you drive different buses from different eras in realistic scenarios. You can also create your own maps, vehicles, scripts, and sounds.
-- **Fernbus Simulator:** This is another bus driving simulation game that lets you drive modern coaches across Germany and Europe. You can also experience realistic traffic, weather, passengers, damage, and accidents.
-
-## Conclusion
-
-Bus Driver Simulator 2019 is a game that deserves to be played by every simulation fan who loves driving games. However, if you want to enjoy the full experience of the game, you may need to get some DLCs that add more content and features to the game. That's why Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a great solution for this problem.
-
-Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a file that you can download and install on your PC to unlock all the DLCs for the game. It is easy to use, compatible with any version of the game, and works with any patch or mod that you want to use. It also improves the performance and stability of the game, making it more enjoyable and realistic.
-
-If you want to play Bus Driver Simulator 2019 with all the content and features for free, download Bus Driver Simulator 2019 DLC Unlocker-PLAZA today and experience the best driving simulation game ever made.
-
-
\ No newline at end of file
diff --git a/spaces/1line/AutoGPT/CONTRIBUTING.md b/spaces/1line/AutoGPT/CONTRIBUTING.md
deleted file mode 100644
index 79169a0c1951853303f73ffa1fddb3518685606a..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/CONTRIBUTING.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# Contributing to Auto-GPT
-
-First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.
-
-This document provides guidelines and best practices to help you contribute effectively.
-
-## Table of Contents
-
-- [Code of Conduct](#code-of-conduct)
-- [Getting Started](#getting-started)
-- [How to Contribute](#how-to-contribute)
- - [Reporting Bugs](#reporting-bugs)
- - [Suggesting Enhancements](#suggesting-enhancements)
- - [Submitting Pull Requests](#submitting-pull-requests)
-- [Style Guidelines](#style-guidelines)
- - [Code Formatting](#code-formatting)
- - [Pre-Commit Hooks](#pre-commit-hooks)
-
-## Code of Conduct
-
-By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project.
-
-## 📢 A Quick Word
-
-Right now we will not be accepting any contributions that add non-essential commands to Auto-GPT.
-
-However, you absolutely can still add these commands to Auto-GPT in the form of plugins. Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).
-> ⚠️ Plugin support is expected to ship within the week. You can follow PR #757 for more updates!
-
-## Getting Started
-
-To start contributing, follow these steps:
-
-1. Fork the repository and clone your fork.
-2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
-3. Make your changes in the new branch.
-4. Test your changes thoroughly.
-5. Commit and push your changes to your fork.
-6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.
-
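-As a rough sketch, steps 1–5 might look like this on the command line (the fork URL and branch name are placeholders, not project conventions):
-
-```bash
-git clone https://github.com/<your-username>/Auto-GPT.git
-cd Auto-GPT
-git checkout -b fix-bug-123
-# ...make and test your changes...
-git add .
-git commit -m "Fix bug 123"
-git push origin fix-bug-123
-```
-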
-## How to Contribute
-
-### Reporting Bugs
-
-If you find a bug in the project, please create an issue on GitHub with the following information:
-
-- A clear, descriptive title for the issue.
-- A description of the problem, including steps to reproduce the issue.
-- Any relevant logs, screenshots, or other supporting information.
-
-### Suggesting Enhancements
-
-If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:
-
-- A clear, descriptive title for the issue.
-- A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
-- Any relevant examples, mockups, or supporting information.
-
-### Submitting Pull Requests
-
-When submitting a pull request, please ensure that your changes meet the following criteria:
-
-- Your pull request should be atomic and focus on a single change.
-- Your pull request should include tests for your change.
-- You should have thoroughly tested your changes with multiple different prompts.
-- You should have considered potential risks and mitigations for your changes.
-- You should have documented your changes clearly and comprehensively.
-- You should not include any unrelated or "extra" small tweaks or changes.
-
-## Style Guidelines
-
-### Code Formatting
-
-We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`:
-
-```bash
-pip install black
-```
-
-To format your code, run the following command in the project's root directory:
-
-```bash
-black .
-```
-
-### Pre-Commit Hooks
-
-We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:
-
-1. Install the pre-commit package using pip:
-```bash
-pip install pre-commit
-```
-
-2. Run the following command in the project's root directory to install the pre-commit hooks:
-```bash
-pre-commit install
-```
-
-Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.
-
-If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.
-
-Happy coding, and once again, thank you for your contributions!
-
-Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
-
-https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+
\ No newline at end of file
diff --git a/spaces/1line/AutoGPT/autogpt/commands/google_search.py b/spaces/1line/AutoGPT/autogpt/commands/google_search.py
deleted file mode 100644
index 7d38ce7568d2de207d521b077cfebd72527c9795..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/autogpt/commands/google_search.py
+++ /dev/null
@@ -1,87 +0,0 @@
-"""Google search command for Autogpt."""
-from __future__ import annotations
-
-import json
-
-from duckduckgo_search import ddg
-
-from autogpt.config import Config
-
-CFG = Config()
-
-
-def google_search(query: str, num_results: int = 8) -> str:
- """Return the results of a Google search
-
- Args:
- query (str): The search query.
- num_results (int): The number of results to return.
-
- Returns:
- str: The results of the search.
- """
- search_results = []
- if not query:
- return json.dumps(search_results)
-
- results = ddg(query, max_results=num_results)
- if not results:
- return json.dumps(search_results)
-
-    search_results.extend(results)
-
- return json.dumps(search_results, ensure_ascii=False, indent=4)
-
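-# Example use of google_search() above (illustrative; assumes a
-# duckduckgo_search release that still exposes the legacy `ddg` helper):
-#
-#     print(google_search("gradio tutorials", num_results=3))
-#     # -> a JSON-formatted string: a list of result dicts with keys
-#     #    such as "title", "href", and "body"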
-
-def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
- """Return the results of a Google search using the official Google API
-
- Args:
- query (str): The search query.
- num_results (int): The number of results to return.
-
- Returns:
-        str | list[str]: A list of search result URLs, or an error message string.
- """
-
- from googleapiclient.discovery import build
- from googleapiclient.errors import HttpError
-
- try:
- # Get the Google API key and Custom Search Engine ID from the config file
- api_key = CFG.google_api_key
- custom_search_engine_id = CFG.custom_search_engine_id
-
- # Initialize the Custom Search API service
- service = build("customsearch", "v1", developerKey=api_key)
-
- # Send the search query and retrieve the results
- result = (
- service.cse()
- .list(q=query, cx=custom_search_engine_id, num=num_results)
- .execute()
- )
-
- # Extract the search result items from the response
- search_results = result.get("items", [])
-
- # Create a list of only the URLs from the search results
- search_results_links = [item["link"] for item in search_results]
-
- except HttpError as e:
- # Handle errors in the API call
- error_details = json.loads(e.content.decode())
-
- # Check if the error is related to an invalid or missing API key
-        if (
-            error_details.get("error", {}).get("code") == 403
-            and "invalid API key" in error_details.get("error", {}).get("message", "")
-        ):
- return "Error: The provided Google API key is invalid or missing."
- else:
- return f"Error: {e}"
-
- # Return the list of search result URLs
- return search_results_links
diff --git a/spaces/1phancelerku/anime-remove-background/Download APK Real Boxing and Experience the Ultimate Fighting Game on Android.md b/spaces/1phancelerku/anime-remove-background/Download APK Real Boxing and Experience the Ultimate Fighting Game on Android.md
deleted file mode 100644
index d3ee2fd9327ccd006311f56d4a3bd94dd36aaf7f..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download APK Real Boxing and Experience the Ultimate Fighting Game on Android.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-
-# APK Real Boxing: A Review of the Best Fighting Game on Android
-
-If you are a fan of fighting games and boxing simulators, you might have heard of APK Real Boxing, one of the most popular and realistic games on Google Play. But what is APK Real Boxing, and why is it so awesome? In this article, we will give you a comprehensive review of this game, its features, benefits, and how to play it like a pro. Read on to find out more!
-
-## What is APK Real Boxing?
-
-APK Real Boxing is a fighting game and boxing simulator developed by Vivid Games S.A., a studio behind well-known online fighting games on mobile. It is based on the original KO boxing simulator that won the hearts of punching games fans worldwide. It uses Unreal Engine-powered graphics and realistic motion-captured animations to create a stunning and immersive boxing experience. You can download APK Real Boxing from various sources online, such as APKCombo, Google Play, or Vivid Games.
-
-## The features and benefits of APK Real Boxing
-
-APK Real Boxing has many features and benefits that make it stand out from other fighting games on Android. Here are some of them:
-
-- It has a full-blown career mode for your boxer, where you can knock out over 30 unique boxers with their own adaptive boxing styles and become the best boxer in the world.
-- It allows you to customize your own boxer with dozens of unlockable hairstyles, tattoos, and gear. You can also train in various mini-games to boost your speed, strength, and stamina.
-- It has a variety of punches and combos that you can use in the knockout boxing game. You can also tip the odds in your favor with power-ups and feel every jab, hook, and KO uppercut thanks to the intuitive controls.
-- It has exhilarating bonus modes where you can test your boxing skills against boxeo bosses in Arcade Mode or take on the Underground Tournament to unlock new gear for your boxer.
-- It has amazing graphics and sound that immerse you in the adrenaline-pumping world of boxing. You can also win prizes with Daily Rewards and Daily Spin.
-
-## How to download and install APK Real Boxing
-
-To download and install APK Real Boxing, you need to follow these simple steps:
-
-1. Choose a reliable source for downloading APK Real Boxing, such as APKCombo, Google Play, or Vivid Games.
-2. Click on the download button or link and wait for the file to be downloaded on your device.
-3. Once the file is downloaded, locate it in your file manager and tap on it to start the installation process.
-4. Follow the instructions on the screen and grant the necessary permissions to install the app.
-5. After the installation is complete, launch the app and enjoy playing APK Real Boxing!
-
-## Why APK Real Boxing is the ultimate fighting game
-
-Now that you know what APK Real Boxing is and how to get it, you might be wondering why it is the ultimate fighting game on Android. Well, there are many reasons why this game is so awesome, but we will focus on three main aspects: the knockout gameplay, the comprehensive career mode, and the exhilarating bonus modes.
-
-### The knockout gameplay and intuitive controls
-
-The knockout gameplay of APK Real Boxing is one of its best features. It allows you to fight using a variety of punches and combos in a realistic boxing match.
-
-### The exhilarating bonus modes and amazing graphics
-
-In the bonus modes, you can test your boxing skills against boxeo bosses in Arcade Mode or take on the Underground Tournament to unlock new gear for your boxer. You can also play in Real-Time Multiplayer mode and join other boxers in weekly tournaments and special events. The amazing graphics of APK Real Boxing are another reason why this game is so awesome. It uses Unreal Engine-powered graphics and realistic motion-captured animations to create a stunning and immersive boxing experience. You can feel every jab, hook, and KO uppercut thanks to the realistic physics and sound effects. You can also admire the detailed and lifelike models of the boxers and the environments.
-
How to play APK Real Boxing like a pro
-
Now that you know why APK Real Boxing is the ultimate fighting game on Android, you might want to learn how to play it like a pro. Well, there are many tips and tricks that can help you improve your boxing skills and win more fights. Here are some of them:
The basic moves and combos of APK Real Boxing are essential for any boxer. You need to master them to be able to fight effectively and efficiently. Here are some of the basic moves and combos you should know:
-
-
-- Jab: A quick and straight punch that can be used to keep your opponent at bay or set up other punches.
-- Hook: A powerful punch that can be thrown from either side and can cause a lot of damage if it lands on the chin or the temple.
-- Uppercut: A devastating punch that can be thrown from below and can knock out your opponent if it hits the jaw or the nose.
-- Body shot: A punch that targets the torso or the ribs of your opponent and can weaken their stamina and defense.
-- Combo: A series of punches that can be chained together to create a more effective attack. For example, you can use a jab-hook-uppercut combo or a body shot-hook-jab combo.
-
-
-### The power-ups and strategies
-
-The power-ups and strategies of APK Real Boxing are important for any boxer. You need to use them wisely to gain an advantage over your opponent or turn the tide of the fight. Here are some of the power-ups and strategies you should use:
-
-- Health: a power-up that restores some of your health and can help you survive longer in the fight.
-- Stamina: a power-up that restores some of your stamina and lets you throw more punches and move faster.
-- Shield: a power-up that protects you from incoming punches for a short time.
-- Fury: a power-up that increases your damage output for a short time.
-- Dodge: a strategy that lets you evade incoming punches by swiping left or right on the screen.
-- Block: a strategy that lets you defend yourself from incoming punches by holding down the block button on the screen, reducing the damage you take.
-- Counter: a strategy that lets you retaliate after dodging or blocking an incoming punch by tapping on the screen, dealing extra damage.
-
-
-### The tips and tricks from the experts
-
-The tips and tricks from the experts of APK Real Boxing are useful for any boxer. Follow them to improve your boxing skills and win more fights:
-
-- Train regularly in the mini-games to boost your speed, strength, and stamina.
-- Customize your boxer with gear that suits your style and preferences.
-- Fight against boxers with different styles and learn from their strengths and weaknesses.
-- Use different punches and combos depending on the situation and your opponent's behavior.
-- Use power-ups strategically and don't waste them unnecessarily.
-- Dodge, block, and counter effectively, and don't let your opponent hit you too often.
-- Be aggressive but not reckless, and don't leave yourself open to attacks.
-- Be patient but not passive, and don't let your opponent dictate the pace of the fight.
-
-
-## Conclusion
-
-In conclusion, APK Real Boxing is a fighting game and boxing simulator that offers a realistic and immersive boxing experience on Android. It stands out from other fighting games thanks to its knockout gameplay, comprehensive career mode, exhilarating bonus modes, and amazing graphics, while its intuitive controls and customization options make it easy to pick up and enjoy. It is a game that will challenge your boxing skills and entertain you for hours. If you are looking for a realistic and immersive boxing simulator on Android, you should definitely try APK Real Boxing. You will not regret it!
-
-### Summary of the main points
-
-To summarize, here are the main points of this article:
-
-
-- APK Real Boxing is a fighting game and boxing simulator developed by Vivid Games S.A., a studio behind well-known online fighting games on mobile.
-- It uses Unreal Engine-powered graphics and realistic motion-captured animations to create a stunning and immersive boxing experience.
-- It has a full-blown career mode for your boxer, where you can knock out over 30 unique boxers with their own adaptive boxing styles and become the best boxer in the world.
-- It allows you to customize your own boxer with dozens of unlockable hairstyles, tattoos, and gear. You can also train in various mini-games to boost your speed, strength, and stamina.
-- It has a variety of punches and combos that you can use in the knockout boxing game. You can also tip the odds in your favor with power-ups and feel every jab, hook, and KO uppercut thanks to the intuitive controls.
-- It has exhilarating bonus modes where you can test your boxing skills against boxing bosses in Arcade Mode or take on the Underground Tournament to unlock new gear for your boxer.
-- It has many tips and tricks that can help you improve your boxing skills and win more fights.
-
-
-### Call to action and recommendation
-
-If you are interested in APK Real Boxing, you can download it from sources such as APKCombo, Google Play, or Vivid Games, or visit the official website to learn more about the game and its features. We highly recommend APK Real Boxing to anyone who loves fighting games and boxing simulators; it will keep you hooked and entertained for hours. So what are you waiting for? Download APK Real Boxing today and start your boxing career!
-
-## FAQs
-
-Here are some of the frequently asked questions about APK Real Boxing:
-
-1. Is APK Real Boxing free to play?
-Yes, APK Real Boxing is free to play. However, it contains in-app purchases that can enhance your gaming experience.
-2. Is APK Real Boxing safe to download?
-Yes, APK Real Boxing is safe to download, as long as you download it from reliable sources such as APKCombo, Google Play, or Vivid Games.
-3. Is APK Real Boxing compatible with my device?
-APK Real Boxing is compatible with most Android devices running Android 4.1 or higher. However, some devices may have performance issues or bugs due to differing specifications.
-4. How can I contact the developers of APK Real Boxing?
-You can contact the developers by visiting their official website or social media pages, or by sending an email to support@vividgames.com.
-5. How can I give feedback or report a problem with APK Real Boxing?
-You can give feedback or report a problem by using the in-game feedback option or by sending an email to support@vividgames.com.
-
-
\ No newline at end of file
diff --git a/spaces/22h/vintedois-diffusion-v0-2/README.md b/spaces/22h/vintedois-diffusion-v0-2/README.md
deleted file mode 100644
index 7e0cd99fa9ced46e8858cbfd461e55190a0e2af9..0000000000000000000000000000000000000000
--- a/spaces/22h/vintedois-diffusion-v0-2/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Vintedois Diffusion V0 2
-emoji: 📚
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/docs/eval.md b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/docs/eval.md
deleted file mode 100644
index dd1d9e257367b6422680966198646c45e5a2671d..0000000000000000000000000000000000000000
--- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/docs/eval.md
+++ /dev/null
@@ -1,31 +0,0 @@
-## Eval on ICCV2021-MFR
-
-Coming soon.
-
-
-## Eval IJB-C
-You can evaluate IJB-C with PyTorch or ONNX.
-
-
-1. Eval IJB-C with ONNX
-```shell
-CUDA_VISIBLE_DEVICES=0 python onnx_ijbc.py --model-root ms1mv3_arcface_r50 --image-path IJB_release/IJBC --result-dir ms1mv3_arcface_r50
-```
-
-2. Eval IJB-C with PyTorch
-```shell
-CUDA_VISIBLE_DEVICES=0,1 python eval_ijbc.py \
---model-prefix ms1mv3_arcface_r50/backbone.pth \
---image-path IJB_release/IJBC \
---result-dir ms1mv3_arcface_r50 \
---batch-size 128 \
---job ms1mv3_arcface_r50 \
---target IJBC \
---network iresnet50
-```
-
-## Inference
-
-```shell
-python inference.py --weight ms1mv3_arcface_r50/backbone.pth --network r50
-```
diff --git a/spaces/801artistry/RVC801/infer/modules/ipex/hijacks.py b/spaces/801artistry/RVC801/infer/modules/ipex/hijacks.py
deleted file mode 100644
index b06f3a9c1a70ef515c30d0e7d749923ecb8d0bfe..0000000000000000000000000000000000000000
--- a/spaces/801artistry/RVC801/infer/modules/ipex/hijacks.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import contextlib
-import importlib
-import torch
-import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
-
-# pylint: disable=protected-access, missing-function-docstring, line-too-long, unnecessary-lambda, no-else-return
-
-class CondFunc: # pylint: disable=missing-class-docstring
- def __new__(cls, orig_func, sub_func, cond_func):
- self = super(CondFunc, cls).__new__(cls)
- if isinstance(orig_func, str):
- func_path = orig_func.split('.')
- for i in range(len(func_path)-1, -1, -1):
- try:
- resolved_obj = importlib.import_module('.'.join(func_path[:i]))
- break
- except ImportError:
- pass
- for attr_name in func_path[i:-1]:
- resolved_obj = getattr(resolved_obj, attr_name)
- orig_func = getattr(resolved_obj, func_path[-1])
- setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
- self.__init__(orig_func, sub_func, cond_func)
- return lambda *args, **kwargs: self(*args, **kwargs)
- def __init__(self, orig_func, sub_func, cond_func):
- self.__orig_func = orig_func
- self.__sub_func = sub_func
- self.__cond_func = cond_func
- def __call__(self, *args, **kwargs):
- if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
- return self.__sub_func(self.__orig_func, *args, **kwargs)
- else:
- return self.__orig_func(*args, **kwargs)
-
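-# CondFunc('pkg.mod.fn', sub_func, cond_func) imports the deepest importable prefix
-# of the dotted path, rebinds the attribute to a wrapper, and then routes each call
-# to sub_func(orig, ...) when cond_func is None or cond_func(orig, ...) is truthy,
-# falling back to the original function otherwise. For example (mirroring the
-# hijacks registered in ipex_hijacks below):
-#
-#   CondFunc('torch.empty',
-#            lambda orig_func, *args, device=None, **kwargs:
-#                orig_func(*args, device=return_xpu(device), **kwargs),
-#            lambda orig_func, *args, device=None, **kwargs: check_device(device))
-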
-_utils = torch.utils.data._utils
-def _shutdown_workers(self):
- if torch.utils.data._utils is None or torch.utils.data._utils.python_exit_status is True or torch.utils.data._utils.python_exit_status is None:
- return
- if hasattr(self, "_shutdown") and not self._shutdown:
- self._shutdown = True
- try:
- if hasattr(self, '_pin_memory_thread'):
- self._pin_memory_thread_done_event.set()
- self._worker_result_queue.put((None, None))
- self._pin_memory_thread.join()
- self._worker_result_queue.cancel_join_thread()
- self._worker_result_queue.close()
- self._workers_done_event.set()
- for worker_id in range(len(self._workers)):
- if self._persistent_workers or self._workers_status[worker_id]:
- self._mark_worker_as_unavailable(worker_id, shutdown=True)
- for w in self._workers: # pylint: disable=invalid-name
- w.join(timeout=torch.utils.data._utils.MP_STATUS_CHECK_INTERVAL)
- for q in self._index_queues: # pylint: disable=invalid-name
- q.cancel_join_thread()
- q.close()
- finally:
- if self._worker_pids_set:
- torch.utils.data._utils.signal_handling._remove_worker_pids(id(self))
- self._worker_pids_set = False
- for w in self._workers: # pylint: disable=invalid-name
- if w.is_alive():
- w.terminate()
-
-class DummyDataParallel(torch.nn.Module): # pylint: disable=missing-class-docstring, unused-argument, too-few-public-methods
- def __new__(cls, module, device_ids=None, output_device=None, dim=0): # pylint: disable=unused-argument
- if isinstance(device_ids, list) and len(device_ids) > 1:
- print("IPEX backend doesn't support DataParallel on multiple XPU devices")
- return module.to("xpu")
-
-def return_null_context(*args, **kwargs): # pylint: disable=unused-argument
- return contextlib.nullcontext()
-
-def check_device(device):
- return bool((isinstance(device, torch.device) and device.type == "cuda") or (isinstance(device, str) and "cuda" in device) or isinstance(device, int))
-
-def return_xpu(device):
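-    # Maps a CUDA device spec onto XPU, e.g. "cuda:1" -> "xpu:1", 0 -> "xpu:0",
-    # torch.device("cuda") -> torch.device("xpu"), "cuda" -> "xpu". Note that the
-    # string branch keeps only the last character, so multi-digit indices collapse.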
- return f"xpu:{device[-1]}" if isinstance(device, str) and ":" in device else f"xpu:{device}" if isinstance(device, int) else torch.device("xpu") if isinstance(device, torch.device) else "xpu"
-
-def ipex_no_cuda(orig_func, *args, **kwargs):
- torch.cuda.is_available = lambda: False
- orig_func(*args, **kwargs)
- torch.cuda.is_available = torch.xpu.is_available
-
-original_autocast = torch.autocast
-def ipex_autocast(*args, **kwargs):
- if len(args) > 0 and args[0] == "cuda":
- return original_autocast("xpu", *args[1:], **kwargs)
- else:
- return original_autocast(*args, **kwargs)
-
-original_torch_cat = torch.cat
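-# Workaround for mixed-dtype concatenation: for a three-tensor cat whose outer
-# tensors' dtypes differ from the middle one, cast the outer tensors to the middle
-# tensor's dtype before concatenating.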
-def torch_cat(tensor, *args, **kwargs):
- if len(tensor) == 3 and (tensor[0].dtype != tensor[1].dtype or tensor[2].dtype != tensor[1].dtype):
- return original_torch_cat([tensor[0].to(tensor[1].dtype), tensor[1], tensor[2].to(tensor[1].dtype)], *args, **kwargs)
- else:
- return original_torch_cat(tensor, *args, **kwargs)
-
-original_interpolate = torch.nn.functional.interpolate
-def interpolate(tensor, size=None, scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False): # pylint: disable=too-many-arguments
- if antialias or align_corners is not None:
- return_device = tensor.device
- return_dtype = tensor.dtype
- return original_interpolate(tensor.to("cpu", dtype=torch.float32), size=size, scale_factor=scale_factor, mode=mode,
- align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias).to(return_device, dtype=return_dtype)
- else:
- return original_interpolate(tensor, size=size, scale_factor=scale_factor, mode=mode,
- align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias)
-
-original_linalg_solve = torch.linalg.solve
-def linalg_solve(A, B, *args, **kwargs): # pylint: disable=invalid-name
- if A.device != torch.device("cpu") or B.device != torch.device("cpu"):
- return_device = A.device
- return original_linalg_solve(A.to("cpu"), B.to("cpu"), *args, **kwargs).to(return_device)
- else:
- return original_linalg_solve(A, B, *args, **kwargs)
-
-def ipex_hijacks():
- CondFunc('torch.Tensor.to',
- lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs),
- lambda orig_func, self, device=None, *args, **kwargs: check_device(device))
- CondFunc('torch.Tensor.cuda',
- lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs),
- lambda orig_func, self, device=None, *args, **kwargs: check_device(device))
- CondFunc('torch.empty',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.load',
- lambda orig_func, *args, map_location=None, **kwargs: orig_func(*args, return_xpu(map_location), **kwargs),
- lambda orig_func, *args, map_location=None, **kwargs: map_location is None or check_device(map_location))
- CondFunc('torch.randn',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.ones',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.zeros',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.tensor',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
- CondFunc('torch.linspace',
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
-
- CondFunc('torch.Generator',
- lambda orig_func, device=None: torch.xpu.Generator(device),
- lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu")
-
- CondFunc('torch.batch_norm',
- lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
- weight if weight is not None else torch.ones(input.size()[1], device=input.device),
- bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
- lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))
- CondFunc('torch.instance_norm',
- lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
- weight if weight is not None else torch.ones(input.size()[1], device=input.device),
- bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
- lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))
-
- #Functions with dtype errors:
- CondFunc('torch.nn.modules.GroupNorm.forward',
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
- CondFunc('torch.nn.modules.linear.Linear.forward',
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
- CondFunc('torch.nn.modules.conv.Conv2d.forward',
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
- CondFunc('torch.nn.functional.layer_norm',
- lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
- orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs),
- lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
- weight is not None and input.dtype != weight.data.dtype)
-
-    #Diffusers float64 (Arc GPUs don't support double or float64):
- if not torch.xpu.has_fp64_dtype():
- CondFunc('torch.from_numpy',
- lambda orig_func, ndarray: orig_func(ndarray.astype('float32')),
- lambda orig_func, ndarray: ndarray.dtype == float)
-
- #Broken functions when torch.cuda.is_available is True:
- CondFunc('torch.utils.data.dataloader._BaseDataLoaderIter.__init__',
- lambda orig_func, *args, **kwargs: ipex_no_cuda(orig_func, *args, **kwargs),
- lambda orig_func, *args, **kwargs: True)
-
- #Functions that make compile mad with CondFunc:
- torch.utils.data.dataloader._MultiProcessingDataLoaderIter._shutdown_workers = _shutdown_workers
- torch.nn.DataParallel = DummyDataParallel
- torch.autocast = ipex_autocast
- torch.cat = torch_cat
- torch.linalg.solve = linalg_solve
- torch.nn.functional.interpolate = interpolate
- torch.backends.cuda.sdp_kernel = return_null_context
\ No newline at end of file
diff --git a/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/README.md b/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/README.md
deleted file mode 100644
index 2ae28940a1c2af5d71872c2e395f5eceea3bec65..0000000000000000000000000000000000000000
--- a/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Wizardlm-13b-v1.2.Q4_0.gguf
-colorFrom: purple
-colorTo: blue
-sdk: docker
-models:
- - WizardLM/WizardLM-13B-V1.2
- - TheBloke/WizardLM-13B-V1.2-GGUF
-tags:
- - inference api
- - openai-api compatible
- - llama-cpp-python
- - WizardLM
- - gguf
-pinned: false
----
-
-# WizardLM-13B-V1.2-GGUF
-
-Please refer to the [index.html](index.html) for more information.
diff --git a/spaces/AIGC-Audio/AudioGPT/mono2binaural/src/utils.py b/spaces/AIGC-Audio/AudioGPT/mono2binaural/src/utils.py
deleted file mode 100644
index 074dd84bcb700650a615f75b37c6c54f6f211443..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/mono2binaural/src/utils.py
+++ /dev/null
@@ -1,251 +0,0 @@
-"""
-Copyright (c) Facebook, Inc. and its affiliates.
-All rights reserved.
-
-This source code is licensed under the license found in the
-LICENSE file in the root directory of this source tree.
-"""
-
-import numpy as np
-import torch as th
-#import torchaudio as ta
-
-
-class Net(th.nn.Module):
-
- def __init__(self, model_name="network", use_cuda=True):
- super().__init__()
- self.use_cuda = use_cuda
- self.model_name = model_name
-
- def save(self, model_dir, suffix=''):
- '''
- save the network to model_dir/model_name.suffix.net
- :param model_dir: directory to save the model to
- :param suffix: suffix to append after model name
- '''
- if self.use_cuda:
- self.cpu()
-
- if suffix == "":
- fname = f"{model_dir}/{self.model_name}.net"
- else:
- fname = f"{model_dir}/{self.model_name}.{suffix}.net"
-
- th.save(self.state_dict(), fname)
- if self.use_cuda:
- self.cuda()
-
- def load_from_file(self, model_file):
- '''
- load network parameters from model_file
- :param model_file: file containing the model parameters
- '''
- if self.use_cuda:
- self.cpu()
-
- states = th.load(model_file)
- self.load_state_dict(states)
-
- if self.use_cuda:
- self.cuda()
- print(f"Loaded: {model_file}")
-
- def load(self, model_dir, suffix=''):
- '''
- load network parameters from model_dir/model_name.suffix.net
- :param model_dir: directory to load the model from
- :param suffix: suffix to append after model name
- '''
- if suffix == "":
- fname = f"{model_dir}/{self.model_name}.net"
- else:
- fname = f"{model_dir}/{self.model_name}.{suffix}.net"
- self.load_from_file(fname)
-
- def num_trainable_parameters(self):
- '''
- :return: the number of trainable parameters in the model
- '''
- return sum(p.numel() for p in self.parameters() if p.requires_grad)
-
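-# Example usage (hypothetical subclass; the names below are illustrative only):
-#
-#   class BinauralNet(Net):
-#       ...
-#
-#   net = BinauralNet(model_name="binaural")
-#   net.save("artifacts", suffix="best")    # writes artifacts/binaural.best.net
-#   net.load("artifacts", suffix="best")
-#   print(net.num_trainable_parameters())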
-
-# class NewbobAdam(th.optim.Adam):
-
-# def __init__(self,
-# weights,
-# net,
-# artifacts_dir,
-# initial_learning_rate=0.001,
-# decay=0.5,
-# max_decay=0.01
-# ):
-# '''
-# Newbob learning rate scheduler
-# :param weights: weights to optimize
-# :param net: the network, must be an instance of type src.utils.Net
-# :param artifacts_dir: (str) directory to save/restore models to/from
-# :param initial_learning_rate: (float) initial learning rate
-# :param decay: (float) value to decrease learning rate by when loss doesn't improve further
-# :param max_decay: (float) maximum decay of learning rate
-# '''
-# super().__init__(weights, lr=initial_learning_rate)
-# self.last_epoch_loss = np.inf
-# self.total_decay = 1
-# self.net = net
-# self.decay = decay
-# self.max_decay = max_decay
-# self.artifacts_dir = artifacts_dir
-# # store initial state as backup
-# if decay < 1.0:
-# net.save(artifacts_dir, suffix="newbob")
-
-# def update_lr(self, loss):
-# '''
-# update the learning rate based on the current loss value and historic loss values
-# :param loss: the loss after the current iteration
-# '''
-# if loss > self.last_epoch_loss and self.decay < 1.0 and self.total_decay > self.max_decay:
-# self.total_decay = self.total_decay * self.decay
-# print(f"NewbobAdam: Decay learning rate (loss degraded from {self.last_epoch_loss} to {loss})."
-# f"Total decay: {self.total_decay}")
-# # restore previous network state
-# self.net.load(self.artifacts_dir, suffix="newbob")
-# # decrease learning rate
-# for param_group in self.param_groups:
-# param_group['lr'] = param_group['lr'] * self.decay
-# else:
-# self.last_epoch_loss = loss
-# # save last snapshot to restore it in case of lr decrease
-# if self.decay < 1.0 and self.total_decay > self.max_decay:
-# self.net.save(self.artifacts_dir, suffix="newbob")
-
-
-# class FourierTransform:
-# def __init__(self,
-# fft_bins=2048,
-# win_length_ms=40,
-# frame_rate_hz=100,
-# causal=False,
-# preemphasis=0.0,
-# sample_rate=48000,
-# normalized=False):
-# self.sample_rate = sample_rate
-# self.frame_rate_hz = frame_rate_hz
-# self.preemphasis = preemphasis
-# self.fft_bins = fft_bins
-# self.win_length = int(sample_rate * win_length_ms / 1000)
-# self.hop_length = int(sample_rate / frame_rate_hz)
-# self.causal = causal
-# self.normalized = normalized
-# if self.win_length > self.fft_bins:
-# print('FourierTransform Warning: fft_bins should be larger than win_length')
-
-# def _convert_format(self, data, expected_dims):
-# if not type(data) == th.Tensor:
-# data = th.Tensor(data)
-# if len(data.shape) < expected_dims:
-# data = data.unsqueeze(0)
-# if not len(data.shape) == expected_dims:
-# raise Exception(f"FourierTransform: data needs to be a Tensor with {expected_dims} dimensions but got shape {data.shape}")
-# return data
-
-# def _preemphasis(self, audio):
-# if self.preemphasis > 0:
-# return th.cat((audio[:, 0:1], audio[:, 1:] - self.preemphasis * audio[:, :-1]), dim=1)
-# return audio
-
-# def _revert_preemphasis(self, audio):
-# if self.preemphasis > 0:
-# for i in range(1, audio.shape[1]):
-# audio[:, i] = audio[:, i] + self.preemphasis * audio[:, i-1]
-# return audio
-
-# def _magphase(self, complex_stft):
-# mag, phase = ta.functional.magphase(complex_stft, 1.0)
-# return mag, phase
-
-# def stft(self, audio):
-# '''
-# wrapper around th.stft
-# audio: wave signal as th.Tensor
-# '''
-# hann = th.hann_window(self.win_length)
-# hann = hann.cuda() if audio.is_cuda else hann
-# spec = th.stft(audio, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length,
-# window=hann, center=not self.causal, normalized=self.normalized)
-# return spec.contiguous()
-
-# def complex_spectrogram(self, audio):
-# '''
-# audio: wave signal as th.Tensor
-# return: th.Tensor of size channels x frequencies x time_steps (channels x y_axis x x_axis)
-# '''
-# self._convert_format(audio, expected_dims=2)
-# audio = self._preemphasis(audio)
-# return self.stft(audio)
-
-# def magnitude_phase(self, audio):
-# '''
-# audio: wave signal as th.Tensor
-# return: tuple containing two th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
-# '''
-# stft = self.complex_spectrogram(audio)
-# return self._magphase(stft)
-
-# def mag_spectrogram(self, audio):
-# '''
-# audio: wave signal as th.Tensor
-# return: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
-# '''
-# return self.magnitude_phase(audio)[0]
-
-# def power_spectrogram(self, audio):
-# '''
-# audio: wave signal as th.Tensor
-# return: power spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
-# '''
-# return th.pow(self.mag_spectrogram(audio), 2.0)
-
-# def phase_spectrogram(self, audio):
-# '''
-# audio: wave signal as th.Tensor
-# return: phase spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
-# '''
-# return self.magnitude_phase(audio)[1]
-
-# def mel_spectrogram(self, audio, n_mels):
-# '''
-# audio: wave signal as th.Tensor
-# n_mels: number of bins used for mel scale warping
-# return: mel spectrogram as th.Tensor of size channels x n_mels x time_steps for magnitude and phase spectrum
-# '''
-# spec = self.power_spectrogram(audio)
-# mel_warping = ta.transforms.MelScale(n_mels, self.sample_rate)
-# return mel_warping(spec)
-
-# def complex_spec2wav(self, complex_spec, length):
-# '''
-# inverse stft
-# complex_spec: complex spectrum as th.Tensor of size channels x frequencies x time_steps x 2 (real part/imaginary part)
-# length: length of the audio to be reconstructed (in frames)
-# '''
-# complex_spec = self._convert_format(complex_spec, expected_dims=4)
-# hann = th.hann_window(self.win_length)
-# hann = hann.cuda() if complex_spec.is_cuda else hann
-# wav = ta.functional.istft(complex_spec, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length, window=hann, length=length, center=not self.causal)
-# wav = self._revert_preemphasis(wav)
-# return wav
-
-# def magphase2wav(self, mag_spec, phase_spec, length):
-# '''
-# reconstruction of wav signal from magnitude and phase spectrum
-# mag_spec: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps
-# phase_spec: phase spectrum as th.Tensor of size channels x frequencies x time_steps
-# length: length of the audio to be reconstructed (in frames)
-# '''
-# mag_spec = self._convert_format(mag_spec, expected_dims=3)
-# phase_spec = self._convert_format(phase_spec, expected_dims=3)
-# complex_spec = th.stack([mag_spec * th.cos(phase_spec), mag_spec * th.sin(phase_spec)], dim=-1)
-# return self.complex_spec2wav(complex_spec, length)
-
diff --git a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/conditioners.py b/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/conditioners.py
deleted file mode 100644
index 82792316024b88d4c5c38b0a28f443627771d509..0000000000000000000000000000000000000000
--- a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/conditioners.py
+++ /dev/null
@@ -1,990 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections import defaultdict
-from copy import deepcopy
-from dataclasses import dataclass, field
-from itertools import chain
-import logging
-import math
-import random
-import re
-import typing as tp
-import warnings
-
-from einops import rearrange
-from num2words import num2words
-import spacy
-from transformers import T5EncoderModel, T5Tokenizer # type: ignore
-import torchaudio
-import torch
-from torch import nn
-from torch import Tensor
-import torch.nn.functional as F
-from torch.nn.utils.rnn import pad_sequence
-
-from .streaming import StreamingModule
-from .transformer import create_sin_embedding
-from ..data.audio_dataset import SegmentInfo
-from ..utils.autocast import TorchAutocast
-from ..utils.utils import hash_trick, length_to_mask, collate
-
-
-logger = logging.getLogger(__name__)
-TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist)
-ConditionType = tp.Tuple[Tensor, Tensor] # condition, mask
-
-
-class WavCondition(tp.NamedTuple):
- wav: Tensor
- length: Tensor
- path: tp.List[tp.Optional[str]] = []
-
-
-def nullify_condition(condition: ConditionType, dim: int = 1):
- """This function transforms an input condition to a null condition.
- The way it is done by converting it to a single zero vector similarly
- to how it is done inside WhiteSpaceTokenizer and NoopTokenizer.
-
- Args:
- condition (ConditionType): a tuple of condition and mask (tp.Tuple[Tensor, Tensor])
- dim (int): the dimension that will be truncated (should be the time dimension)
- WARNING!: dim should not be the batch dimension!
- Returns:
- ConditionType: a tuple of null condition and mask
- """
- assert dim != 0, "dim cannot be the batch dimension!"
- assert type(condition) == tuple and \
- type(condition[0]) == Tensor and \
- type(condition[1]) == Tensor, "'nullify_condition' got an unexpected input type!"
- cond, mask = condition
- B = cond.shape[0]
- last_dim = cond.dim() - 1
- out = cond.transpose(dim, last_dim)
- out = 0. * out[..., :1]
- out = out.transpose(dim, last_dim)
- mask = torch.zeros((B, 1), device=out.device).int()
- assert cond.dim() == out.dim()
- return out, mask
-
-
-def nullify_wav(wav: Tensor) -> WavCondition:
- """Create a nullified WavCondition from a wav tensor with appropriate shape.
-
- Args:
- wav (Tensor): tensor of shape [B, T]
- Returns:
- WavCondition: wav condition with nullified wav.
- """
- null_wav, _ = nullify_condition((wav, torch.zeros_like(wav)), dim=wav.dim() - 1)
- return WavCondition(
- wav=null_wav,
- length=torch.tensor([0] * wav.shape[0], device=wav.device),
- path=['null_wav'] * wav.shape[0]
- )
-
-
-@dataclass
-class ConditioningAttributes:
- text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict)
- wav: tp.Dict[str, WavCondition] = field(default_factory=dict)
-
- def __getitem__(self, item):
- return getattr(self, item)
-
- @property
- def text_attributes(self):
- return self.text.keys()
-
- @property
- def wav_attributes(self):
- return self.wav.keys()
-
- @property
- def attributes(self):
- return {"text": self.text_attributes, "wav": self.wav_attributes}
-
- def to_flat_dict(self):
- return {
- **{f"text.{k}": v for k, v in self.text.items()},
- **{f"wav.{k}": v for k, v in self.wav.items()},
- }
-
- @classmethod
- def from_flat_dict(cls, x):
- out = cls()
- for k, v in x.items():
- kind, att = k.split(".")
- out[kind][att] = v
- return out
-
-
-class SegmentWithAttributes(SegmentInfo):
- """Base class for all dataclasses that are used for conditioning.
- All child classes should implement `to_condition_attributes` that converts
- the existing attributes to a dataclass of type ConditioningAttributes.
- """
- def to_condition_attributes(self) -> ConditioningAttributes:
- raise NotImplementedError()
-
-
-class Tokenizer:
- """Base class for all tokenizers
-    (in case we want to introduce more advanced tokenizers in the future).
- """
- def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]:
- raise NotImplementedError()
-
-
-class WhiteSpaceTokenizer(Tokenizer):
- """This tokenizer should be used for natural language descriptions.
- For example:
- ["he didn't, know he's going home.", 'shorter sentence'] =>
- [[78, 62, 31, 4, 78, 25, 19, 34],
- [59, 77, 0, 0, 0, 0, 0, 0]]
- """
- PUNCTUATIONS = "?:!.,;"
-
- def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm",
- lemma: bool = True, stopwords: bool = True) -> None:
- self.n_bins = n_bins
- self.pad_idx = pad_idx
- self.lemma = lemma
- self.stopwords = stopwords
- try:
- self.nlp = spacy.load(language)
- except IOError:
- spacy.cli.download(language) # type: ignore
- self.nlp = spacy.load(language)
-
- @tp.no_type_check
- def __call__(
- self,
- texts: tp.List[tp.Optional[str]],
- return_text: bool = False
- ) -> tp.Tuple[Tensor, Tensor]:
- """Take a list of strings and convert them to a tensor of indices.
-
- Args:
- texts (tp.List[str]): List of strings.
- return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False.
- Returns:
- tp.Tuple[Tensor, Tensor]:
- - Indices of words in the LUT.
- - And a mask indicating where the padding tokens are
- """
- output, lengths = [], []
- texts = deepcopy(texts)
- for i, text in enumerate(texts):
- # if current sample doesn't have a certain attribute, replace with pad token
- if text is None:
- output.append(Tensor([self.pad_idx]))
- lengths.append(0)
- continue
-
- # convert numbers to words
- text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore
- # normalize text
- text = self.nlp(text) # type: ignore
- # remove stopwords
- if self.stopwords:
- text = [w for w in text if not w.is_stop] # type: ignore
- # remove punctuations
- text = [w for w in text if w.text not in self.PUNCTUATIONS] # type: ignore
- # lemmatize if needed
- text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore
-
- texts[i] = " ".join(text)
- lengths.append(len(text))
- # convert to tensor
- tokens = Tensor([hash_trick(w, self.n_bins) for w in text])
- output.append(tokens)
-
- mask = length_to_mask(torch.IntTensor(lengths)).int()
- padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t()
- if return_text:
- return padded_output, mask, texts # type: ignore
- return padded_output, mask
-
-
-class NoopTokenizer(Tokenizer):
- """This tokenizer should be used for global conditioners such as: artist, genre, key, etc.
- The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split
- strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will
- split it to ["Jeff", "Buckley"] and return an index per word.
-
- For example:
- ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101]
- ["Metal", "Rock", "Classical"] => [0, 223, 51]
- """
- def __init__(self, n_bins: int, pad_idx: int = 0):
- self.n_bins = n_bins
- self.pad_idx = pad_idx
-
- def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]:
- output, lengths = [], []
- for text in texts:
- # if current sample doesn't have a certain attribute, replace with pad token
- if text is None:
- output.append(self.pad_idx)
- lengths.append(0)
- else:
- output.append(hash_trick(text, self.n_bins))
- lengths.append(1)
-
- tokens = torch.LongTensor(output).unsqueeze(1)
- mask = length_to_mask(torch.IntTensor(lengths)).int()
- return tokens, mask
-
-
-class BaseConditioner(nn.Module):
- """Base model for all conditioner modules. We allow the output dim to be different
- than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large;
- 2) make all condition dims consistent.
-
- Args:
- dim (int): Hidden dim of the model (text-encoder/LUT).
- output_dim (int): Output dim of the conditioner.
- """
- def __init__(self, dim, output_dim):
- super().__init__()
- self.dim = dim
- self.output_dim = output_dim
- self.output_proj = nn.Linear(dim, output_dim)
-
- def tokenize(self, *args, **kwargs) -> tp.Any:
- """Should be any part of the processing that will lead to a synchronization
- point, e.g. BPE tokenization with transfer to the GPU.
-
-        The returned value will be saved and returned later when calling forward().
- """
- raise NotImplementedError()
-
- def forward(self, inputs: tp.Any) -> ConditionType:
- """Gets input that should be used as conditioning (e.g, genre, description or a waveform).
- Outputs a ConditionType, after the input data was embedded as a dense vector.
-
- Returns:
- ConditionType:
- - A tensor of size [B, T, D] where B is the batch size, T is the length of the
- output embedding and D is the dimension of the embedding.
-                - And a mask indicating where the padding tokens are.
- """
- raise NotImplementedError()
-
-
-class TextConditioner(BaseConditioner):
- ...
-
-
-class LUTConditioner(TextConditioner):
- """Lookup table TextConditioner.
-
- Args:
- n_bins (int): Number of bins.
- dim (int): Hidden dim of the model (text-encoder/LUT).
- output_dim (int): Output dim of the conditioner.
- tokenizer (str): Name of the tokenizer.
- pad_idx (int, optional): Index for padding token. Defaults to 0.
- """
- def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0):
- super().__init__(dim, output_dim)
- self.embed = nn.Embedding(n_bins, dim)
- self.tokenizer: Tokenizer
- if tokenizer == "whitespace":
- self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx)
- elif tokenizer == "noop":
- self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx)
- else:
- raise ValueError(f"unrecognized tokenizer `{tokenizer}`.")
-
- def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- device = self.embed.weight.device
- tokens, mask = self.tokenizer(x)
- tokens, mask = tokens.to(device), mask.to(device)
- return tokens, mask
-
- def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType:
- tokens, mask = inputs
- embeds = self.embed(tokens)
- embeds = self.output_proj(embeds)
- embeds = (embeds * mask.unsqueeze(-1))
- return embeds, mask
-
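-# Example (illustrative values): hash whitespace tokens into a 10k-bin LUT and
-# project the embeddings to the conditioner output dimension:
-#
-#   cond = LUTConditioner(n_bins=10000, dim=128, output_dim=512, tokenizer="whitespace")
-#   embeds, mask = cond(cond.tokenize(["rock song with a guitar solo", None]))
-#   # embeds: [B, T, 512], mask: [B, T]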
-
-class T5Conditioner(TextConditioner):
- """T5-based TextConditioner.
-
- Args:
- name (str): Name of the T5 model.
- output_dim (int): Output dim of the conditioner.
- finetune (bool): Whether to fine-tune T5 at train time.
- device (str): Device for T5 Conditioner.
- autocast_dtype (tp.Optional[str], optional): Autocast dtype.
- word_dropout (float, optional): Word dropout probability.
- normalize_text (bool, optional): Whether to apply text normalization.
- """
- MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b",
- "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large",
- "google/flan-t5-xl", "google/flan-t5-xxl"]
- MODELS_DIMS = {
- "t5-small": 512,
- "t5-base": 768,
- "t5-large": 1024,
- "t5-3b": 1024,
- "t5-11b": 1024,
- "google/flan-t5-small": 512,
- "google/flan-t5-base": 768,
- "google/flan-t5-large": 1024,
- "google/flan-t5-3b": 1024,
- "google/flan-t5-11b": 1024,
- }
-
- def __init__(self, name: str, output_dim: int, finetune: bool, device: str,
- autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0.,
- normalize_text: bool = False):
-        assert name in self.MODELS, f"unrecognized t5 model name (should be in {self.MODELS})"
- super().__init__(self.MODELS_DIMS[name], output_dim)
- self.device = device
- self.name = name
- self.finetune = finetune
- self.word_dropout = word_dropout
-
- if autocast_dtype is None or self.device == 'cpu':
- self.autocast = TorchAutocast(enabled=False)
- if self.device != 'cpu':
- logger.warning("T5 has no autocast, this might lead to NaN")
- else:
- dtype = getattr(torch, autocast_dtype)
- assert isinstance(dtype, torch.dtype)
- logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}")
- self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype)
- # Let's disable logging temporarily because T5 will vomit some errors otherwise.
- # thanks https://gist.github.com/simon-weber/7853144
- previous_level = logging.root.manager.disable
- logging.disable(logging.ERROR)
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- try:
- self.t5_tokenizer = T5Tokenizer.from_pretrained(name)
- t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune)
- finally:
- logging.disable(previous_level)
- if finetune:
- self.t5 = t5
- else:
-            # this makes sure that the t5 model is not part
- # of the saved checkpoint
- self.__dict__["t5"] = t5.to(device)
-
- self.normalize_text = normalize_text
- if normalize_text:
- self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True)
-
- def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]:
- # if current sample doesn't have a certain attribute, replace with empty string
- entries: tp.List[str] = [xi if xi is not None else "" for xi in x]
- if self.normalize_text:
- _, _, entries = self.text_normalizer(entries, return_text=True)
- if self.word_dropout > 0. and self.training:
- new_entries = []
- for entry in entries:
- words = [word for word in entry.split(" ") if random.random() >= self.word_dropout]
- new_entries.append(" ".join(words))
- entries = new_entries
-
- empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""])
-
- inputs = self.t5_tokenizer(entries, return_tensors="pt", padding=True).to(self.device)
- mask = inputs["attention_mask"]
-        mask[empty_idx, :] = 0  # zero-out indices where the input is non-existent
- return inputs
-
- def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType:
- mask = inputs["attention_mask"]
- with torch.set_grad_enabled(self.finetune), self.autocast:
- embeds = self.t5(**inputs).last_hidden_state
- embeds = self.output_proj(embeds.to(self.output_proj.weight))
- embeds = (embeds * mask.unsqueeze(-1))
- return embeds, mask
-
-
-class WaveformConditioner(BaseConditioner):
- """Base class for all conditioners that take a waveform as input.
- Classes that inherit must implement `_get_wav_embedding` that outputs
- a continuous tensor, and `_downsampling_factor` that returns the down-sampling
- factor of the embedding model.
-
- Args:
- dim (int): The internal representation dimension.
- output_dim (int): Output dimension.
- device (tp.Union[torch.device, str]): Device.
- """
- def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]):
- super().__init__(dim, output_dim)
- self.device = device
-
- def tokenize(self, wav_length: WavCondition) -> WavCondition:
- wav, length, path = wav_length
- assert length is not None
- return WavCondition(wav.to(self.device), length.to(self.device), path)
-
- def _get_wav_embedding(self, wav: Tensor) -> Tensor:
- """Gets as input a wav and returns a dense vector of conditions."""
- raise NotImplementedError()
-
- def _downsampling_factor(self):
- """Returns the downsampling factor of the embedding model."""
- raise NotImplementedError()
-
- def forward(self, inputs: WavCondition) -> ConditionType:
- """
- Args:
-            inputs (WavCondition): Tuple of (waveform, lengths, path).
-        Returns:
-            ConditionType: Dense vector representing the conditioning along with its mask.
- """
- wav, lengths, path = inputs
- with torch.no_grad():
- embeds = self._get_wav_embedding(wav)
- embeds = embeds.to(self.output_proj.weight)
- embeds = self.output_proj(embeds)
-
- if lengths is not None:
- lengths = lengths / self._downsampling_factor()
- mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore
- else:
- mask = torch.ones_like(embeds)
- embeds = (embeds * mask.unsqueeze(2).to(self.device))
-
- return embeds, mask
-
-
-class ChromaStemConditioner(WaveformConditioner):
- """Chroma conditioner that uses DEMUCS to first filter out drums and bass. The is followed by
- the insight the drums and bass often dominate the chroma, leading to the chroma not containing the
- information about melody.
-
- Args:
- output_dim (int): Output dimension for the conditioner.
- sample_rate (int): Sample rate for the chroma extractor.
- n_chroma (int): Number of chroma for the chroma extractor.
- radix2_exp (int): Radix2 exponent for the chroma extractor.
- duration (float): Duration used during training. This is later used for correct padding
- in case we are using chroma as prefix.
- match_len_on_eval (bool, optional): If True then all chromas are padded to the training
- duration. Defaults to False.
-        eval_wavs (str, optional): Path to a json file with waveforms; these waveforms are used as
-            conditions during eval (for cases where we don't want to leak test conditions like MusicCaps).
- Defaults to None.
- n_eval_wavs (int, optional): Limits the number of waveforms used for conditioning. Defaults to 0.
- device (tp.Union[torch.device, str], optional): Device for the conditioner.
- **kwargs: Additional parameters for the chroma extractor.
- """
- def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int,
- duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None,
- n_eval_wavs: int = 0, device: tp.Union[torch.device, str] = "cpu", **kwargs):
- from demucs import pretrained
- super().__init__(dim=n_chroma, output_dim=output_dim, device=device)
- self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32)
- self.sample_rate = sample_rate
- self.match_len_on_eval = match_len_on_eval
- self.duration = duration
- self.__dict__["demucs"] = pretrained.get_model('htdemucs').to(device)
- self.stem2idx = {'drums': 0, 'bass': 1, 'other': 2, 'vocal': 3}
- self.stem_idx = torch.LongTensor([self.stem2idx['vocal'], self.stem2idx['other']]).to(device)
- self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp,
- device=device, **kwargs)
- self.chroma_len = self._get_chroma_len()
-
- def _downsampling_factor(self):
- return self.chroma.winhop
-
- def _get_chroma_len(self):
- """Get length of chroma during training"""
-        dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device)
- dummy_chr = self.chroma(dummy_wav)
- return dummy_chr.shape[1]
-
- @torch.no_grad()
- def _get_filtered_wav(self, wav):
- from demucs.apply import apply_model
- from demucs.audio import convert_audio
- with self.autocast:
- wav = convert_audio(wav, self.sample_rate, self.demucs.samplerate, self.demucs.audio_channels)
- stems = apply_model(self.demucs, wav, device=self.device)
- stems = stems[:, self.stem_idx] # extract stem
- stems = stems.sum(1) # merge extracted stems
- stems = stems.mean(1, keepdim=True) # mono
- stems = convert_audio(stems, self.demucs.samplerate, self.sample_rate, 1)
- return stems
-
- @torch.no_grad()
- def _get_wav_embedding(self, wav):
- # avoid 0-size tensors when we are working with null conds
- if wav.shape[-1] == 1:
- return self.chroma(wav)
- stems = self._get_filtered_wav(wav)
- chroma = self.chroma(stems)
-
- if self.match_len_on_eval:
- b, t, c = chroma.shape
- if t > self.chroma_len:
- chroma = chroma[:, :self.chroma_len]
- logger.debug(f'chroma was truncated! ({t} -> {chroma.shape[1]})')
- elif t < self.chroma_len:
- # chroma = F.pad(chroma, (0, 0, 0, self.chroma_len - t))
- n_repeat = int(math.ceil(self.chroma_len / t))
- chroma = chroma.repeat(1, n_repeat, 1)
- chroma = chroma[:, :self.chroma_len]
-                logger.debug(f'chroma was repeated to match the target length! ({t} -> {chroma.shape[1]})')
- return chroma
-
-
-class ChromaExtractor(nn.Module):
- """Chroma extraction class, handles chroma extraction and quantization.
-
- Args:
- sample_rate (int): Sample rate.
- n_chroma (int): Number of chroma to consider.
- radix2_exp (int): Radix2 exponent.
- nfft (tp.Optional[int], optional): Number of FFT.
- winlen (tp.Optional[int], optional): Window length.
- winhop (tp.Optional[int], optional): Window hop size.
- argmax (bool, optional): Whether to use argmax. Defaults to False.
- norm (float, optional): Norm for chroma normalization. Defaults to inf.
- device (tp.Union[torch.device, str], optional): Device to use. Defaults to cpu.
- """
- def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12,
- nfft: tp.Optional[int] = None, winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None,
- argmax: bool = False, norm: float = torch.inf, device: tp.Union[torch.device, str] = "cpu"):
- super().__init__()
- from librosa import filters
- self.device = device
- self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32)
- self.winlen = winlen or 2 ** radix2_exp
- self.nfft = nfft or self.winlen
- self.winhop = winhop or (self.winlen // 4)
- self.sr = sample_rate
- self.n_chroma = n_chroma
- self.norm = norm
- self.argmax = argmax
- self.window = torch.hann_window(self.winlen).to(device)
- self.fbanks = torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,
- n_chroma=self.n_chroma)).to(device)
- self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,
- hop_length=self.winhop, power=2, center=True,
- pad=0, normalized=True).to(device)
-
- def forward(self, wav):
- with self.autocast:
- T = wav.shape[-1]
- # in case we are getting a wav that was dropped out (nullified)
- # make sure wav length is no less that nfft
- if T < self.nfft:
- pad = self.nfft - T
- r = 0 if pad % 2 == 0 else 1
- wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)
- assert wav.shape[-1] == self.nfft, f'expected len {self.nfft} but got {wav.shape[-1]}'
- spec = self.spec(wav).squeeze(1)
- raw_chroma = torch.einsum("cf,...ft->...ct", self.fbanks, spec)
- norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)
- norm_chroma = rearrange(norm_chroma, "b d t -> b t d")
-
- if self.argmax:
- idx = norm_chroma.argmax(-1, keepdims=True)
- norm_chroma[:] = 0
- norm_chroma.scatter_(dim=-1, index=idx, value=1)
-
- return norm_chroma
-
-
-def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str):
- """Utility function for nullifying an attribute inside an ConditioningAttributes object.
- If the condition is of type "wav", then nullify it using "nullify_condition".
-    If the condition is of any other type, set its value to None.
- Works in-place.
- """
- if condition_type not in ["text", "wav"]:
- raise ValueError(
- "dropout_condition got an unexpected condition type!"
- f" expected 'wav' or 'text' but got '{condition_type}'"
- )
-
- if condition not in getattr(sample, condition_type):
- raise ValueError(
- "dropout_condition received an unexpected condition!"
- f" expected wav={sample.wav.keys()} and text={sample.text.keys()}"
- f"but got '{condition}' of type '{condition_type}'!"
- )
-
- if condition_type == "wav":
- wav, length, path = sample.wav[condition]
- sample.wav[condition] = nullify_wav(wav)
- else:
- sample.text[condition] = None
-
- return sample
-
-
-class DropoutModule(nn.Module):
- """Base class for all dropout modules."""
- def __init__(self, seed: int = 1234):
- super().__init__()
- self.rng = torch.Generator()
- self.rng.manual_seed(seed)
-
-
-class AttributeDropout(DropoutModule):
- """Applies dropout with a given probability per attribute. This is different from the behavior of
- ClassifierFreeGuidanceDropout as this allows for attributes to be dropped out separately. For example,
- "artist" can be dropped while "genre" remains. This is in contrast to ClassifierFreeGuidanceDropout
- where if "artist" is dropped "genre" must also be dropped.
-
- Args:
- p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example:
- ...
- "genre": 0.1,
- "artist": 0.5,
- "wav": 0.25,
- ...
- active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False.
- seed (int, optional): Random seed.
- """
- def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234):
- super().__init__(seed=seed)
- self.active_on_eval = active_on_eval
- # construct dict that return the values from p otherwise 0
- self.p = {}
- for condition_type, probs in p.items():
- self.p[condition_type] = defaultdict(lambda: 0, probs)
-
- def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
- """
- Args:
- samples (tp.List[ConditioningAttributes]): List of conditions.
- Returns:
- tp.List[ConditioningAttributes]: List of conditions after certain attributes were set to None.
- """
- if not self.training and not self.active_on_eval:
- return samples
-
- samples = deepcopy(samples)
-
- for condition_type, ps in self.p.items(): # for condition types [text, wav]
- for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre])
- if torch.rand(1, generator=self.rng).item() < p:
- for sample in samples:
- dropout_condition(sample, condition_type, condition)
-
- return samples
-
- def __repr__(self):
- return f"AttributeDropout({dict(self.p)})"
-
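-# Example (illustrative probabilities): "artist" and "genre" are dropped
-# independently of each other, and only while in training mode:
-#
-#   dropout = AttributeDropout(p={"text": {"artist": 0.5, "genre": 0.1}})
-#   samples = dropout(samples)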
-
-class ClassifierFreeGuidanceDropout(DropoutModule):
- """Applies Classifier Free Guidance dropout, meaning all attributes
- are dropped with the same probability.
-
- Args:
- p (float): Probability to apply condition dropout during training.
- seed (int): Random seed.
- """
- def __init__(self, p: float, seed: int = 1234):
- super().__init__(seed=seed)
- self.p = p
-
- def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
- """
- Args:
- samples (tp.List[ConditioningAttributes]): List of conditions.
- Returns:
- tp.List[ConditioningAttributes]: List of conditions after all attributes were set to None.
- """
- if not self.training:
- return samples
-
- # decide on which attributes to drop in a batched fashion
- drop = torch.rand(1, generator=self.rng).item() < self.p
- if not drop:
- return samples
-
- # nullify conditions of all attributes
- samples = deepcopy(samples)
-
- for condition_type in ["wav", "text"]:
- for sample in samples:
- for condition in sample.attributes[condition_type]:
- dropout_condition(sample, condition_type, condition)
-
- return samples
-
- def __repr__(self):
- return f"ClassifierFreeGuidanceDropout(p={self.p})"
-
-
-class ConditioningProvider(nn.Module):
- """Main class to provide conditions given all the supported conditioners.
-
- Args:
- conditioners (dict): Dictionary of conditioners.
- merge_text_conditions_p (float, optional): Probability to merge all text sources
- into a single text condition. Defaults to 0.
- drop_desc_p (float, optional): Probability to drop the original description
- when merging all text sources into a single text condition. Defaults to 0.
- device (tp.Union[torch.device, str], optional): Device for conditioners and output condition types.
- """
- def __init__(
- self,
- conditioners: tp.Dict[str, BaseConditioner],
- merge_text_conditions_p: float = 0,
- drop_desc_p: float = 0,
- device: tp.Union[torch.device, str] = "cpu",
- ):
- super().__init__()
- self.device = device
- self.merge_text_conditions_p = merge_text_conditions_p
- self.drop_desc_p = drop_desc_p
- self.conditioners = nn.ModuleDict(conditioners)
-
- @property
- def text_conditions(self):
- return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)]
-
- @property
- def wav_conditions(self):
- return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)]
-
- @property
- def has_wav_condition(self):
- return len(self.wav_conditions) > 0
-
- def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]:
- """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly.
- This should be called before starting any real GPU work to avoid synchronization points.
- This will return a dict matching conditioner names to their arbitrary tokenized representations.
-
- Args:
- inputs (tp.List[ConditioningAttributes]): List of ConditioningAttributes objects containing
- text and wav conditions.
- """
- assert all([type(x) == ConditioningAttributes for x in inputs]), \
- "got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]" \
- f" but types were {set([type(x) for x in inputs])}"
-
- output = {}
- text = self._collate_text(inputs)
- wavs = self._collate_wavs(inputs)
-
- assert set(text.keys() | wavs.keys()).issubset(set(self.conditioners.keys())), \
- f"got an unexpected attribute! Expected {self.conditioners.keys()}, got {text.keys(), wavs.keys()}"
-
- for attribute, batch in chain(text.items(), wavs.items()):
- output[attribute] = self.conditioners[attribute].tokenize(batch)
- return output
-
- def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]:
- """Compute pairs of `(embedding, mask)` using the configured conditioners
- and the tokenized representations. The output is for example:
-
- {
- "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])),
- "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])),
- ...
- }
-
- Args:
- tokenized (dict): Dict of tokenized representations as returned by `tokenize()`.
- """
- output = {}
- for attribute, inputs in tokenized.items():
- condition, mask = self.conditioners[attribute](inputs)
- output[attribute] = (condition, mask)
- return output
-
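A sketch of the intended two-phase call order (`provider` and `attributes` are placeholder names): tokenization happens first, off the GPU, and the forward pass then turns the tokenized batches into `(embedding, mask)` pairs.

```python
tokenized = provider.tokenize(attributes)  # CPU-side, no sync points
conditions = provider(tokenized)           # {"description": (emb, mask), ...}
```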
- def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]:
- """Given a list of ConditioningAttributes objects, compile a dictionary where the keys
- are the attributes and the values are the aggregated input per attribute.
- For example:
- Input:
- [
- ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...),
- ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...),
- ]
- Output:
- {
- "genre": ["Rock", "Hip-hop"],
- "description": ["A rock song with a guitar solo", "A hip-hop verse"]
- }
- """
- batch_per_attribute: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list)
-
- def _merge_conds(cond, merge_text_conditions_p=0, drop_desc_p=0):
- def is_valid(k, v):
- k_valid = k in ['key', 'bpm', 'genre', 'moods', 'instrument']
- v_valid = v is not None and isinstance(v, (int, float, str, list))
- return k_valid and v_valid
-
- def process_value(v):
- if isinstance(v, (int, float, str)):
- return v
- if isinstance(v, list):
- return ", ".join(v)
- else:
- RuntimeError(f"unknown type for text value! ({type(v), v})")
-
- desc = cond.text['description']
- meta_data = ""
- if random.uniform(0, 1) < merge_text_conditions_p:
- meta_pairs = [f'{k}: {process_value(v)}' for k, v in cond.text.items() if is_valid(k, v)]
- random.shuffle(meta_pairs)
- meta_data = ". ".join(meta_pairs)
- desc = desc if not random.uniform(0, 1) < drop_desc_p else None
-
- if desc is None:
- desc = meta_data if len(meta_data) > 1 else None
- else:
- desc = desc.rstrip('.') + ". " + meta_data
- cond.text['description'] = desc.strip() if desc else None
-
- if self.training and self.merge_text_conditions_p:
- for sample in samples:
- _merge_conds(sample, self.merge_text_conditions_p, self.drop_desc_p)
-
- texts = [x.text for x in samples]
- for text in texts:
- for condition in self.text_conditions:
- batch_per_attribute[condition].append(text[condition])
-
- return batch_per_attribute
-
- def _collate_wavs(self, samples: tp.List[ConditioningAttributes]):
- """Generate a dict where the keys are attributes by which we fetch similar wavs,
- and the values are tensors of wavs according to said attributes.
-
- *Note*: by the time the samples reach this function, each sample should have some waveform
- inside the "wav" attribute. It should be either:
- 1. A real waveform
- 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset)
- 3. A null waveform due to it being dropped in a dropout module (nullified by dropout)
-
- Args:
- samples (tp.List[ConditioningAttributes]): List of ConditioningAttributes samples.
- Returns:
- dict: A dictionary mapping an attribute name to wavs.
- """
- wavs = defaultdict(list)
- lens = defaultdict(list)
- paths = defaultdict(list)
- out = {}
-
- for sample in samples:
- for attribute in self.wav_conditions:
- wav, length, path = sample.wav[attribute]
- wavs[attribute].append(wav.flatten())
- lens[attribute].append(length)
- paths[attribute].append(path)
-
- # stack all wavs to a single tensor
- for attribute in self.wav_conditions:
- stacked_wav, _ = collate(wavs[attribute], dim=0)
- # use the lengths of the current attribute, not a hardcoded 'self_wav' key
- out[attribute] = WavCondition(stacked_wav.unsqueeze(1),
- torch.cat(lens[attribute]), paths[attribute])  # type: ignore
-
- return out
-
-
-class ConditionFuser(StreamingModule):
- """Condition fuser handles the logic to combine the different conditions
- into the actual model input.
-
- Args:
- fuse2cond (tp.Dict[str, tp.List[str]]): A dictionary mapping each fuse method to the
- conditions it applies to. For example:
- {
- "prepend": ["description"],
- "sum": ["genre", "bpm"],
- "cross": ["description"],
- }
- cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention.
- cross_attention_pos_emb_scale (float): Scale for positional embeddings in cross attention if used.
- """
- FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"]
-
- def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False,
- cross_attention_pos_emb_scale: float = 1.0):
- super().__init__()
- assert all(
- [k in self.FUSING_METHODS for k in fuse2cond.keys()]
- ), f"got invalid fuse method, allowed methods: {self.FUSING_MEHTODS}"
- self.cross_attention_pos_emb = cross_attention_pos_emb
- self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale
- self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond
- self.cond2fuse: tp.Dict[str, str] = {}
- for fuse_method, conditions in fuse2cond.items():
- for condition in conditions:
- self.cond2fuse[condition] = fuse_method
-
- def forward(
- self,
- input: Tensor,
- conditions: tp.Dict[str, ConditionType]
- ) -> tp.Tuple[Tensor, tp.Optional[Tensor]]:
- """Fuse the conditions to the provided model input.
-
- Args:
- input (Tensor): Transformer input.
- conditions (tp.Dict[str, ConditionType]): Dict of conditions.
- Returns:
- tp.Tuple[Tensor, tp.Optional[Tensor]]: The first tensor is the transformer input
- after the conditions have been fused. The second output tensor is the tensor
- used for cross-attention or None if no cross attention inputs exist.
- """
- B, T, _ = input.shape
-
- if 'offsets' in self._streaming_state:
- first_step = False
- offsets = self._streaming_state['offsets']
- else:
- first_step = True
- offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device)
-
- assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \
- f"given conditions contain unknown attributes for fuser, " \
- f"expected {self.cond2fuse.keys()}, got {conditions.keys()}"
- cross_attention_output = None
- for cond_type, (cond, cond_mask) in conditions.items():
- op = self.cond2fuse[cond_type]
- if op == "sum":
- input += cond
- elif op == "input_interpolate":
- cond = rearrange(cond, "b t d -> b d t")
- cond = F.interpolate(cond, size=input.shape[1])
- input += rearrange(cond, "b d t -> b t d")
- elif op == "prepend":
- if first_step:
- input = torch.cat([cond, input], dim=1)
- elif op == "cross":
- if cross_attention_output is not None:
- cross_attention_output = torch.cat([cross_attention_output, cond], dim=1)
- else:
- cross_attention_output = cond
- else:
- raise ValueError(f"unknown op ({op})")
-
- if self.cross_attention_pos_emb and cross_attention_output is not None:
- positions = torch.arange(
- cross_attention_output.shape[1],
- device=cross_attention_output.device
- ).view(1, -1, 1)
- pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1])
- cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb
-
- if self._is_streaming:
- self._streaming_state['offsets'] = offsets + T
-
- return input, cross_attention_output
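Tying the pieces together, a hedged sketch of configuring the fuser with the docstring's example mapping; `transformer_input` and `conditions` are placeholders for a `[B, T, D]` tensor and the output of ConditioningProvider:

```python
fuser = ConditionFuser(fuse2cond={
    "prepend": ["description"],  # concatenated in front of the sequence
    "sum": ["genre", "bpm"],     # added element-wise to the input
    "cross": ["description"],    # routed to cross-attention
})
fused_input, cross_inputs = fuser(transformer_input, conditions)
```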
diff --git a/spaces/AchyuthGamer/OpenGPT/client/js/icons.js b/spaces/AchyuthGamer/OpenGPT/client/js/icons.js
deleted file mode 100644
index 84fed38dd35e0d0203370a8314a360d27f350dd6..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/client/js/icons.js
+++ /dev/null
@@ -1 +0,0 @@
-window.FontAwesomeKitConfig={asyncLoading:{enabled:!1},autoA11y:{enabled:!0},baseUrl:"https://ka-f.fontawesome.com",baseUrlKit:"https://kit-pro.fontawesome.com",detectConflictsUntil:null,iconUploads:{},id:96462084,license:"pro",method:"css",minify:{enabled:!0},token:"d0514f1901",v4FontFaceShim:{enabled:!0},v4shim:{enabled:!0},v5FontFaceShim:{enabled:!0},version:"6.1.1"},function(t){"function"==typeof define&&define.amd?define("kit-loader",t):t()}(function(){"use strict";function t(e){return(t="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(e)}function e(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function n(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(t);e&&(o=o.filter(function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable})),n.push.apply(n,o)}return n}function o(t){for(var o=1;ot.length)&&(e=t.length);for(var n=0,o=new Array(e);n2&&void 0!==arguments[2]?arguments[2]:function(){},r=e.document||r,i=a.bind(a,r,["fa","fab","fas","far","fal","fad","fak"]),u=Object.keys(t.iconUploads||{}).length>0;t.autoA11y.enabled&&n(i);var f=[{id:"fa-main",addOn:void 0}];t.v4shim&&t.v4shim.enabled&&f.push({id:"fa-v4-shims",addOn:"-v4-shims"}),t.v5FontFaceShim&&t.v5FontFaceShim.enabled&&f.push({id:"fa-v5-font-face",addOn:"-v5-font-face"}),t.v4FontFaceShim&&t.v4FontFaceShim.enabled&&f.push({id:"fa-v4-font-face",addOn:"-v4-font-face"}),u&&f.push({id:"fa-kit-upload",customCss:!0});var s=f.map(function(n){return new F(function(r,i){E(n.customCss?function(t){return t.baseUrlKit+"/"+t.token+"/"+t.id+"/kit-upload.css"}(t):c(t,{addOn:n.addOn,minify:t.minify.enabled}),e).then(function(i){r(function(t,e){var n=e.contentFilter||function(t,e){return t},o=document.createElement("style"),r=document.createTextNode(n(t,e));return o.appendChild(r),o.media="all",e.id&&o.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&o.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),o}(i,o(o({},e),{},{baseUrl:t.baseUrl,version:t.version,id:n.id,contentFilter:function(t,e){return _(t,e.baseUrl,e.version)}})))}).catch(i)})});return F.all(s)}function P(t,e){var n=document.createElement("SCRIPT"),o=document.createTextNode(t);return n.appendChild(o),n.referrerPolicy="strict-origin",e.id&&n.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n}function U(t){var e,n=[],o=document,r=(o.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(o.readyState);r||o.addEventListener("DOMContentLoaded",e=function(){for(o.removeEventListener("DOMContentLoaded",e),r=1;e=n.shift();)e()}),r?setTimeout(t,0):n.push(t)}try{if(window.FontAwesomeKitConfig){var k=window.FontAwesomeKitConfig,L={detectingConflicts:k.detectConflictsUntil&&new Date<=new Date(k.detectConflictsUntil),detectionIgnoreAttr:"data-fa-detection-ignore",fetch:window.fetch,token:k.token,XMLHttpRequest:window.XMLHttpRequest,document:document},I=document.currentScript,T=I?I.parentElement:document.head;(function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:{};return"js"===t.method?function(t,e){e.autoA11y=t.autoA11y.enabled,"pro"===t.license&&(e.autoFetchSvg=!0,e.fetchSvgFrom=t.baseUrl+"/releases/"+("latest"===t.version?"latest":"v".concat(t.version))+"/svgs",e.fetchUploadedSvgFrom=t.uploadsUrl);var n=[];return t.v4shim.enabled&&n.push(new F(function(n,r){E(c(t,{addOn:"-v4-shims",minify:t.minify.enabled}),e).then(function(t){n(P(t,o(o({},e),{},{id:"fa-v4-shims"})))}).catch(r)})),n.push(new F(function(n,r){E(c(t,{minify:t.minify.enabled}),e).then(function(t){var r=P(t,o(o({},e),{},{id:"fa-main"}));n(function(t,e){var n=e&&void 0!==e.autoFetchSvg?e.autoFetchSvg:void 0,o=e&&void 0!==e.autoA11y?e.autoA11y:void 0;return void 0!==o&&t.setAttribute("data-auto-a11y",o?"true":"false"),n&&(t.setAttributeNode(document.createAttribute("data-auto-fetch-svg")),t.setAttribute("data-fetch-svg-from",e.fetchSvgFrom),t.setAttribute("data-fetch-uploaded-svg-from",e.fetchUploadedSvgFrom)),t}(r,e))}).catch(r)})),F.all(n)}(t,e):"css"===t.method?C(t,e,function(t){U(t),function(t){"undefined"!=typeof MutationObserver&&new MutationObserver(t).observe(document,{childList:!0,subtree:!0})}(t)}):void 0})(k,L).then(function(t){t.map(function(t){try{T.insertBefore(t,I?I.nextSibling:null)}catch(e){T.appendChild(t)}}),L.detectingConflicts&&I&&U(function(){I.setAttributeNode(document.createAttribute(L.detectionIgnoreAttr));var t=function(t,e){var n=document.createElement("script");return e&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n.src=c(t,{baseFilename:"conflict-detection",fileSuffix:"js",subdir:"js",minify:t.minify.enabled}),n}(k,L);document.body.appendChild(t)})}).catch(function(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))})}}catch(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))}});
\ No newline at end of file
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Aichat.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Aichat.py
deleted file mode 100644
index 8edd17e2c6938e2fdd4886e2354580f7e4108960..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Aichat.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from .base_provider import AsyncProvider, format_prompt
-
-
-class Aichat(AsyncProvider):
- url = "https://chat-gpt.org/chat"
- working = True
- supports_gpt_35_turbo = True
-
- @staticmethod
- async def create_async(
- model: str,
- messages: list[dict[str, str]],
- proxy: str = None,
- **kwargs
- ) -> str:
- headers = {
- "authority": "chat-gpt.org",
- "accept": "*/*",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": "https://chat-gpt.org",
- "pragma": "no-cache",
- "referer": "https://chat-gpt.org/chat",
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"macOS"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
- }
- async with ClientSession(
- headers=headers
- ) as session:
- json_data = {
- "message": format_prompt(messages),
- "temperature": kwargs.get('temperature', 0.5),
- "presence_penalty": 0,
- "top_p": kwargs.get('top_p', 1),
- "frequency_penalty": 0,
- }
- async with session.post(
- "https://chat-gpt.org/api/text",
- proxy=proxy,
- json=json_data
- ) as response:
- response.raise_for_status()
- result = await response.json()
- if not result['response']:
- raise Exception(f"Error Response: {result}")
- return result["message"]
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Bard.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Bard.py
deleted file mode 100644
index 7c42b680c8a9751a1eb0a4f9525bc3a2b95b1929..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Bard.py
+++ /dev/null
@@ -1,92 +0,0 @@
-from __future__ import annotations
-
-import json
-import random
-import re
-
-from aiohttp import ClientSession
-
-from ..base_provider import AsyncProvider, format_prompt, get_cookies
-
-
-class Bard(AsyncProvider):
- url = "https://bard.google.com"
- needs_auth = True
- working = True
- _snlm0e = None
-
- @classmethod
- async def create_async(
- cls,
- model: str,
- messages: list[dict[str, str]],
- proxy: str = None,
- cookies: dict = None,
- **kwargs
- ) -> str:
- prompt = format_prompt(messages)
- if proxy and "://" not in proxy:
- proxy = f"http://{proxy}"
- if not cookies:
- cookies = get_cookies(".google.com")
-
- headers = {
- 'authority': 'bard.google.com',
- 'origin': 'https://bard.google.com',
- 'referer': 'https://bard.google.com/',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
- 'x-same-domain': '1',
- }
-
- async with ClientSession(
- cookies=cookies,
- headers=headers
- ) as session:
- if not cls._snlm0e:
- async with session.get(cls.url, proxy=proxy) as response:
- text = await response.text()
-
- match = re.search(r'SNlM0e\":\"(.*?)\"', text)
- if not match:
- raise RuntimeError("No snlm0e value.")
- cls._snlm0e = match.group(1)
-
- params = {
- 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
- '_reqid': random.randint(1111, 9999),
- 'rt': 'c'
- }
-
- data = {
- 'at': cls._snlm0e,
- 'f.req': json.dumps([None, json.dumps([[prompt]])])
- }
-
- intents = '.'.join([
- 'assistant',
- 'lamda',
- 'BardFrontendService'
- ])
-
- async with session.post(
- f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
- data=data,
- params=params,
- proxy=proxy
- ) as response:
- response = await response.text()
- response = json.loads(response.splitlines()[3])[0][2]
- response = json.loads(response)[4][0][1][0]
- return response
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/spaces/Adr740/CV_XPLORER_POC/get_cv.py b/spaces/Adr740/CV_XPLORER_POC/get_cv.py
deleted file mode 100644
index 6888d0ed696198ddd487f6e8fd7c0635758c9761..0000000000000000000000000000000000000000
--- a/spaces/Adr740/CV_XPLORER_POC/get_cv.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import pandas as pd
-import openai
-from data import data as df
-import numpy as np
-import os
-
-openai.api_key = os.environ.get("openai")
-
-def cosine_similarity(a, b):
- return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
-
-
-def get_embedding(text, model="text-embedding-ada-002"):
- try:
- text = text.replace("\n", " ")
- except AttributeError:
- pass  # not a string; leave it unchanged
- return openai.Embedding.create(input = [text], model=model)['data'][0]['embedding']
-
-def search_cv(search, nb=3, pprint=True):
- embedding = get_embedding(search, model='text-embedding-ada-002')
- dff = df.copy()
- dff['similarities'] = dff.embedding.apply(lambda x: cosine_similarity(x, embedding))
- res = dff.sort_values('similarities', ascending=False).head(int(nb))
- # try:
- # res.drop(columns=["id","hadith_id", "embeding"], inplace=True)
- # except:
- # pass
- return res
-
-def get_cv(text, nb):
- result = search_cv(text,nb).to_dict(orient="records")
- final_str = ""
- for r in result:
- final_str += "#### Candidat avec " + str(round(r["similarities"]*100,2)) + "% de similarité :\n"+ str(r["summary"]).replace("#","")
- final_str += "\n\n[-> Lien vers le CV complet]("+ str(r["url"]) + ')'
- final_str += "\n\n-----------------------------------------------------------------------------------------------------\n\n"
- final_str = final_str.replace("`", "")
- return final_str
\ No newline at end of file
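The ranking above reduces to plain cosine similarity between embedding vectors; a self-contained sanity check of the helper's behavior:

```python
import numpy as np

def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

print(cosine_similarity([1.0, 0.0], [1.0, 0.0]))  # 1.0: same direction
print(cosine_similarity([1.0, 0.0], [0.0, 1.0]))  # 0.0: orthogonal
```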
diff --git a/spaces/AgentVerse/agentVerse/scripts/__init__.py b/spaces/AgentVerse/agentVerse/scripts/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/quadimage.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/quadimage.js
deleted file mode 100644
index 375544b7ee922540d35caba6acdeea51f848d2c1..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/quadimage.js
+++ /dev/null
@@ -1,13 +0,0 @@
-import QuadImage from './gameobjects/mesh/quad/image/Image.js';
-import QuadRenderTexture from './gameobjects/mesh/quad/rendertexture/RenderTexture.js';
-import SkewImage from './gameobjects/mesh/quad/skewimage/SkewImage.js';
-import SkewRenderTexture from './gameobjects/mesh/quad/skewrendertexture/SkewRenderTexture.js';
-import ContainerSkew from './behaviors/containerskew/ContainerSkew.js';
-
-export {
- QuadImage,
- QuadRenderTexture,
- SkewImage,
- SkewRenderTexture,
- ContainerSkew
-}
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/imagebox/ImageBox.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/imagebox/ImageBox.js
deleted file mode 100644
index 027c59e8877e97bd0e56e95a194f25a542a29a2e..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/imagebox/ImageBox.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import ImageBox from '../../../plugins/imagebox.js';
-export default ImageBox;
\ No newline at end of file
diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/vits/monotonic_align/__init__.py b/spaces/Akmyradov/TurkmenTTSweSTT/vits/monotonic_align/__init__.py
deleted file mode 100644
index 3d7009c40fea3a98168e3e3bc9ae061e91327422..0000000000000000000000000000000000000000
--- a/spaces/Akmyradov/TurkmenTTSweSTT/vits/monotonic_align/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import numpy as np
-import torch
-from .monotonic_align.core import maximum_path_c
-
-
-def maximum_path(neg_cent, mask):
- """ Cython optimized version.
- neg_cent: [b, t_t, t_s]
- mask: [b, t_t, t_s]
- """
- device = neg_cent.device
- dtype = neg_cent.dtype
- neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
- path = np.zeros(neg_cent.shape, dtype=np.int32)
-
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
- maximum_path_c(path, neg_cent, t_t_max, t_s_max)
- return torch.from_numpy(path).to(device=device, dtype=dtype)
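For readers without the compiled extension, here is a slow pure-NumPy sketch of the same dynamic program for a single unpadded `[t_t, t_s]` score matrix (the batched, masked handling above is omitted):

```python
import numpy as np

def maximum_path_ref(neg_cent):
    """Best monotonic path from (0, 0) to (t_t - 1, t_s - 1)."""
    t_t, t_s = neg_cent.shape
    q = np.full((t_t, t_s), -np.inf, dtype=np.float32)
    q[0, 0] = neg_cent[0, 0]
    for y in range(1, t_t):
        for x in range(t_s):
            stay = q[y - 1, x]                            # keep the same column
            move = q[y - 1, x - 1] if x > 0 else -np.inf  # advance one column
            q[y, x] = neg_cent[y, x] + max(stay, move)
    path = np.zeros((t_t, t_s), dtype=np.int32)
    x = t_s - 1                                           # backtrack greedily
    for y in range(t_t - 1, -1, -1):
        path[y, x] = 1
        if y > 0 and x > 0 and q[y - 1, x - 1] >= q[y - 1, x]:
            x -= 1
    return path
```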
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_onnx_objects.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_onnx_objects.py
deleted file mode 100644
index bde5f6ad0793e2d81bc638600b46ff81748d09ee..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_onnx_objects.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is autogenerated by the command `make fix-copies`, do not edit.
-from ..utils import DummyObject, requires_backends
-
-
-class OnnxRuntimeModel(metaclass=DummyObject):
- _backends = ["onnx"]
-
- def __init__(self, *args, **kwargs):
- requires_backends(self, ["onnx"])
-
- @classmethod
- def from_config(cls, *args, **kwargs):
- requires_backends(cls, ["onnx"])
-
- @classmethod
- def from_pretrained(cls, *args, **kwargs):
- requires_backends(cls, ["onnx"])
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py
deleted file mode 100644
index 8d154763bf810dc9f668988f05f53dd32a354a31..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py
+++ /dev/null
@@ -1,5 +0,0 @@
-_base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py'
-# model settings
-model = dict(
- pretrained='open-mmlab://detectron2/resnet101_caffe',
- backbone=dict(depth=101))
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py
deleted file mode 100644
index f36eb02e68707d502cbe315ff8f6f25b232dee92..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './fcn_r50-d8_769x769_80k_cityscapes.py'
-model = dict(
- pretrained='torchvision://resnet101',
- backbone=dict(type='ResNet', depth=101))
diff --git a/spaces/AndySAnker/DeepStruc/tools/utils.py b/spaces/AndySAnker/DeepStruc/tools/utils.py
deleted file mode 100644
index c1388855b0080fb176e3f28ddb6adcd0d92fe2d9..0000000000000000000000000000000000000000
--- a/spaces/AndySAnker/DeepStruc/tools/utils.py
+++ /dev/null
@@ -1,279 +0,0 @@
-import torch, os, yaml, sys
-import numpy as np
-import matplotlib.pyplot as plt
-import pandas as pd
-from tqdm import tqdm
-from matplotlib.patches import Ellipse
-import matplotlib.lines as mlines
-from matplotlib.gridspec import GridSpec
-import datetime
-from tools.data_loader import save_xyz_file
-import streamlit as st
-
-def get_data(args): # Todo: write your own dataloader.
- ct = str(datetime.datetime.now()).replace(' ', '_').replace(':','-').replace('.','-')
- project_name = f'{args.save_path}/DeepStruc_{ct}'
- print(f'\nProject name is: {project_name}')
- if not os.path.isdir(f'{project_name}'):
- os.mkdir(f'{project_name}')
-
- samples = args.num_samples
- ## Use the uploaded file. Does not support multiple files. Could be written smarter.
- files = ['uploaded_file.gr']
- this_path = '.'
- #this_path = args.data
- #if os.path.isdir(this_path):
- # files = sorted(os.listdir(this_path))
- #else:
- # files = [this_path]
- # this_path = '.'
-
- x_list, y_list, name_list = [], [], []
- idxx = 0
- np_data = np.zeros((len(files)*samples, 2800))
- for idx, file in enumerate(files):
- for skip_row in range(100):
- try:
- data = np.loadtxt(f'{this_path}/{file}', skiprows=skip_row)
- except ValueError:
- continue
- data = data.T
- x_list.append(data[0])
- y_list.append(data[1])
- Gr_ph = data[1]
- if round(data[0][1] - data[0][0],2) != 0.01:
- raise ValueError("The PDF does not have an r-step of 0.01 Å")
- try:
- start_PDF = np.where((data[0] > 1.995) & (data[0] < 2.005))[0][0]
- except IndexError:
- Gr_ph = np.concatenate((np.zeros(int(data[0][0] / 0.01)), Gr_ph))
- print("The PDF's first value is above 2 Å. Zeros were padded down to 2 Å as a quick fix.")
- try:
- end_PDF = np.where((data[0] > 29.995) & (data[0] < 30.005))[0][0]
- except IndexError:
- Gr_ph = np.concatenate((Gr_ph, np.zeros(3000 - len(Gr_ph))))
- print("The PDF's last value is below 30 Å. Zeros were padded up to 30 Å as a quick fix.")
- Gr_ph = Gr_ph[200:3000]
-
- for i in range(samples):
- np_data[idxx] = Gr_ph
- np_data[idxx] /= np.amax(np_data[idxx])
- idxx += 1
- name_list.append(file)
- break
-
- fig, ax = plt.subplots()
-
- plt.plot(x_list[0], y_list[0], label="Input PDF")
- plt.plot(np.arange(2, 30, 0.01), np_data[0], label="DeepStruc PDF")
- ax.set_xlabel(r'r / $\mathtt{\AA}$')
- ax.set_ylabel('G(r) / a.u.')
-
- ax.set_xlim(0,30)
- plt.legend()
- plt.title(f'{files[0]}')
- plt.tight_layout()
- plt.savefig(f'{project_name}/PDFs.png', dpi=300)
-
- np_data = np_data.reshape((len(files)*samples, 2800, 1))
- np_data = torch.tensor(np_data, dtype=torch.float)
- return np_data, name_list, project_name
-
-
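The 2800-point slice used above corresponds to r = 2–30 Å on a 0.01 Å grid; a quick check of that bookkeeping:

```python
import numpy as np

r = np.arange(0, 30, 0.01)  # full 0-30 Å grid at 0.01 Å steps
window = r[200:3000]        # indices 200:3000 cover 2.00-29.99 Å
assert len(window) == 2800
assert np.isclose(window[0], 2.0) and np.isclose(window[-1], 29.99)
```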
-def format_predictions(latent_space, data_names, mus, sigmas, sigma_inc):
- df_preds = pd.DataFrame(columns=['x', 'y', 'file_name', 'mu', 'sigma', 'sigma_inc'])
- for i,j, mu, sigma in zip(latent_space, data_names, mus, sigmas):
- if '/' in j:
- j = j.split('/')[-1]
-
- if '.' in j:
- j_idx = j.rindex('.')
- j = j[:j_idx]
-
- info_dict = {
- 'x': i[0].detach().cpu().numpy(),
- 'y': i[1].detach().cpu().numpy(),
- 'file_name': j,
- 'mu': mu.detach().cpu().numpy(),
- 'sigma': sigma.detach().cpu().numpy(),
- 'sigma_inc': sigma_inc,}
-
-
- print ("info dict: ", info_dict)
- print ("df_preds initial: ", df_preds.head())
-
- # Append is deprecated and should use concat instead
- df_preds = df_preds.append(info_dict, ignore_index=True)
-
- return df_preds
-
-
-def plot_ls(df, index_highlight):
- ideal_ls = './tools/ls_points.csv'
- color_dict = {
- 'FCC': '#19ADFF',
- 'BCC': '#4F8F00',
- 'SC': '#941100',
- 'Octahedron': '#212121',
- 'Icosahedron': '#005493',
- 'Decahedron': '#FF950E',
- 'HCP': '#FF8AD8',
- }
- df_ideal = pd.read_csv(ideal_ls, index_col=0) # Get latent space data
- # Plotting inputs
- ## Training and validation data
- MARKER_SIZE_TR = 60
- EDGE_LINEWIDTH_TR = 0.0
- ALPHA_TR = 0.3
-
- ## Figure
- FIG_SIZE = (10, 4)
- MARKER_SIZE_FG = 60
- MARKER_FONT_SIZE = 10
- MARKER_SCALE = 1.5
-
- fig = plt.figure(figsize=FIG_SIZE)
- gs = GridSpec(1, 5, figure=fig)
- ax = fig.add_subplot(gs[0, :4])
- ax_legend = fig.add_subplot(gs[0, 4])
-
- if int(index_highlight) >= len(df):
- print(f'\nIndex argument is too large! It must be smaller than {len(df)} but was {index_highlight}.')
- raise IndexError
- elif int(index_highlight) < -1:
- print(f'\nIndex argument invalid! Must be an integer from -1 up to the number of samples generated.')
- raise ValueError
- elif int(index_highlight)==-1:
- pass
- elif len(df['file_name'].unique()) > 1:
- print(f'\nCan only highlight an index if --data is a specific file, but {len(df["file_name"].unique())} files were loaded.')
- else:
- print(f'\nHighlighting index {index_highlight} from the {df["file_name"].unique()[0]} sampling pool.')
- ax.scatter(df.iloc[index_highlight]['x'], df.iloc[index_highlight]['y'], c='k', s=40,
- linewidth=0.0, marker='o', zorder=3)
- ax.scatter(df.iloc[index_highlight]['x'], df.iloc[index_highlight]['y'], c='w', s=25,
- linewidth=0.0, marker='o', zorder=3)
- ax.scatter(df.iloc[index_highlight]['x'], df.iloc[index_highlight]['y'], c='k', s=10,
- linewidth=0.0, marker='o', zorder=3)
- ax.scatter(df.iloc[index_highlight]['x'], df.iloc[index_highlight]['y'], c='w', s=1,
- linewidth=0.0, marker='o', zorder=3)
-
- print('\nPlotting DeepStruc training + validation data.')
- ax.scatter(df_ideal.iloc[:]['x'].values, df_ideal.iloc[:]['y'].values,
- c=[color_dict[str(s)] for s in df_ideal.iloc[:]['stru_type']],
- s=MARKER_SIZE_TR * df_ideal.iloc[:]['size'].values,
- edgecolors='k', linewidth=EDGE_LINEWIDTH_TR,
- alpha=ALPHA_TR)
-
-
- mlines_list = []
- for key in color_dict.keys():
- mlines_list.append(
- mlines.Line2D([], [], MARKER_SIZE_FG, marker='o', c=color_dict[key], linestyle='None', label=key,
- mew=1))
-
- from matplotlib import cm
- cm_subsection = np.linspace(0, 1, len(df.file_name.unique()))
- data_color = [cm.magma(x) for x in cm_subsection]
-
- print('\nPlotting DeepStruc structure sampling.')
- pbar = tqdm(total=len(df.file_name.unique()))
- for idx, file_name in enumerate(df.file_name.unique()):
- this_c = np.array([data_color[idx]])
-
- df_ph = df[df.file_name==file_name]
- df_ph.reset_index(drop=True, inplace=True)
-
- ax.scatter(df_ph['mu'][0][0],df_ph['mu'][0][1], c=this_c, s=10, edgecolors='k',
- linewidth=0.5, marker='D',zorder=1)
- ellipse = Ellipse((df_ph['mu'][0][0],df_ph['mu'][0][1]),df_ph['sigma'][0][0],df_ph['sigma'][0][1], ec='k', fc=this_c, alpha=0.5, fill=True, zorder=-1)
- ax.add_patch(ellipse)
-
- ellipse = Ellipse((df_ph['mu'][0][0],df_ph['mu'][0][1]),df_ph['x'].var(),df_ph['y'].var(), ec='k', fc=this_c, alpha=0.2, fill=True, zorder=-1)
- ax.add_patch(ellipse)
-
- mlines_list.append(
- mlines.Line2D([], [], MARKER_SIZE_FG, marker='D', c=this_c, linestyle='None', label=file_name, mec='k',
- mew=1))
-
- for index, sample in df_ph.iterrows():
- ax.scatter(sample['x'], sample['y'], c=this_c, s=10, edgecolors='k',
- linewidth=0.8, marker='o', zorder=2)
- pbar.update()
- pbar.close()
-
- ax_legend.legend(handles=mlines_list,fancybox=True, #ncol=2, #, bbox_to_anchor=(0.8, 0.5)
- markerscale=MARKER_SCALE, fontsize=MARKER_FONT_SIZE, loc='upper right')
-
- ax.set_xlabel('Latent space $\mathregular{z_0}$', size=10) # Latent Space Feature 1
- ax.set_ylabel('Latent space $\mathregular{z_1}$', size=10)
-
- ax_legend.spines['top'].set_visible(False)
- ax_legend.spines['right'].set_visible(False)
- ax_legend.spines['bottom'].set_visible(False)
- ax_legend.spines['left'].set_visible(False)
- ax_legend.get_xaxis().set_ticks([])
- ax_legend.get_yaxis().set_ticks([])
- ax.get_xaxis().set_ticks([])
- ax.get_yaxis().set_ticks([])
-
- plt.tight_layout()
-
- return fig
-
-def get_model(model_dir):
- if model_dir == 'DeepStruc':
- with open(f'./models/DeepStruc/model_arch.yaml') as file:
- model_arch = yaml.full_load(file)
- model_path = './models/DeepStruc/models/DeepStruc.ckpt'
- return model_path, model_arch
- if os.path.isdir(model_dir):
- if 'models' in os.listdir(model_dir):
- models = sorted(os.listdir(f'{model_dir}/models'))
- models = [model for model in models if '.ckpt' in model]
- print(f'No specific model was provided. {models[0]} was chosen.')
- print('The dataloader might not load the expected dimensions.')
- model_path = f'{model_dir}/models/{models[0]}'
- with open(f'{model_dir}/model_arch.yaml') as file:
- model_arch = yaml.full_load(file)
-
- return model_path, model_arch
- else:
- print(f'Path not understood: {model_dir}')
- else:
- idx = model_dir.rindex('/')
- with open(f'{model_dir[:idx-6]}model_arch.yaml') as file:
- model_arch = yaml.full_load(file)
-
- return model_dir, model_arch
-
-
-def save_predictions(xyz_pred, df, project_name, model_arch, args):
- print('\nSaving predicted structures as XYZ files.')
- if not os.path.isdir(f'{project_name}'):
- os.mkdir(f'{project_name}')
-
- with open(f'{project_name}/args.yaml', 'w') as outfile:
- yaml.dump(vars(args), outfile, allow_unicode=True, default_flow_style=False)
-
- """
- pbar = tqdm(total=len(df))
- for count, (idx, row) in enumerate(df.iterrows()):
- if not os.path.isdir(f'{project_name}/{row["file_name"]}'):
- os.mkdir(f'{project_name}/{row["file_name"]}')
- x = f'{float(row["x"]):+.3f}'.replace('.', '-')
- y = f'{float(row["y"]):+.3f}'.replace('.', '-')
-
- these_cords = save_xyz_file('./',
- xyz_pred[idx].detach().cpu().numpy(),
- f'{count:05}',
- [model_arch['norm_vals']['x'],model_arch['norm_vals']['y'],model_arch['norm_vals']['z']])
- pbar.update()
- pbar.close()
- """
- # Does not support multiple structure saving
- these_cords = save_xyz_file('./',
- xyz_pred[args.index_plot].detach().cpu().numpy(),
- 'DummyName',
- [model_arch['norm_vals']['x'],model_arch['norm_vals']['y'],model_arch['norm_vals']['z']])
- return these_cords
diff --git a/spaces/Ariharasudhan/Kenya_food_classification/README.md b/spaces/Ariharasudhan/Kenya_food_classification/README.md
deleted file mode 100644
index a364be890e5bca8aae5b71a0a6261d7aa1974e51..0000000000000000000000000000000000000000
--- a/spaces/Ariharasudhan/Kenya_food_classification/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Kenya Food Classification
-emoji: 📉
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.44.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/__init__.py b/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/scheduler_list.py b/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/scheduler_list.py
deleted file mode 100644
index 1b5399fe7f4cc1a19b6e57a74e468b995d556a18..0000000000000000000000000000000000000000
--- a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/scheduler_list.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from diffusers import (
- DDIMScheduler,
- DPMSolverMultistepScheduler,
- EulerAncestralDiscreteScheduler,
- EulerDiscreteScheduler,
- HeunDiscreteScheduler,
- LMSDiscreteScheduler,
- UniPCMultistepScheduler,
-)
-
-diff_scheduler_list = ["DDIM", "EulerA", "Euler", "LMS", "Heun", "UniPC", "DPMSolver"]
-
-
-def get_scheduler_list(pipe, scheduler):
- if scheduler == diff_scheduler_list[0]:
- pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-
- elif scheduler == diff_scheduler_list[1]:
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
- elif scheduler == diff_scheduler_list[2]:
- pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
-
- elif scheduler == diff_scheduler_list[3]:
- pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
-
- elif scheduler == diff_scheduler_list[4]:
- pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
-
- elif scheduler == diff_scheduler_list[5]:
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-
- elif scheduler == diff_scheduler_list[6]:
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
-
- return pipe
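The if/elif ladder above is easy to leave out of sync with `diff_scheduler_list` (the UniPC entry shows how); a name-keyed mapping is a sketch of a more robust way to express the same dispatch:

```python
from diffusers import (
    DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler, HeunDiscreteScheduler, LMSDiscreteScheduler,
    UniPCMultistepScheduler,
)

SCHEDULERS = {
    "DDIM": DDIMScheduler, "EulerA": EulerAncestralDiscreteScheduler,
    "Euler": EulerDiscreteScheduler, "LMS": LMSDiscreteScheduler,
    "Heun": HeunDiscreteScheduler, "UniPC": UniPCMultistepScheduler,
    "DPMSolver": DPMSolverMultistepScheduler,
}

def get_scheduler_list(pipe, scheduler):
    # raises KeyError on unknown names instead of silently keeping the old scheduler
    pipe.scheduler = SCHEDULERS[scheduler].from_config(pipe.scheduler.config)
    return pipe
```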
diff --git a/spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese_bert.py b/spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese_bert.py
deleted file mode 100644
index f87aacca082fccf7f093b0f45f917a6a07562ecf..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese_bert.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-
-import torch
-from transformers import AutoTokenizer, AutoModelForMaskedLM
-
-import config
-from logger import logger
-from utils.download import download_and_verify
-from config import DEVICE as device
-
-URLS = [
- "https://huggingface.co/cl-tohoku/bert-base-japanese-v3/resolve/main/pytorch_model.bin",
-]
-TARGET_PATH = os.path.join(config.ABS_PATH, "bert_vits2/bert/bert-base-japanese-v3/pytorch_model.bin")
-EXPECTED_MD5 = None
-
-if not os.path.exists(TARGET_PATH):
- success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5)
-
-try:
- logger.info("Loading bert-base-japanese-v3...")
- tokenizer = AutoTokenizer.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3")
- model = AutoModelForMaskedLM.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3").to(
- device)
- logger.info("Loading finished.")
-except Exception as e:
- logger.error(e)
- logger.error(f"Please download pytorch_model.bin from cl-tohoku/bert-base-japanese-v3.")
-
-
-def get_bert_feature(text, word2ph, device=config.DEVICE):
- with torch.no_grad():
- inputs = tokenizer(text, return_tensors="pt")
- for i in inputs:
- inputs[i] = inputs[i].to(device)
- res = model(**inputs, output_hidden_states=True)
- res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()
- assert inputs["input_ids"].shape[-1] == len(word2ph)
- word2phone = word2ph
- phone_level_feature = []
- for i in range(len(word2phone)):
- repeat_feature = res[i].repeat(word2phone[i], 1)
- phone_level_feature.append(repeat_feature)
-
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
-
- return phone_level_feature.T
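The loop above up-samples token-level BERT features to phone level by repeating each token's hidden vector `word2ph[i]` times; a toy check of that expansion (shapes are illustrative):

```python
import torch

res = torch.randn(3, 768)  # 3 tokens, 768-dim hidden states
word2ph = [2, 1, 3]        # phones mapped to each token
phone_feats = torch.cat(
    [res[i].repeat(word2ph[i], 1) for i in range(len(word2ph))], dim=0
)
assert phone_feats.shape == (6, 768)  # sum(word2ph) phone-level rows
```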
diff --git a/spaces/Ashrafb/Imdf2/README.md b/spaces/Ashrafb/Imdf2/README.md
deleted file mode 100644
index 5024df6578b1a572edb68894093e8b056e3bdc3e..0000000000000000000000000000000000000000
--- a/spaces/Ashrafb/Imdf2/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Imdf2
-emoji: 🔥
-colorFrom: pink
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.27.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/collector.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/collector.py
deleted file mode 100644
index b3e293ea3a508dc54674349e845f9794118f548b..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/collector.py
+++ /dev/null
@@ -1,505 +0,0 @@
-"""
-The main purpose of this module is to expose LinkCollector.collect_sources().
-"""
-
-import collections
-import email.message
-import functools
-import itertools
-import json
-import logging
-import os
-import urllib.parse
-import urllib.request
-from html.parser import HTMLParser
-from optparse import Values
-from typing import (
- TYPE_CHECKING,
- Callable,
- Dict,
- Iterable,
- List,
- MutableMapping,
- NamedTuple,
- Optional,
- Sequence,
- Tuple,
- Union,
-)
-
-from pip._vendor import requests
-from pip._vendor.requests import Response
-from pip._vendor.requests.exceptions import RetryError, SSLError
-
-from pip._internal.exceptions import NetworkConnectionError
-from pip._internal.models.link import Link
-from pip._internal.models.search_scope import SearchScope
-from pip._internal.network.session import PipSession
-from pip._internal.network.utils import raise_for_status
-from pip._internal.utils.filetypes import is_archive_file
-from pip._internal.utils.misc import redact_auth_from_url
-from pip._internal.vcs import vcs
-
-from .sources import CandidatesFromPage, LinkSource, build_source
-
-if TYPE_CHECKING:
- from typing import Protocol
-else:
- Protocol = object
-
-logger = logging.getLogger(__name__)
-
-ResponseHeaders = MutableMapping[str, str]
-
-
-def _match_vcs_scheme(url: str) -> Optional[str]:
- """Look for VCS schemes in the URL.
-
- Returns the matched VCS scheme, or None if there's no match.
- """
- for scheme in vcs.schemes:
- if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
- return scheme
- return None
-
-
-class _NotAPIContent(Exception):
- def __init__(self, content_type: str, request_desc: str) -> None:
- super().__init__(content_type, request_desc)
- self.content_type = content_type
- self.request_desc = request_desc
-
-
-def _ensure_api_header(response: Response) -> None:
- """
- Check the Content-Type header to ensure the response contains a Simple
- API Response.
-
- Raises `_NotAPIContent` if the content type is not a valid content-type.
- """
- content_type = response.headers.get("Content-Type", "Unknown")
-
- content_type_l = content_type.lower()
- if content_type_l.startswith(
- (
- "text/html",
- "application/vnd.pypi.simple.v1+html",
- "application/vnd.pypi.simple.v1+json",
- )
- ):
- return
-
- raise _NotAPIContent(content_type, response.request.method)
-
-
-class _NotHTTP(Exception):
- pass
-
-
-def _ensure_api_response(url: str, session: PipSession) -> None:
- """
- Send a HEAD request to the URL, and ensure the response contains a simple
- API Response.
-
- Raises `_NotHTTP` if the URL is not available for a HEAD request, or
- `_NotAPIContent` if the content type is not a valid content type.
- """
- scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
- if scheme not in {"http", "https"}:
- raise _NotHTTP()
-
- resp = session.head(url, allow_redirects=True)
- raise_for_status(resp)
-
- _ensure_api_header(resp)
-
-
-def _get_simple_response(url: str, session: PipSession) -> Response:
- """Access an Simple API response with GET, and return the response.
-
- This consists of three parts:
-
- 1. If the URL looks suspiciously like an archive, send a HEAD first to
- check the Content-Type is HTML or Simple API, to avoid downloading a
- large file. Raise `_NotHTTP` if the content type cannot be determined, or
- `_NotAPIContent` if it is not HTML or a Simple API.
- 2. Actually perform the request. Raise HTTP exceptions on network failures.
- 3. Check the Content-Type header to make sure we got a Simple API response,
- and raise `_NotAPIContent` otherwise.
- """
- if is_archive_file(Link(url).filename):
- _ensure_api_response(url, session=session)
-
- logger.debug("Getting page %s", redact_auth_from_url(url))
-
- resp = session.get(
- url,
- headers={
- "Accept": ", ".join(
- [
- "application/vnd.pypi.simple.v1+json",
- "application/vnd.pypi.simple.v1+html; q=0.1",
- "text/html; q=0.01",
- ]
- ),
- # We don't want to blindly return cached data for
- # /simple/, because authors generally expect that
- # twine upload && pip install will function, but if
- # they've done a pip install in the last ~10 minutes
- # it won't. Thus by setting this to zero we will not
- # blindly use any cached data, however the benefit of
- # using max-age=0 instead of no-cache, is that we will
- # still support conditional requests, so we will still
- # minimize traffic sent in cases where the page hasn't
- # changed at all, we will just always incur the round
- # trip for the conditional GET now instead of only
- # once per 10 minutes.
- # For more information, please see pypa/pip#5670.
- "Cache-Control": "max-age=0",
- },
- )
- raise_for_status(resp)
-
- # The check for archives above only works if the url ends with
- # something that looks like an archive. However that is not a
- # requirement of an url. Unless we issue a HEAD request on every
- # url we cannot know ahead of time for sure if something is a
- # Simple API response or not. However we can check after we've
- # downloaded it.
- _ensure_api_header(resp)
-
- logger.debug(
- "Fetched page %s as %s",
- redact_auth_from_url(url),
- resp.headers.get("Content-Type", "Unknown"),
- )
-
- return resp
-
-
-def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
- """Determine if we have any encoding information in our headers."""
- if headers and "Content-Type" in headers:
- m = email.message.Message()
- m["content-type"] = headers["Content-Type"]
- charset = m.get_param("charset")
- if charset:
- return str(charset)
- return None
-
-
-class CacheablePageContent:
- def __init__(self, page: "IndexContent") -> None:
- assert page.cache_link_parsing
- self.page = page
-
- def __eq__(self, other: object) -> bool:
- return isinstance(other, type(self)) and self.page.url == other.page.url
-
- def __hash__(self) -> int:
- return hash(self.page.url)
-
-
-class ParseLinks(Protocol):
- def __call__(self, page: "IndexContent") -> Iterable[Link]:
- ...
-
-
-def with_cached_index_content(fn: ParseLinks) -> ParseLinks:
- """
- Given a function that parses an Iterable[Link] from an IndexContent, cache the
- function's result (keyed by CacheablePageContent), unless the IndexContent
- `page` has `page.cache_link_parsing == False`.
- """
-
- @functools.lru_cache(maxsize=None)
- def wrapper(cacheable_page: CacheablePageContent) -> List[Link]:
- return list(fn(cacheable_page.page))
-
- @functools.wraps(fn)
- def wrapper_wrapper(page: "IndexContent") -> List[Link]:
- if page.cache_link_parsing:
- return wrapper(CacheablePageContent(page))
- return list(fn(page))
-
- return wrapper_wrapper
-
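The decorator above works by wrapping each page in a hashable proxy so `functools.lru_cache` can memoize the parse; the same pattern in miniature (all names here are illustrative, not pip API):

```python
import functools

class _Key:
    """Hashable proxy keyed by a stable 'url' field."""
    def __init__(self, page):
        self.page = page
    def __eq__(self, other):
        return isinstance(other, _Key) and self.page["url"] == other.page["url"]
    def __hash__(self):
        return hash(self.page["url"])

@functools.lru_cache(maxsize=None)
def parse(key):
    print("parsing", key.page["url"])  # runs once per distinct URL
    return key.page["url"].upper()

page = {"url": "https://example.com/simple/"}
parse(_Key(page)); parse(_Key(page))   # second call is a cache hit
```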
-
-@with_cached_index_content
-def parse_links(page: "IndexContent") -> Iterable[Link]:
- """
- Parse a Simple API's Index Content, and yield its anchor elements as Link objects.
- """
-
- content_type_l = page.content_type.lower()
- if content_type_l.startswith("application/vnd.pypi.simple.v1+json"):
- data = json.loads(page.content)
- for file in data.get("files", []):
- link = Link.from_json(file, page.url)
- if link is None:
- continue
- yield link
- return
-
- parser = HTMLLinkParser(page.url)
- encoding = page.encoding or "utf-8"
- parser.feed(page.content.decode(encoding))
-
- url = page.url
- base_url = parser.base_url or url
- for anchor in parser.anchors:
- link = Link.from_element(anchor, page_url=url, base_url=base_url)
- if link is None:
- continue
- yield link
-
-
-class IndexContent:
- """Represents one response (or page), along with its URL"""
-
- def __init__(
- self,
- content: bytes,
- content_type: str,
- encoding: Optional[str],
- url: str,
- cache_link_parsing: bool = True,
- ) -> None:
- """
- :param encoding: the encoding to decode the given content.
- :param url: the URL from which the HTML was downloaded.
- :param cache_link_parsing: whether links parsed from this page's url
- should be cached. PyPI index urls should
- have this set to False, for example.
- """
- self.content = content
- self.content_type = content_type
- self.encoding = encoding
- self.url = url
- self.cache_link_parsing = cache_link_parsing
-
- def __str__(self) -> str:
- return redact_auth_from_url(self.url)
-
-
-class HTMLLinkParser(HTMLParser):
- """
- HTMLParser that keeps the first base HREF and a list of all anchor
- elements' attributes.
- """
-
- def __init__(self, url: str) -> None:
- super().__init__(convert_charrefs=True)
-
- self.url: str = url
- self.base_url: Optional[str] = None
- self.anchors: List[Dict[str, Optional[str]]] = []
-
- def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
- if tag == "base" and self.base_url is None:
- href = self.get_href(attrs)
- if href is not None:
- self.base_url = href
- elif tag == "a":
- self.anchors.append(dict(attrs))
-
- def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
- for name, value in attrs:
- if name == "href":
- return value
- return None
-
-
-def _handle_get_simple_fail(
- link: Link,
- reason: Union[str, Exception],
- meth: Optional[Callable[..., None]] = None,
-) -> None:
- if meth is None:
- meth = logger.debug
- meth("Could not fetch URL %s: %s - skipping", link, reason)
-
-
-def _make_index_content(
- response: Response, cache_link_parsing: bool = True
-) -> IndexContent:
- encoding = _get_encoding_from_headers(response.headers)
- return IndexContent(
- response.content,
- response.headers["Content-Type"],
- encoding=encoding,
- url=response.url,
- cache_link_parsing=cache_link_parsing,
- )
-
-
-def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]:
- url = link.url.split("#", 1)[0]
-
- # Check for VCS schemes that do not support lookup as web pages.
- vcs_scheme = _match_vcs_scheme(url)
- if vcs_scheme:
- logger.warning(
- "Cannot look at %s URL %s because it does not support lookup as web pages.",
- vcs_scheme,
- link,
- )
- return None
-
- # Tack index.html onto file:// URLs that point to directories
- scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
- if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
- # add trailing slash if not present so urljoin doesn't trim
- # final segment
- if not url.endswith("/"):
- url += "/"
- # TODO: In the future, it would be nice if pip supported PEP 691
- # style responses in the file:// URLs, however there's no
- # standard file extension for application/vnd.pypi.simple.v1+json
- # so we'll need to come up with something on our own.
- url = urllib.parse.urljoin(url, "index.html")
- logger.debug(" file: URL is directory, getting %s", url)
-
- try:
- resp = _get_simple_response(url, session=session)
- except _NotHTTP:
- logger.warning(
- "Skipping page %s because it looks like an archive, and cannot "
- "be checked by a HTTP HEAD request.",
- link,
- )
- except _NotAPIContent as exc:
- logger.warning(
- "Skipping page %s because the %s request got Content-Type: %s. "
- "The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
- "application/vnd.pypi.simple.v1+html, and text/html",
- link,
- exc.request_desc,
- exc.content_type,
- )
- except NetworkConnectionError as exc:
- _handle_get_simple_fail(link, exc)
- except RetryError as exc:
- _handle_get_simple_fail(link, exc)
- except SSLError as exc:
- reason = "There was a problem confirming the ssl certificate: "
- reason += str(exc)
- _handle_get_simple_fail(link, reason, meth=logger.info)
- except requests.ConnectionError as exc:
- _handle_get_simple_fail(link, f"connection error: {exc}")
- except requests.Timeout:
- _handle_get_simple_fail(link, "timed out")
- else:
- return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
- return None
-
-
-class CollectedSources(NamedTuple):
- find_links: Sequence[Optional[LinkSource]]
- index_urls: Sequence[Optional[LinkSource]]
-
-
-class LinkCollector:
-
- """
- Responsible for collecting Link objects from all configured locations,
- making network requests as needed.
-
- The class's main method is its collect_sources() method.
- """
-
- def __init__(
- self,
- session: PipSession,
- search_scope: SearchScope,
- ) -> None:
- self.search_scope = search_scope
- self.session = session
-
- @classmethod
- def create(
- cls,
- session: PipSession,
- options: Values,
- suppress_no_index: bool = False,
- ) -> "LinkCollector":
- """
- :param session: The Session to use to make requests.
- :param suppress_no_index: Whether to ignore the --no-index option
- when constructing the SearchScope object.
- """
- index_urls = [options.index_url] + options.extra_index_urls
- if options.no_index and not suppress_no_index:
- logger.debug(
- "Ignoring indexes: %s",
- ",".join(redact_auth_from_url(url) for url in index_urls),
- )
- index_urls = []
-
- # Make sure find_links is a list before passing to create().
- find_links = options.find_links or []
-
- search_scope = SearchScope.create(
- find_links=find_links,
- index_urls=index_urls,
- no_index=options.no_index,
- )
- link_collector = LinkCollector(
- session=session,
- search_scope=search_scope,
- )
- return link_collector
-
- @property
- def find_links(self) -> List[str]:
- return self.search_scope.find_links
-
- def fetch_response(self, location: Link) -> Optional[IndexContent]:
- """
- Fetch an HTML page containing package links.
- """
- return _get_index_content(location, session=self.session)
-
- def collect_sources(
- self,
- project_name: str,
- candidates_from_page: CandidatesFromPage,
- ) -> CollectedSources:
- # The OrderedDict calls deduplicate sources by URL.
- index_url_sources = collections.OrderedDict(
- build_source(
- loc,
- candidates_from_page=candidates_from_page,
- page_validator=self.session.is_secure_origin,
- expand_dir=False,
- cache_link_parsing=False,
- )
- for loc in self.search_scope.get_index_urls_locations(project_name)
- ).values()
- find_links_sources = collections.OrderedDict(
- build_source(
- loc,
- candidates_from_page=candidates_from_page,
- page_validator=self.session.is_secure_origin,
- expand_dir=True,
- cache_link_parsing=True,
- )
- for loc in self.find_links
- ).values()
-
- if logger.isEnabledFor(logging.DEBUG):
- lines = [
- f"* {s.link}"
- for s in itertools.chain(find_links_sources, index_url_sources)
- if s is not None and s.link is not None
- ]
- lines = [
- f"{len(lines)} location(s) to search "
- f"for versions of {project_name}:"
- ] + lines
- logger.debug("\n".join(lines))
-
- return CollectedSources(
- find_links=list(find_links_sources),
- index_urls=list(index_url_sources),
- )
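`collect_sources` leans on `OrderedDict` construction to deduplicate sources by key while keeping iteration order; the trick in isolation:

```python
import collections

pairs = [("a", 1), ("b", 2), ("a", 3)]                # the later "a" wins
print(list(collections.OrderedDict(pairs).values()))  # [3, 2]
```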
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/timeout.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/timeout.py
deleted file mode 100644
index 78e18a6272482e3946de83c0274badc4a5cfcdfa..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/timeout.py
+++ /dev/null
@@ -1,271 +0,0 @@
-from __future__ import absolute_import
-
-import time
-
-# The default socket timeout, used by httplib to indicate that no timeout was specified by the user
-from socket import _GLOBAL_DEFAULT_TIMEOUT, getdefaulttimeout
-
-from ..exceptions import TimeoutStateError
-
-# A sentinel value to indicate that no timeout was specified by the user in
-# urllib3
-_Default = object()
-
-
-# Use time.monotonic if available.
-current_time = getattr(time, "monotonic", time.time)
-
-
-class Timeout(object):
- """Timeout configuration.
-
- Timeouts can be defined as a default for a pool:
-
- .. code-block:: python
-
- timeout = Timeout(connect=2.0, read=7.0)
- http = PoolManager(timeout=timeout)
- response = http.request('GET', 'http://example.com/')
-
- Or per-request (which overrides the default for the pool):
-
- .. code-block:: python
-
- response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
-
- Timeouts can be disabled by setting all the parameters to ``None``:
-
- .. code-block:: python
-
- no_timeout = Timeout(connect=None, read=None)
- response = http.request('GET', 'http://example.com/', timeout=no_timeout)
-
-
- :param total:
- This combines the connect and read timeouts into one; the read timeout
- will be set to the time leftover from the connect attempt. In the
- event that both a connect timeout and a total are specified, or a read
- timeout and a total are specified, the shorter timeout will be applied.
-
- Defaults to None.
-
- :type total: int, float, or None
-
- :param connect:
- The maximum amount of time (in seconds) to wait for a connection
- attempt to a server to succeed. Omitting the parameter will default the
- connect timeout to the system default, probably `the global default
- timeout in socket.py
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
- None will set an infinite timeout for connection attempts.
-
- :type connect: int, float, or None
-
- :param read:
- The maximum amount of time (in seconds) to wait between consecutive
- read operations for a response from the server. Omitting the parameter
- will default the read timeout to the system default, probably `the
- global default timeout in socket.py
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
- None will set an infinite timeout.
-
- :type read: int, float, or None
-
- .. note::
-
- Many factors can affect the total amount of time for urllib3 to return
- an HTTP response.
-
- For example, Python's DNS resolver does not obey the timeout specified
- on the socket. Other factors that can affect total request time include
- high CPU load, high swap, the program running at a low priority level,
- or other behaviors.
-
- In addition, the read and total timeouts only measure the time between
- read operations on the socket connecting the client and the server,
- not the total amount of time for the request to return a complete
- response. For most requests, the timeout is raised because the server
- has not sent the first byte in the specified time. This is not always
- the case; if a server streams one byte every fifteen seconds, a timeout
- of 20 seconds will not trigger, even though the request will take
- several minutes to complete.
-
- If your goal is to cut off any request after a set amount of wall clock
- time, consider having a second "watcher" thread to cut off a slow
- request.
- """
-
- #: A sentinel object representing the default timeout value
- DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
-
- def __init__(self, total=None, connect=_Default, read=_Default):
- self._connect = self._validate_timeout(connect, "connect")
- self._read = self._validate_timeout(read, "read")
- self.total = self._validate_timeout(total, "total")
- self._start_connect = None
-
- def __repr__(self):
- return "%s(connect=%r, read=%r, total=%r)" % (
- type(self).__name__,
- self._connect,
- self._read,
- self.total,
- )
-
- # __str__ provided for backwards compatibility
- __str__ = __repr__
-
- @classmethod
- def resolve_default_timeout(cls, timeout):
- return getdefaulttimeout() if timeout is cls.DEFAULT_TIMEOUT else timeout
-
- @classmethod
- def _validate_timeout(cls, value, name):
- """Check that a timeout attribute is valid.
-
- :param value: The timeout value to validate
- :param name: The name of the timeout attribute to validate. This is
- used to specify in error messages.
- :return: The validated and casted version of the given value.
- :raises ValueError: If it is a numeric value less than or equal to
- zero, or the type is not an integer, float, or None.
- """
- if value is _Default:
- return cls.DEFAULT_TIMEOUT
-
- if value is None or value is cls.DEFAULT_TIMEOUT:
- return value
-
- if isinstance(value, bool):
- raise ValueError(
- "Timeout cannot be a boolean value. It must "
- "be an int, float or None."
- )
- try:
- float(value)
- except (TypeError, ValueError):
- raise ValueError(
- "Timeout value %s was %s, but it must be an "
- "int, float or None." % (name, value)
- )
-
- try:
- if value <= 0:
- raise ValueError(
- "Attempted to set %s timeout to %s, but the "
- "timeout cannot be set to a value less "
- "than or equal to 0." % (name, value)
- )
- except TypeError:
- # Python 3
- raise ValueError(
- "Timeout value %s was %s, but it must be an "
- "int, float or None." % (name, value)
- )
-
- return value
-
- @classmethod
- def from_float(cls, timeout):
- """Create a new Timeout from a legacy timeout value.
-
- The timeout value used by httplib.py sets the same timeout on the
- connect(), and recv() socket requests. This creates a :class:`Timeout`
- object that sets the individual timeouts to the ``timeout`` value
- passed to this function.
-
- :param timeout: The legacy timeout value.
- :type timeout: integer, float, sentinel default object, or None
- :return: Timeout object
- :rtype: :class:`Timeout`
- """
- return Timeout(read=timeout, connect=timeout)
-
- def clone(self):
- """Create a copy of the timeout object
-
- Timeout properties are stored per-pool but each request needs a fresh
- Timeout object to ensure each one has its own start/stop configured.
-
- :return: a copy of the timeout object
- :rtype: :class:`Timeout`
- """
- # We can't use copy.deepcopy because that will also create a new object
- # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
- # detect the user default.
- return Timeout(connect=self._connect, read=self._read, total=self.total)
-
- def start_connect(self):
- """Start the timeout clock, used during a connect() attempt
-
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
- to start a timer that has been started already.
- """
- if self._start_connect is not None:
- raise TimeoutStateError("Timeout timer has already been started.")
- self._start_connect = current_time()
- return self._start_connect
-
- def get_connect_duration(self):
- """Gets the time elapsed since the call to :meth:`start_connect`.
-
- :return: Elapsed time in seconds.
- :rtype: float
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
- to get duration for a timer that hasn't been started.
- """
- if self._start_connect is None:
- raise TimeoutStateError(
- "Can't get connect duration for timer that has not started."
- )
- return current_time() - self._start_connect
-
- @property
- def connect_timeout(self):
- """Get the value to use when setting a connection timeout.
-
- This will be a positive float or integer, the value None
- (never timeout), or the default system timeout.
-
- :return: Connect timeout.
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
- """
- if self.total is None:
- return self._connect
-
- if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
- return self.total
-
- return min(self._connect, self.total)
-
- @property
- def read_timeout(self):
- """Get the value for the read timeout.
-
- This assumes some time has elapsed in the connection timeout and
- computes the read timeout appropriately.
-
- If self.total is set, the read timeout is dependent on the amount of
- time taken by the connect timeout. If the connection time has not been
- established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
- raised.
-
- :return: Value to use for the read timeout.
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
- :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
- has not yet been called on this object.
- """
- if (
- self.total is not None
- and self.total is not self.DEFAULT_TIMEOUT
- and self._read is not None
- and self._read is not self.DEFAULT_TIMEOUT
- ):
- # In case the connect timeout has not yet been established.
- if self._start_connect is None:
- return self._read
- return max(0, min(self.total - self.get_connect_duration(), self._read))
- elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
- return max(0, self.total - self.get_connect_duration())
- else:
- return self._read
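To make the connect/read/total interplay documented above concrete, here is a small usage sketch (assuming the module is importable as `urllib3.util.timeout`; the printed values follow from the `connect_timeout` and `read_timeout` properties defined above):

```python
from urllib3.util.timeout import Timeout

t = Timeout(connect=5.0, read=10.0, total=7.0)

# connect_timeout is min(connect, total) when both are set.
print(t.connect_timeout)  # 5.0

# read_timeout requires the connect clock to be running; it is then
# capped by what is left of `total`: max(0, min(total - elapsed, read)).
t.start_connect()
print(t.read_timeout)     # just under 7.0, since almost no time has elapsed
```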
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/fancy_getopt.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/fancy_getopt.py
deleted file mode 100644
index 830f047e28aa3b25295174d44d735448a1a43098..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/fancy_getopt.py
+++ /dev/null
@@ -1,470 +0,0 @@
-"""distutils.fancy_getopt
-
-Wrapper around the standard getopt module that provides the following
-additional features:
- * short and long options are tied together
- * options have help strings, so fancy_getopt could potentially
- create a complete usage summary
- * options set attributes of a passed-in object
-"""
-
-import sys
-import string
-import re
-import getopt
-from distutils.errors import DistutilsGetoptError, DistutilsArgError
-
-# Much like command_re in distutils.core, this is close to but not quite
-# the same as a Python NAME -- except, in the spirit of most GNU
-# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
-# The similarities to NAME are again not a coincidence...
-longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
-longopt_re = re.compile(r'^%s$' % longopt_pat)
-
-# For recognizing "negative alias" options, eg. "quiet=!verbose"
-neg_alias_re = re.compile("^({})=!({})$".format(longopt_pat, longopt_pat))
-
-# This is used to translate long options to legitimate Python identifiers
-# (for use as attributes of some object).
-longopt_xlate = str.maketrans('-', '_')
-
-
-class FancyGetopt:
- """Wrapper around the standard 'getopt()' module that provides some
- handy extra functionality:
- * short and long options are tied together
- * options have help strings, and help text can be assembled
- from them
- * options set attributes of a passed-in object
- * boolean options can have "negative aliases" -- eg. if
- --quiet is the "negative alias" of --verbose, then "--quiet"
- on the command line sets 'verbose' to false
- """
-
- def __init__(self, option_table=None):
- # The option table is (currently) a list of tuples. The
- # tuples may have three or four values:
- # (long_option, short_option, help_string [, repeatable])
- # if an option takes an argument, its long_option should have '='
- # appended; short_option should just be a single character, no ':'
- # in any case. If a long_option doesn't have a corresponding
- # short_option, short_option should be None. All option tuples
- # must have long options.
- self.option_table = option_table
-
- # 'option_index' maps long option names to entries in the option
- # table (ie. those 3-tuples).
- self.option_index = {}
- if self.option_table:
- self._build_index()
-
- # 'alias' records (duh) alias options; {'foo': 'bar'} means
- # --foo is an alias for --bar
- self.alias = {}
-
- # 'negative_alias' keeps track of options that are the boolean
- # opposite of some other option
- self.negative_alias = {}
-
- # These keep track of the information in the option table. We
- # don't actually populate these structures until we're ready to
- # parse the command-line, since the 'option_table' passed in here
- # isn't necessarily the final word.
- self.short_opts = []
- self.long_opts = []
- self.short2long = {}
- self.attr_name = {}
- self.takes_arg = {}
-
- # And 'option_order' is filled up in 'getopt()'; it records the
- # original order of options (and their values) on the command-line,
- # but expands short options, converts aliases, etc.
- self.option_order = []
-
- def _build_index(self):
- self.option_index.clear()
- for option in self.option_table:
- self.option_index[option[0]] = option
-
- def set_option_table(self, option_table):
- self.option_table = option_table
- self._build_index()
-
- def add_option(self, long_option, short_option=None, help_string=None):
- if long_option in self.option_index:
- raise DistutilsGetoptError(
- "option conflict: already an option '%s'" % long_option
- )
- else:
- option = (long_option, short_option, help_string)
- self.option_table.append(option)
- self.option_index[long_option] = option
-
- def has_option(self, long_option):
- """Return true if the option table for this parser has an
- option with long name 'long_option'."""
- return long_option in self.option_index
-
- def get_attr_name(self, long_option):
- """Translate long option name 'long_option' to the form it
- has as an attribute of some object: ie., translate hyphens
- to underscores."""
- return long_option.translate(longopt_xlate)
-
- def _check_alias_dict(self, aliases, what):
- assert isinstance(aliases, dict)
- for (alias, opt) in aliases.items():
- if alias not in self.option_index:
- raise DistutilsGetoptError(
- ("invalid %s '%s': " "option '%s' not defined")
- % (what, alias, alias)
- )
- if opt not in self.option_index:
- raise DistutilsGetoptError(
- ("invalid %s '%s': " "aliased option '%s' not defined")
- % (what, alias, opt)
- )
-
- def set_aliases(self, alias):
- """Set the aliases for this option parser."""
- self._check_alias_dict(alias, "alias")
- self.alias = alias
-
- def set_negative_aliases(self, negative_alias):
- """Set the negative aliases for this option parser.
- 'negative_alias' should be a dictionary mapping option names to
- option names, both the key and value must already be defined
- in the option table."""
- self._check_alias_dict(negative_alias, "negative alias")
- self.negative_alias = negative_alias
-
- def _grok_option_table(self): # noqa: C901
- """Populate the various data structures that keep tabs on the
- option table. Called by 'getopt()' before it can do anything
- worthwhile.
- """
- self.long_opts = []
- self.short_opts = []
- self.short2long.clear()
- self.repeat = {}
-
- for option in self.option_table:
- if len(option) == 3:
- long, short, help = option
- repeat = 0
- elif len(option) == 4:
- long, short, help, repeat = option
- else:
- # the option table is part of the code, so simply
- # assert that it is correct
- raise ValueError("invalid option tuple: {!r}".format(option))
-
- # Type- and value-check the option names
- if not isinstance(long, str) or len(long) < 2:
- raise DistutilsGetoptError(
- ("invalid long option '%s': " "must be a string of length >= 2")
- % long
- )
-
- if not ((short is None) or (isinstance(short, str) and len(short) == 1)):
- raise DistutilsGetoptError(
- "invalid short option '%s': "
- "must a single character or None" % short
- )
-
- self.repeat[long] = repeat
- self.long_opts.append(long)
-
- if long[-1] == '=': # option takes an argument?
- if short:
- short = short + ':'
- long = long[0:-1]
- self.takes_arg[long] = 1
- else:
- # Is the option a "negative alias" for some other option (eg.
- # "quiet" == "!verbose")?
- alias_to = self.negative_alias.get(long)
- if alias_to is not None:
- if self.takes_arg[alias_to]:
- raise DistutilsGetoptError(
- "invalid negative alias '%s': "
- "aliased option '%s' takes a value" % (long, alias_to)
- )
-
- self.long_opts[-1] = long # XXX redundant?!
- self.takes_arg[long] = 0
-
- # If this is an alias option, make sure its "takes arg" flag is
- # the same as the option it's aliased to.
- alias_to = self.alias.get(long)
- if alias_to is not None:
- if self.takes_arg[long] != self.takes_arg[alias_to]:
- raise DistutilsGetoptError(
- "invalid alias '%s': inconsistent with "
- "aliased option '%s' (one of them takes a value, "
- "the other doesn't" % (long, alias_to)
- )
-
- # Now enforce some bondage on the long option name, so we can
- # later translate it to an attribute name on some object. Have
- # to do this a bit late to make sure we've removed any trailing
- # '='.
- if not longopt_re.match(long):
- raise DistutilsGetoptError(
- "invalid long option name '%s' "
- "(must be letters, numbers, hyphens only" % long
- )
-
- self.attr_name[long] = self.get_attr_name(long)
- if short:
- self.short_opts.append(short)
- self.short2long[short[0]] = long
-
- def getopt(self, args=None, object=None): # noqa: C901
- """Parse command-line options in args. Store as attributes on object.
-
- If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
- 'object' is None or not supplied, creates a new OptionDummy
- object, stores option values there, and returns a tuple (args,
- object). If 'object' is supplied, it is modified in place and
- 'getopt()' just returns 'args'; in both cases, the returned
- 'args' is a modified copy of the passed-in 'args' list, which
- is left untouched.
- """
- if args is None:
- args = sys.argv[1:]
- if object is None:
- object = OptionDummy()
- created_object = True
- else:
- created_object = False
-
- self._grok_option_table()
-
- short_opts = ' '.join(self.short_opts)
- try:
- opts, args = getopt.getopt(args, short_opts, self.long_opts)
- except getopt.error as msg:
- raise DistutilsArgError(msg)
-
- for opt, val in opts:
- if len(opt) == 2 and opt[0] == '-': # it's a short option
- opt = self.short2long[opt[1]]
- else:
- assert len(opt) > 2 and opt[:2] == '--'
- opt = opt[2:]
-
- alias = self.alias.get(opt)
- if alias:
- opt = alias
-
- if not self.takes_arg[opt]: # boolean option?
- assert val == '', "boolean option can't have value"
- alias = self.negative_alias.get(opt)
- if alias:
- opt = alias
- val = 0
- else:
- val = 1
-
- attr = self.attr_name[opt]
- # The only repeating option at the moment is 'verbose'.
- # It has a negative option -q quiet, which should set verbose = 0.
- if val and self.repeat.get(attr) is not None:
- val = getattr(object, attr, 0) + 1
- setattr(object, attr, val)
- self.option_order.append((opt, val))
-
- # for opts
- if created_object:
- return args, object
- else:
- return args
-
- def get_option_order(self):
- """Returns the list of (option, value) tuples processed by the
- previous run of 'getopt()'. Raises RuntimeError if
- 'getopt()' hasn't been called yet.
- """
- if self.option_order is None:
- raise RuntimeError("'getopt()' hasn't been called yet")
- else:
- return self.option_order
-
- def generate_help(self, header=None): # noqa: C901
- """Generate help text (a list of strings, one per suggested line of
- output) from the option table for this FancyGetopt object.
- """
- # Blithely assume the option table is good: probably wouldn't call
- # 'generate_help()' unless you've already called 'getopt()'.
-
- # First pass: determine maximum length of long option names
- max_opt = 0
- for option in self.option_table:
- long = option[0]
- short = option[1]
- ell = len(long)
- if long[-1] == '=':
- ell = ell - 1
- if short is not None:
- ell = ell + 5 # " (-x)" where short == 'x'
- if ell > max_opt:
- max_opt = ell
-
- opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
-
- # Typical help block looks like this:
- # --foo controls foonabulation
- # Help block for longest option looks like this:
- # --flimflam set the flim-flam level
- # and with wrapped text:
- # --flimflam set the flim-flam level (must be between
- # 0 and 100, except on Tuesdays)
- # Options with short names will have the short name shown (but
- # it doesn't contribute to max_opt):
- # --foo (-f) controls foonabulation
- # If adding the short option would make the left column too wide,
- # we push the explanation off to the next line
- # --flimflam (-l)
- # set the flim-flam level
- # Important parameters:
- # - 2 spaces before option block start lines
- # - 2 dashes for each long option name
- # - min. 2 spaces between option and explanation (gutter)
- # - 5 characters (incl. space) for short option name
-
- # Now generate lines of help text. (If 80 columns were good enough
- # for Jesus, then 78 columns are good enough for me!)
- line_width = 78
- text_width = line_width - opt_width
- big_indent = ' ' * opt_width
- if header:
- lines = [header]
- else:
- lines = ['Option summary:']
-
- for option in self.option_table:
- long, short, help = option[:3]
- text = wrap_text(help, text_width)
- if long[-1] == '=':
- long = long[0:-1]
-
- # Case 1: no short option at all (makes life easy)
- if short is None:
- if text:
- lines.append(" --%-*s %s" % (max_opt, long, text[0]))
- else:
- lines.append(" --%-*s " % (max_opt, long))
-
- # Case 2: we have a short option, so we have to include it
- # just after the long option
- else:
- opt_names = "{} (-{})".format(long, short)
- if text:
- lines.append(" --%-*s %s" % (max_opt, opt_names, text[0]))
- else:
- lines.append(" --%-*s" % opt_names)
-
- for ell in text[1:]:
- lines.append(big_indent + ell)
- return lines
-
- def print_help(self, header=None, file=None):
- if file is None:
- file = sys.stdout
- for line in self.generate_help(header):
- file.write(line + "\n")
-
-
-def fancy_getopt(options, negative_opt, object, args):
- parser = FancyGetopt(options)
- parser.set_negative_aliases(negative_opt)
- return parser.getopt(args, object)
-
-
-WS_TRANS = {ord(_wschar): ' ' for _wschar in string.whitespace}
-
-
-def wrap_text(text, width):
- """wrap_text(text : string, width : int) -> [string]
-
- Split 'text' into multiple lines of no more than 'width' characters
- each, and return the list of strings that results.
- """
- if text is None:
- return []
- if len(text) <= width:
- return [text]
-
- text = text.expandtabs()
- text = text.translate(WS_TRANS)
- chunks = re.split(r'( +|-+)', text)
- chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
- lines = []
-
- while chunks:
- cur_line = [] # list of chunks (to-be-joined)
- cur_len = 0 # length of current line
-
- while chunks:
- ell = len(chunks[0])
- if cur_len + ell <= width: # can squeeze (at least) this chunk in
- cur_line.append(chunks[0])
- del chunks[0]
- cur_len = cur_len + ell
- else: # this line is full
- # drop last chunk if all space
- if cur_line and cur_line[-1][0] == ' ':
- del cur_line[-1]
- break
-
- if chunks: # any chunks left to process?
- # if the current line is still empty, then we had a single
- # chunk that's too big to fit on a line -- so we break
- # down and break it up at the line width
- if cur_len == 0:
- cur_line.append(chunks[0][0:width])
- chunks[0] = chunks[0][width:]
-
- # all-whitespace chunks at the end of a line can be discarded
- # (and we know from the re.split above that if a chunk has
- # *any* whitespace, it is *all* whitespace)
- if chunks[0][0] == ' ':
- del chunks[0]
-
- # and store this line in the list-of-all-lines -- as a single
- # string, of course!
- lines.append(''.join(cur_line))
-
- return lines
-
-
-def translate_longopt(opt):
- """Convert a long option name to a valid Python identifier by
- changing "-" to "_".
- """
- return opt.translate(longopt_xlate)
-
-
-class OptionDummy:
- """Dummy class just used as a place to hold command-line option
- values as instance attributes."""
-
- def __init__(self, options=[]):
- """Create a new OptionDummy instance. The attributes listed in
- 'options' will be initialized to None."""
- for opt in options:
- setattr(self, opt, None)
-
-
-if __name__ == "__main__":
- text = """\
-Tra-la-la, supercalifragilisticexpialidocious.
-How *do* you spell that odd word, anyways?
-(Someone ask Mary -- she'll know [or she'll
-say, "How should I know?"].)"""
-
- for w in (10, 20, 30, 40):
- print("width: %d" % w)
- print("\n".join(wrap_text(text, w)))
- print()
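As a quick orientation to the API above, here is an illustrative sketch (not part of the original module) wiring together a repeatable option and a negative alias; the expected values follow from `getopt()` as implemented above:

```python
options = [
    ('verbose', 'v', "run verbosely", 1),  # fourth field marks it repeatable
    ('quiet', 'q', "run quietly"),
    ('output=', 'o', "write output to FILE"),
]
parser = FancyGetopt(options)
parser.set_negative_aliases({'quiet': 'verbose'})

# '-v' bumps verbose to 1; '--quiet' is verbose's negative alias, so it
# resets verbose to 0; the leftover argument is passed through untouched.
args, opts = parser.getopt(['-v', '--quiet', 'file.txt'])
print(args)          # ['file.txt']
print(opts.verbose)  # 0
```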
diff --git a/spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/chinese.py b/spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/chinese.py
deleted file mode 100644
index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000
--- a/spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/chinese.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import os
-import re
-
-import cn2an
-from pypinyin import lazy_pinyin, Style
-
-from text import symbols
-from text.symbols import punctuation
-from text.tone_sandhi import ToneSandhi
-
-current_file_path = os.path.dirname(__file__)
-pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in
- open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()}
-
-import jieba.posseg as psg
-
-
-rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- '$': '.',
- '“': "'",
- '”': "'",
- '‘': "'",
- '’': "'",
- '(': "'",
- ')': "'",
- '(': "'",
- ')': "'",
- '《': "'",
- '》': "'",
- '【': "'",
- '】': "'",
- '[': "'",
- ']': "'",
- '—': "-",
- '~': "-",
- '~': "-",
- '「': "'",
- '」': "'",
-
-}
-
-tone_modifier = ToneSandhi()
-
-def replace_punctuation(text):
- text = text.replace("嗯", "恩").replace("呣","母")
- pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys()))
-
- replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
-
- replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text)
-
- return replaced_text
-
-def g2p(text):
- pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation))
- sentences = [i for i in re.split(pattern, text) if i.strip()!='']
- phones, tones, word2ph = _g2p(sentences)
- assert sum(word2ph) == len(phones)
- assert len(word2ph) == len(text) # This assertion can occasionally fail; wrap it in a try/except if needed.
- phones = ['_'] + phones + ["_"]
- tones = [0] + tones + [0]
- word2ph = [1] + word2ph + [1]
- return phones, tones, word2ph
-
-
-def _get_initials_finals(word):
- initials = []
- finals = []
- orig_initials = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.INITIALS)
- orig_finals = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for c, v in zip(orig_initials, orig_finals):
- initials.append(c)
- finals.append(v)
- return initials, finals
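For intuition about the two `lazy_pinyin` calls above, a quick sketch of what pypinyin returns for a two-character word (assuming pypinyin is installed; outputs follow its documented styles):

```python
from pypinyin import lazy_pinyin, Style

# 你好 -> initials ['n', 'h'] and tone-numbered finals ['i3', 'ao3']
print(lazy_pinyin("你好", neutral_tone_with_five=True, style=Style.INITIALS))
print(lazy_pinyin("你好", neutral_tone_with_five=True, style=Style.FINALS_TONE3))
```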
-
-
-def _g2p(segments):
- phones_list = []
- tones_list = []
- word2ph = []
- for seg in segments:
- pinyins = []
- # Remove all English words from the sentence
- seg = re.sub('[a-zA-Z]+', '', seg)
- seg_cut = psg.lcut(seg)
- initials = []
- finals = []
- seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
- for word, pos in seg_cut:
- if pos == 'eng':
- continue
- sub_initials, sub_finals = _get_initials_finals(word)
- sub_finals = tone_modifier.modified_tone(word, pos,
- sub_finals)
- initials.append(sub_initials)
- finals.append(sub_finals)
-
- # assert len(sub_initials) == len(sub_finals) == len(word)
- initials = sum(initials, [])
- finals = sum(finals, [])
- #
- for c, v in zip(initials, finals):
- raw_pinyin = c+v
- # NOTE: post process for pypinyin outputs
- # we discriminate i, ii and iii
- if c == v:
- assert c in punctuation
- phone = [c]
- tone = '0'
- word2ph.append(1)
- else:
- v_without_tone = v[:-1]
- tone = v[-1]
-
- pinyin = c+v_without_tone
- assert tone in '12345'
-
- if c:
- # multi-syllable (the syllable has an initial)
- v_rep_map = {
- "uei": 'ui',
- 'iou': 'iu',
- 'uen': 'un',
- }
- if v_without_tone in v_rep_map.keys():
- pinyin = c+v_rep_map[v_without_tone]
- else:
- # single syllable (no initial)
- pinyin_rep_map = {
- 'ing': 'ying',
- 'i': 'yi',
- 'in': 'yin',
- 'u': 'wu',
- }
- if pinyin in pinyin_rep_map.keys():
- pinyin = pinyin_rep_map[pinyin]
- else:
- single_rep_map = {
- 'v': 'yu',
- 'e': 'e',
- 'i': 'y',
- 'u': 'w',
- }
- if pinyin[0] in single_rep_map.keys():
- pinyin = single_rep_map[pinyin[0]]+pinyin[1:]
-
- assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
- phone = pinyin_to_symbol_map[pinyin].split(' ')
- word2ph.append(len(phone))
-
- phones_list += phone
- tones_list += [int(tone)] * len(phone)
- return phones_list, tones_list, word2ph
-
-
-
-def text_normalize(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- text = replace_punctuation(text)
- return text
-
-def get_bert_feature(text, word2ph):
- from text import chinese_bert
- return chinese_bert.get_bert_feature(text, word2ph)
-
-if __name__ == '__main__':
- from text.chinese_bert import get_bert_feature
- text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
- text = text_normalize(text)
- print(text)
- phones, tones, word2ph = g2p(text)
- bert = get_bert_feature(text, word2ph)
-
- print(phones, tones, word2ph, bert.shape)
-
-
-# # Example usage
-# text = "这是一个示例文本:,你好!这是一个测试...."
-# print(g2p_paddle(text)) # Output: 这是一个示例文本你好这是一个测试
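A tiny sketch of the normalization path above (illustrative; it assumes this module is importable as `text.chinese` and that `!` is listed in `text.symbols.punctuation`):

```python
from text.chinese import text_normalize

s = text_normalize("我有2个苹果!")
# cn2an.an2cn("2") rewrites the digit as 二, and rep_map maps the
# full-width '!' to '!'; anything outside CJK plus punctuation is dropped.
print(s)  # 我有二个苹果!
```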
diff --git a/spaces/Benson/text-generation/Examples/Blackpink El Juego Apkmirror.md b/spaces/Benson/text-generation/Examples/Blackpink El Juego Apkmirror.md
deleted file mode 100644
index 25ef3b30b7dd479819526ec7bd768ac2f4e8cd97..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Blackpink El Juego Apkmirror.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
BLACKPINK THE GAME: A Complete Guide for Fans and Newcomers
-
BLACKPINK THE GAME is a mobile game that lets you become the producer of the famous K-pop group BLACKPINK. Developed by TakeOne Company and released in June 2023, the game is available for Android and iOS devices. In this game you can manage your own agency, train and level up your BLACKPINK members, collect and upgrade photocards, play minigames with friends, and customize your avatars with various outfits. Whether you are a die-hard Blink or a curious newcomer, this game offers a fun and immersive experience of being part of the BLACKPINK world.
-
How to download and install BLACKPINK THE GAME
-
For Android users
-
If you have an Android device, you can download BLACKPINK THE GAME from the Google Play Store or APKMirror. To download from the Google Play Store, simply search for "BLACKPINK THE GAME" or click this [link]. Then tap "Install" and wait for the game to download and install on your device. To download from APKMirror, go to this [link] and choose the latest version of the game. Then tap "Download APK" and wait for the file to download. Next, go to your device settings and enable "Unknown sources" under "Security". Finally, find the downloaded file in your file manager and tap it to install it on your device.
-
For iOS users
-
If you have an iOS device, you can download BLACKPINK THE GAME from the App Store or Uptodown. To download from the App Store, simply search for "BLACKPINK THE GAME" or click this [link]. Then tap "Get" and wait for the game to download and install on your device. To download from Uptodown, go to this [link] and choose the latest version of the game. Then tap "Download" and wait for the file to download. Next, go to your device settings and enable "Trust Uptodown Enterprise" under "General" > "Device Management". Finally, find the downloaded file in your file manager and tap it to install it on your device.
-
-
Management mode
-
Management mode is where you run your own agency and train your BLACKPINK members. You can build various rooms in your studio that offer different benefits and functions. For example, the merchandise development room generates gold every second, while the recording studio distributes albums that are used as energy for playing puzzles. You can also build training rooms for vocals, dance, acting, and so on, where you can improve your members' skills. To build or upgrade rooms you need gold and stardust. Gold can be collected from rooms or earned by completing tasks and puzzles. Stardust can be collected from rooms or obtained by combining photocards.
-
Puzzle mode
-
Puzzle mode is where you clear BLACKPINK's schedules and earn rewards such as gold, albums, stardust, photocards, and bonus items. To play puzzle mode, you need to use albums as energy. Each schedule consists of several stages with different objectives and difficulty levels. You need to clear blocks by swiping your
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Cheat Kick El Amigo 2.md b/spaces/Benson/text-generation/Examples/Descargar Cheat Kick El Amigo 2.md
deleted file mode 100644
index ef61938ce6d112d2291d0ad993d002e08fbca97b..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Cheat Kick El Amigo 2.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-
How to Download Cheat Codes for Kick the Buddy 2
-
Kick the Buddy 2 is a popular simulation game where you can unleash your creativity and imagination by tormenting a ragdoll named Buddy with various weapons, tools, and elements. It is a fun, stress-relieving game that lets you experiment with different ways to destroy, burn, cut, blow up, and crush Buddy. You can also customize Buddy's appearance, voice, and background, as well as play minigames with him.
-
However, if you want to take your gaming experience to the next level, you may want to try using cheat codes for Kick the Buddy 2. Cheat codes are special commands or codes that can modify or enhance certain aspects of the game, such as giving you unlimited money, gold, weapons, or items. By using cheat codes, you can unlock all of the game's features without spending real money or time.
Using cheat codes for Kick the Buddy 2 can have many benefits, such as:
-
-
You can access all the weapons, tools, and elements in the game without having to watch ads or complete tasks.
-
You can experiment with different combinations of weapons and elements to create funnier and more outrageous scenarios.
-
You can customize Buddy's appearance, voice, and background to suit your preferences or mood.
-
You can challenge yourself by trying different difficulty levels or modes.
-
You can get more fun and satisfaction out of beating Buddy any way you want.
-
-
If you are interested in using cheat codes for Kick the Buddy 2, you may be wondering how to find and download them. In this article, we will walk you through the process of finding, downloading, and using cheat codes for Kick the Buddy 2 on Android and iOS devices. We will also give you some tips and tricks on how to use cheat codes effectively and responsibly.
-
How to Find Cheat Codes for Kick the Buddy 2
-
-
Therefore, you need to be careful and selective when searching for cheat codes online. Here are some tips on how to find reliable and legitimate sources of cheat codes:
-
-
Look for websites that have positive reviews, ratings, comments, or feedback from other users. You can also check forums or online communities where players share their experiences and recommendations.
-
Look for websites that have clear, detailed instructions on how to download and use cheat codes. They should also provide screenshots or videos as proof that their cheat codes work.
-
Look for websites that have a large, up-to-date database of cheat codes for various games, including Kick the Buddy 2. They should also have a search function or filter option to help you find the cheat codes you need.
-
-
Some examples of websites that provide game cheat codes are:
-
| Website | Description |
| --- | --- |
| CheatCodes.com | One of the oldest and most popular game cheat code sites, with a large, comprehensive archive of cheat codes for various platforms. |
| GamesRadar | A game review and news site that also offers cheat codes, guides, tutorials, and tips for various games. |
| Cheat Happens | A dedicated site for PC cheat codes and trainers that also features game reviews, wallpapers, and tutorials. |
| GameWinners | A site with a large collection of cheat codes, FAQs, guides, and unlockables for various games. |
| Cheat Code Central | A site with an extensive database of cheat codes, guides, reviews, and news for various games. |
-
How to Download Cheat Codes for Kick the Buddy 2
-
-
How to download cheat codes for Android devices
-
-
Make sure your device is connected to the internet and has enough storage space.
-
Go to the website that offers the cheat codes you want to download and follow the instructions on how to download them. You may need to enter your email address, complete a survey, or watch an ad to get access to the download link.
-
Once you have the download link, tap it and wait for the file to download. The file may come as an APK (Android Package Kit) or a ZIP (compressed) file.
-
If the file is an APK, you need to enable installing apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and turn it on.
-
If the file is a ZIP, you need to extract it using a file manager app or a ZIP extractor app. You can download these apps from the Google Play Store if you don't already have them.
-
After extracting or installing the file, you should see a new icon on your device's home screen or in the app drawer. This is the cheat code app you need to launch before playing Kick the Buddy 2.
-
-
How to download cheat codes for iOS devices
-
-
Make sure your device is connected to the internet and has enough storage space.
-
Go to the website that offers the cheat codes you want to download and follow the instructions on how to download them. You may need to enter your email address, complete a survey, or watch an ad to get access to the download link.
-
Once you have the download link, tap it and wait for the file to download. The file may come as an IPA (iOS App Store Package) or a ZIP (compressed) file.
-
-
If the file is a ZIP, you need to extract it using a file manager app or a ZIP extractor app. You can download these apps from the App Store if you don't already have them.
-
After extracting or installing the file, you should see a new icon on your device's home screen or in the app drawer. This is the cheat code app you need to launch before playing Kick the Buddy 2.
-
-
How to Use Cheat Codes for Kick the Buddy 2
After downloading and installing the cheat code app, you need to activate it and use it in the game. The process may vary depending on the type of cheat code app you have and the features it offers. However, here are some general steps to follow:
-
-
Launch the cheat code app and grant the necessary permissions for it to access your device's data and settings.
-
Select the game you want to cheat in from the list of supported games. In this case, select Kick the Buddy 2.
-
Select the cheat codes you want to use from the list of available options. You can choose from different categories such as money, gold, weapons, items, or modes.
-
Tap the apply or activate button to enable the cheat codes. You may need to wait a few seconds or minutes for the cheat codes to take effect.
-
Launch the game and enjoy playing with the cheat codes. You should see the changes in the game's interface, such as increased money or gold, or unlocked weapons.
-
-
How to Use Cheat Codes for Kick the Buddy 2
-
Now that you have activated the cheat codes, you can use them to enhance your gaming experience and have more fun with Kick the Buddy 2. However, you should be aware of the types of cheat codes you are using and their effects on the game and its features. Here are some tips and tricks on how to use cheat codes effectively and responsibly:
-
The types of cheat codes available for Kick the Buddy 2
-
-
-
Money cheat codes: These cheat codes give you unlimited or increased amounts of in-game money. You can use money to buy more weapons, tools, elements, or items in the game.
-
Gold cheat codes: These cheat codes give you unlimited or increased amounts of in-game gold. You can use gold to unlock premium features such as VIP membership, exclusive weapons, or special modes.
-
Weapon cheat codes: These cheat codes give you access to all or some of the weapons in the game without having to buy them or watch ads. You can use weapons to torment Buddy in different ways.
-
Item cheat codes: These cheat codes give you access to all or some of the items in the game without having to buy them or watch ads. You can use items to customize Buddy's appearance, voice, or background.
-
Mode cheat codes: These cheat codes give you access to different modes or difficulty levels in the game without having to unlock them or complete tasks. You can use modes to challenge yourself or try new scenarios.
-
-
The effects of cheat codes on the game and its features
-
Cheat codes can have various effects on the game and features of Kick the Buddy 2, depending on the type and number of cheat codes you are using. Some of them are:
-
-
-
Cheat codes can make the game easier or harder for you, depending on your preference and skill level. For example, if you want a relaxing time with Buddy, you can use money or gold cheat codes to buy more weapons and items. If you want a hard time with Buddy, you can use mode cheat codes to increase the difficulty or change the rules.
-
-
Cheat codes can make the game more or less rewarding for you, depending on your goal and motivation. For example, if you want a rewarding time with Buddy, you can use mode cheat codes to unlock achievements or trophies. If you want a less rewarding time with Buddy, you can use money or gold cheat codes to skip some tasks or challenges.
-
-
Tips and tricks for using cheat codes effectively and responsibly
-
Cheat codes can be a great way to enhance your gaming experience and have more fun with Kick the Buddy 2. However, you should use them wisely and responsibly. Here are some tips and tricks on how to do so:
-
-
Use cheat codes sparingly and in moderation. Don't overuse them or rely on them too heavily. Otherwise, you may lose interest in the game or spoil its original charm and appeal.
-
Use cheat codes selectively and appropriately. Don't use them for every aspect or feature of the game. Use them only for the parts or features you find difficult, boring, or inaccessible.
-
Use cheat codes creatively and experimentally. Don't use them to repeat the same actions or scenarios. Use them to try new things or discover new possibilities.
-
Use cheat codes ethically and legally. Don't use them to harm, offend, or deceive others. Use them only for your own personal enjoyment and entertainment.
-
-
Conclusion
-
Kick the Buddy 2 is a fun, relaxing game that lets you torment a ragdoll named Buddy with various weapons, tools, and elements. However, if you want to spice up your gaming experience and have more fun, you can use cheat codes for Kick the Buddy 2. Cheat codes are special commands or codes that can modify or enhance certain aspects of the game, such as giving you unlimited money, gold, weapons, or items.
-
-
If you are interested in using cheat codes for Kick the Buddy 2, we encourage you to try them and see for yourself how they can improve your gaming experience and satisfaction. However, we also remind you to use them wisely and responsibly, as they can have some negative effects on the gameplay and features. We also advise you to be careful and selective when searching for cheat codes online, as some of them may be fake or harmful.
-
We hope you enjoyed reading this article and learned something new. If you have any questions or comments, feel free to leave a comment below. Thank you for your time and attention.
-
Frequently Asked Questions
-
Here are some frequently asked questions about cheat codes for Kick the Buddy 2:
-
-
Q: Are cheat codes for Kick the Buddy 2 legal?
-
A: Cheat codes for Kick the Buddy 2 are not illegal, as they do not violate any laws or regulations. However, they may be against the terms of service or policies of the game's developer or publisher. Therefore, using cheat codes may result in consequences such as being banned from the game or losing your progress.
-
Q: Are cheat codes for Kick the Buddy 2 safe?
-
A: Cheat codes for Kick the Buddy 2 are not always safe, as some of them may contain viruses, malware, spyware, or other harmful software that can damage your device or steal your data. Therefore, you need to be careful and selective when searching for cheat codes online. You should only download cheat codes from trusted, reputable sources that have positive reviews, ratings, comments, or feedback from other users.
-
Q: Do cheat codes for Kick the Buddy 2 work offline?
-
-
Q: Can I use cheat codes for Kick the Buddy 2 on other devices?
-
A: Cheat codes for Kick the Buddy 2 may or may not work on other devices, depending on the compatibility and specifications of those devices. Some cheat codes only work on specific devices or platforms, such as Android or iOS. Some cheat codes work on multiple devices or platforms but may require different steps or methods to download and use. Therefore, you need to check the compatibility and specifications of the cheat codes before using them.
-
Q: Can I use cheat codes for Kick the Buddy 2 with other games?
-
A: Cheat codes for Kick the Buddy 2 may or may not work with other games, depending on the similarity and compatibility of the games. Some cheat codes only work with Kick the Buddy 2 or its sequel or spin-off games. Some cheat codes work with other games that have similar features or mechanics, but they may have different effects or results. Therefore, you need to check the similarity and compatibility of the games before using cheat codes.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Derby De Demolicin 3 Mod Apk.md b/spaces/Benson/text-generation/Examples/Descargar Derby De Demolicin 3 Mod Apk.md
deleted file mode 100644
index 1f9dd6743916661cf3d138980cac75c825aa2478..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Derby De Demolicin 3 Mod Apk.md
+++ /dev/null
@@ -1,58 +0,0 @@
-
-
Download Demolition Derby 3 Mod APK and Enjoy the Ultimate Car Destruction Game
-
If you are a fan of car racing and crash games, you will love Demolition Derby 3. This is a game where you drive your car into other cars and cause as much damage as possible. You can also customize your car with different parts and paint jobs, and compete with other players online or offline. In this article, we will tell you everything you need to know about Demolition Derby 3 and how to download the mod APK version of the game, which gives you unlimited money, all cars unlocked, and no ads.
-
What is Demolition Derby 3?
-
Demolition Derby 3 is a car destruction game developed by Beer Money Games. It is the sequel to the popular Demolition Derby 2, which has over 50 million downloads on Google Play. In Demolition Derby 3, you can choose from over 40 different cars, each with its own stats and abilities. You can also upgrade your car with various parts, such as engines, armor, wheels, spoilers, and more, and change its color and design to make it look unique.
Demolition Derby 3 has many features that make it fun and exciting to play. Here are some of them:
-
Multiplayer mode
-
You can play Demolition Derby 3 online with other players from all over the world. You can join or create a lobby and choose from different game modes, such as free-for-all, team deathmatch, capture the flag, king of the hill, and more. You can also chat with other players and make friends or enemies.
-
Customizable cars
-
You can customize your car with over 1000 parts and decals. You can change the engine, transmission, suspension, brakes, tires, armor, exhaust, turbo, nitro, and more. You can also change your car's color and design with various paint jobs, stickers, flames, stripes, and more. You can make your car look cool or crazy.
-
Different arenas and events
-
You can drive your car in different arenas and events. There are over 20 arenas to choose from, each with its own layout and obstacles. Some arenas have ramps, loops, bridges, tunnels, walls, barrels, crates, and more. You can also take part in different events, such as demolition derbies, elimination races, last man standing, survival mode, stunt mode, boss battles, and more. Each event has its own rules and rewards.
-
Realistic physics and graphics
-
Demolition Derby 3 has realistic physics and graphics that make the game more immersive. Cars have realistic damage models that show the dents, scratches, sparks, smoke, fire, and explosions that occur when they crash. Cars also have realistic sounds that match their engines, brakes, horns, crashes, and more. The arenas have realistic lighting and shadows that create a dynamic atmosphere.
-
Why download Demolition Derby 3 Mod APK?
-
Demolition Derby 3 is free to play on Android devices. However, it also has some limitations and drawbacks that can affect your gaming experience. For example, you need to earn in-game money to buy and upgrade your cars, which can take a lot of time and effort. You also need to watch ads to get some extra rewards, which can be annoying and distracting. In addition, not all cars are available in the game, and some of them are locked behind a paywall.
-
That is why you may want to download the mod APK version of Demolition Derby 3. This is a modified version of the game that gives you some advantages and benefits you cannot get in the original version. Here are some of them:
-
Benefits of Demolition Derby 3 Mod APK
-
Demolition Derby 3 Mod APK has many benefits that make the game more enjoyable and satisfying. Here are some of them:
-
-
Unlimited money
-
-
All cars unlocked
-
With Demolition Derby 3 Mod APK, you will have all the cars unlocked in the game. This means you can choose from over 40 different cars, each with its own stats and abilities. You can also customize your car with over 1000 parts and decals. You can have the most diverse and unique car collection in the game.
-
No ads
-
With Demolition Derby 3 Mod APK, you will have no ads in the game. This means you can play without interruptions or distractions. You can also enjoy the game without wasting time or data on watching ads. You can have the smoothest possible gaming experience.
-
How to download and install Demolition Derby 3 Mod APK?
-
If you are interested in downloading and installing Demolition Derby 3 Mod APK, you just need to follow a few simple steps. Here they are:
-
Steps to download and install Demolition Derby 3 Mod APK
-
Step 1: Enable unknown sources
-
The first step is to enable unknown sources on your Android device. This will allow you to install apps from sources other than Google Play. To do this, go to your device settings, then Security, then Unknown Sources, and turn it on.
-
Step 2: Download the mod APK file
-
The next step is to download the Demolition Derby 3 mod APK file from a reliable source. You can use the link below to download it directly to your device.
-
Step 3: Install the mod APK file
-
The third step is to install the Demolition Derby 3 mod APK file on your device. To do this, find the downloaded file in your file manager, tap it, and follow the on-screen instructions.
-
Step 4: Launch the game and enjoy
-
-
Conclusion
-
Demolition Derby 3 is a fun and exciting car destruction game that lets you drive your car into other cars and cause as much damage as possible. You can also customize your car with different parts and paint jobs, and compete with other players online or offline. However, if you want to enjoy the game more, you may want to download the mod APK version of the game, which gives you unlimited money, all cars unlocked, and no ads. In this article, we have told you everything you need to know about Demolition Derby 3 and how to download and install its mod APK version. We hope this article was helpful and that you have a great time playing Demolition Derby 3.
-
Frequently Asked Questions
-
Here are some frequently asked questions about Demolition Derby 3 Mod APK:
-
-
Is Demolition Derby 3 Mod APK safe to use?
-
Yes, Demolition Derby 3 Mod APK is safe to use as long as you download it from a trusted source like ours. It does not contain any viruses or malware that could damage your device or data. However, you should always be careful when installing apps from unknown sources, and make sure you have a backup of your data in case something goes wrong.
-
Is Demolition Derby 3 Mod APK compatible with my device?
-
Demolition Derby 3 Mod APK is compatible with most Android devices running Android 4.4 or higher. However, some devices may not support the game or the mod APK due to different specifications or settings. If you run into any problems while playing the game or installing the mod APK, you can try changing your device settings, updating your device software, or contacting the game's developer for help.
-
Can I play Demolition Derby 3 Mod APK online with other players?
-
-
Can I update Demolition Derby 3 Mod APK to the latest version?
-
Yes, you can update Demolition Derby 3 Mod APK to the latest version whenever a new update is available. However, you should not update the game from Google Play, as this will overwrite the mod APK version and remove all the benefits you have. Instead, you should download the latest mod APK version from our website and install it over the existing one. This way, you will keep all your progress and benefits in the game.
-
Can I request more features or mods for Demolition Derby 3 Mod APK?
-
Yes, you can request more features or mods for Demolition Derby 3 Mod APK by leaving a comment on our website. We will do our best to meet your requests and provide you with the best possible gaming experience. However, we cannot guarantee that we can add every feature or mod you want, as some of them may be too difficult or impossible to implement. We appreciate your understanding and support.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/translate.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/translate.py
deleted file mode 100644
index ecfe3bcaf46629a3d75bef81a60ac45e76cfa4db..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/translate.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
-# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# http://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-import copy
-
-from botocore.utils import merge_dicts
-
-
-def build_retry_config(
- endpoint_prefix, retry_model, definitions, client_retry_config=None
-):
- service_config = retry_model.get(endpoint_prefix, {})
- resolve_references(service_config, definitions)
- # We want to merge the global defaults with the service specific
- # defaults, with the service specific defaults taking precedence.
- # So we use the global defaults as the base.
- #
- # A deepcopy is done on the retry defaults because it ensures the
- # retry model has no chance of getting mutated when the service specific
- # configuration or client retry config is merged in.
- final_retry_config = {
- '__default__': copy.deepcopy(retry_model.get('__default__', {}))
- }
- resolve_references(final_retry_config, definitions)
-    # Then merge the service-specific config on top.
- merge_dicts(final_retry_config, service_config)
- if client_retry_config is not None:
- _merge_client_retry_config(final_retry_config, client_retry_config)
- return final_retry_config
-
-
-def _merge_client_retry_config(retry_config, client_retry_config):
- max_retry_attempts_override = client_retry_config.get('max_attempts')
- if max_retry_attempts_override is not None:
- # In the retry config, the max_attempts refers to the maximum number
- # of requests in general will be made. However, for the client's
- # retry config it refers to how many retry attempts will be made at
- # most. So to translate this number from the client config, one is
- # added to convert it to the maximum number request that will be made
- # by including the initial request.
- #
- # It is also important to note that if we ever support per operation
- # configuration in the retry model via the client, we will need to
- # revisit this logic to make sure max_attempts gets applied
- # per operation.
- retry_config['__default__']['max_attempts'] = (
- max_retry_attempts_override + 1
- )
-
-
-def resolve_references(config, definitions):
- """Recursively replace $ref keys.
-
-    To cut down on duplication, common definitions can be declared
-    (and passed in via the ``definitions`` attribute) and then
-    referenced as {"$ref": "name"}; when this happens, the reference
-    dict is replaced with the value from the ``definitions`` dict.
-
- This is recursively done.
-
- """
- for key, value in config.items():
- if isinstance(value, dict):
- if len(value) == 1 and list(value.keys())[0] == '$ref':
- # Then we need to resolve this reference.
- config[key] = definitions[list(value.values())[0]]
- else:
- resolve_references(value, definitions)
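
The two helpers above are easy to exercise in isolation: ``resolve_references`` swaps ``{"$ref": name}`` nodes for shared definitions, and the client override adds one to ``max_attempts`` to convert a retry count into a total-request count. A minimal, self-contained sketch with hypothetical retry data (plain dicts, no botocore imports):

```python
def resolve_references(config, definitions):
    # Recursively replace {"$ref": "name"} dicts with definitions["name"].
    for key, value in config.items():
        if isinstance(value, dict):
            if len(value) == 1 and list(value.keys())[0] == '$ref':
                config[key] = definitions[list(value.values())[0]]
            else:
                resolve_references(value, definitions)

# Hypothetical retry model: one shared definition referenced by a policy.
definitions = {'throttling': {'applies_when': {'service_error_code': 'Throttling'}}}
service_config = {'policies': {'throttle': {'$ref': 'throttling'}}}
resolve_references(service_config, definitions)
print(service_config['policies']['throttle'])
# {'applies_when': {'service_error_code': 'Throttling'}}

# The client config counts retries, the retry model counts total requests,
# so a client max_attempts of 3 becomes 3 + 1 = 4 (the initial request included).
retry_config = {'__default__': {'max_attempts': 5}}
retry_config['__default__']['max_attempts'] = 3 + 1
print(retry_config['__default__']['max_attempts'])  # 4
```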
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
deleted file mode 100644
index 8765b907d70c4a530bc90dc88f24b3df73473b01..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-This module provides means to detect the App Engine environment.
-"""
-
-import os
-
-
-def is_appengine():
- return is_local_appengine() or is_prod_appengine()
-
-
-def is_appengine_sandbox():
- """Reports if the app is running in the first generation sandbox.
-
- The second generation runtimes are technically still in a sandbox, but it
- is much less restrictive, so generally you shouldn't need to check for it.
- see https://cloud.google.com/appengine/docs/standard/runtimes
- """
- return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
-
-
-def is_local_appengine():
- return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
- "SERVER_SOFTWARE", ""
- ).startswith("Development/")
-
-
-def is_prod_appengine():
- return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
- "SERVER_SOFTWARE", ""
- ).startswith("Google App Engine/")
-
-
-def is_prod_appengine_mvms():
- """Deprecated."""
- return False
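
These checks are pure functions of two environment variables, so their behavior is easy to see in isolation. A small sketch with hypothetical values (inlined, simplified versions of the checks above; the real module reads whatever App Engine sets):

```python
import os

# Hypothetical values as a first-generation production runtime might set them.
os.environ["APPENGINE_RUNTIME"] = "python27"
os.environ["SERVER_SOFTWARE"] = "Google App Engine/1.9.0"

is_local = os.environ.get("SERVER_SOFTWARE", "").startswith("Development/")
is_prod = os.environ.get("SERVER_SOFTWARE", "").startswith("Google App Engine/")
is_sandbox = os.environ["APPENGINE_RUNTIME"] == "python27"

print(is_local, is_prod, is_sandbox)  # False True True
```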
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/gqa/eval/result_eval.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/gqa/eval/result_eval.py
deleted file mode 100644
index 40a5cfc5dd0f149f48bb1a96a883d595f15d44ce..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/gqa/eval/result_eval.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# --------------------------------------------------------
-
-from openvqa.datasets.gqa.eval.gqa_eval import GQAEval
-import json, pickle
-import numpy as np
-
-
-def eval(__C, dataset, ans_ix_list, pred_list, result_eval_file, ensemble_file, log_file, valid=False):
- result_eval_file = result_eval_file + '.json'
-
- qid_list = [qid for qid in dataset.qid_list]
- ans_size = dataset.ans_size
-
- result = [{
- 'questionId': qid_list[ix],
- 'prediction': dataset.ix_to_ans[str(ans_ix_list[ix])],
- } for ix in range(len(qid_list))]
-
- print('Save the result to file: {}'.format(result_eval_file))
- json.dump(result, open(result_eval_file, 'w'))
-
- if __C.TEST_SAVE_PRED:
- print('Save the prediction vector to file: {}'.format(ensemble_file))
-
- pred_list = np.array(pred_list).reshape(-1, ans_size)
- result_pred = [{
- 'pred': pred_list[qix],
- 'qid': int(qid_list[qix])
-        } for qix in range(len(qid_list))]
- pickle.dump(result_pred, open(ensemble_file, 'wb+'), protocol=-1)
-
-
- if valid:
- # create vqa object and vqaRes object
- ques_file_path = __C.RAW_PATH[__C.DATASET][__C.SPLIT['val']]
- choices_path = None
- if __C.SPLIT['val'] + '_choices' in __C.RAW_PATH[__C.DATASET]:
- choices_path = __C.RAW_PATH[__C.DATASET][__C.SPLIT['val'] + '_choices']
-
- eval_gqa = GQAEval(__C, result_eval_file, ques_file_path, choices_path, EVAL_CONSISTENCY=False)
- result_string, detail_result_string = eval_gqa.get_str_result()
-
- print('Write to log file: {}'.format(log_file))
- logfile = open(log_file, 'a+')
-
- for result_string_ in result_string:
- logfile.write(result_string_)
- logfile.write('\n')
- print(result_string_)
-
- for detail_result_string_ in detail_result_string:
- logfile.write(detail_result_string_)
- logfile.write("\n")
-
- logfile.write('\n')
- logfile.close()
-
-
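
The evaluation file this writes is just a list of question-id/prediction pairs that ``GQAEval`` reads back; a tiny sketch of the layout, with made-up IDs:

```python
import json

# Hypothetical predictions in the format result_eval_file expects.
result = [
    {'questionId': '201307251', 'prediction': 'yes'},
    {'questionId': '201640614', 'prediction': 'bed'},
]
with open('result_eval_file.json', 'w') as f:
    json.dump(result, f)
```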
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/exec.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/exec.py
deleted file mode 100644
index 7d6f010d668d39fb729dcab457782ef3459ba5e8..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/exec.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# Modified to add trojan result extraction options
-# --------------------------------------------------------
-
-import os, copy
-from openvqa.datasets.dataset_loader import DatasetLoader
-from utils.train_engine import train_engine
-from utils.test_engine import test_engine
-from utils.extract_engine import extract_engine
-
-class Execution:
- def __init__(self, __C):
- self.__C = __C
-
- if __C.RUN_MODE != 'extract':
- print('Loading dataset........')
- self.dataset = DatasetLoader(__C).DataSet()
-
-        # Whether to trigger evaluation after every epoch;
-        # if so, create a new config copy with RUN_MODE = 'val'
- self.dataset_eval = None
- if __C.EVAL_EVERY_EPOCH:
- __C_eval = copy.deepcopy(__C)
- setattr(__C_eval, 'RUN_MODE', 'val')
- # modification - force eval set to clean when in train mode
- setattr(__C_eval, 'VER', 'clean')
-
- print('Loading validation set for per-epoch evaluation........')
- self.dataset_eval = DatasetLoader(__C_eval).DataSet()
-
-
- def run(self, run_mode):
- if run_mode == 'train':
- if self.__C.RESUME is False:
- self.empty_log(self.__C.VERSION)
- train_engine(self.__C, self.dataset, self.dataset_eval)
-
- elif run_mode == 'val':
- test_engine(self.__C, self.dataset, validation=True)
-
- elif run_mode == 'test':
- test_engine(self.__C, self.dataset)
-
- elif run_mode == 'extract':
- extract_engine(self.__C)
-
- else:
- exit(-1)
-
-
- def empty_log(self, version):
- print('Initializing log file........')
- if (os.path.exists(self.__C.LOG_PATH + '/log_run_' + version + '.txt')):
- os.remove(self.__C.LOG_PATH + '/log_run_' + version + '.txt')
- print('Finished!')
- print('')
diff --git a/spaces/CVPR/LIVE/pybind11/tools/pybind11NewTools.cmake b/spaces/CVPR/LIVE/pybind11/tools/pybind11NewTools.cmake
deleted file mode 100644
index 8f771acd243a3a1ff5338a8aac88b3aae274bc06..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/pybind11/tools/pybind11NewTools.cmake
+++ /dev/null
@@ -1,203 +0,0 @@
-# tools/pybind11NewTools.cmake -- Build system for the pybind11 modules
-#
-# Copyright (c) 2020 Wenzel Jakob and Henry Schreiner
-#
-# All rights reserved. Use of this source code is governed by a
-# BSD-style license that can be found in the LICENSE file.
-
-get_property(
- is_config
- TARGET pybind11::headers
- PROPERTY IMPORTED)
-
-if(pybind11_FIND_QUIETLY)
- set(_pybind11_quiet QUIET)
-endif()
-
-if(CMAKE_VERSION VERSION_LESS 3.12)
- message(FATAL_ERROR "You cannot use the new FindPython module with CMake < 3.12")
-endif()
-
-if(NOT Python_FOUND
- AND NOT Python3_FOUND
- AND NOT Python2_FOUND)
- if(NOT DEFINED Python_FIND_IMPLEMENTATIONS)
- set(Python_FIND_IMPLEMENTATIONS CPython PyPy)
- endif()
-
- # GitHub Actions like activation
- if(NOT DEFINED Python_ROOT_DIR AND DEFINED ENV{pythonLocation})
- set(Python_ROOT_DIR "$ENV{pythonLocation}")
- endif()
-
- find_package(Python REQUIRED COMPONENTS Interpreter Development ${_pybind11_quiet})
-
- # If we are in submodule mode, export the Python targets to global targets.
-  # If this behavior is not desired, run FindPython _before_ pybind11.
- if(NOT is_config)
- set_property(TARGET Python::Python PROPERTY IMPORTED_GLOBAL TRUE)
- set_property(TARGET Python::Interpreter PROPERTY IMPORTED_GLOBAL TRUE)
- if(TARGET Python::Module)
- set_property(TARGET Python::Module PROPERTY IMPORTED_GLOBAL TRUE)
- endif()
- endif()
-endif()
-
-if(Python_FOUND)
- set(_Python
- Python
- CACHE INTERNAL "" FORCE)
-elseif(Python3_FOUND AND NOT Python2_FOUND)
- set(_Python
- Python3
- CACHE INTERNAL "" FORCE)
-elseif(Python2_FOUND AND NOT Python3_FOUND)
- set(_Python
- Python2
- CACHE INTERNAL "" FORCE)
-else()
- message(AUTHOR_WARNING "Python2 and Python3 both present, pybind11 in "
- "PYBIND11_NOPYTHON mode (manually activate to silence warning)")
- set(_pybind11_nopython ON)
- return()
-endif()
-
-if(PYBIND11_MASTER_PROJECT)
- if(${_Python}_INTERPRETER_ID MATCHES "PyPy")
- message(STATUS "PyPy ${${_Python}_PyPy_VERSION} (Py ${${_Python}_VERSION})")
- else()
- message(STATUS "${_Python} ${${_Python}_VERSION}")
- endif()
-endif()
-
-# Debug check - see https://stackoverflow.com/questions/646518/python-how-to-detect-debug-Interpreter
-execute_process(COMMAND "${${_Python}_EXECUTABLE}" -c "import sys; print(hasattr(sys, 'gettotalrefcount'))"
-                OUTPUT_VARIABLE PYTHON_IS_DEBUG)
-
-# Python debug libraries expose slightly different objects before 3.8
-# https://docs.python.org/3.6/c-api/intro.html#debugging-builds
-# https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
-if(PYTHON_IS_DEBUG)
- set_property(
- TARGET pybind11::pybind11
- APPEND
- PROPERTY INTERFACE_COMPILE_DEFINITIONS Py_DEBUG)
-endif()
-
-# Check on every access - since Python2 and Python3 could have been used - do nothing in that case.
-
-if(DEFINED ${_Python}_INCLUDE_DIRS)
- set_property(
- TARGET pybind11::pybind11
- APPEND
-    PROPERTY INTERFACE_INCLUDE_DIRECTORIES $<BUILD_INTERFACE:${${_Python}_INCLUDE_DIRS}>)
-endif()
-
-if(DEFINED ${_Python}_VERSION AND ${_Python}_VERSION VERSION_LESS 3)
- set_property(
- TARGET pybind11::pybind11
- APPEND
- PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python2_no_register)
-endif()
-
-# In CMake 3.18+, you can find these separately, so include an if
-if(TARGET ${_Python}::${_Python})
- set_property(
- TARGET pybind11::embed
- APPEND
- PROPERTY INTERFACE_LINK_LIBRARIES ${_Python}::${_Python})
-endif()
-
-# CMake 3.15+ has this
-if(TARGET ${_Python}::Module)
- set_property(
- TARGET pybind11::module
- APPEND
- PROPERTY INTERFACE_LINK_LIBRARIES ${_Python}::Module)
-else()
- set_property(
- TARGET pybind11::module
- APPEND
- PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python_link_helper)
-endif()
-
-function(pybind11_add_module target_name)
- cmake_parse_arguments(PARSE_ARGV 1 ARG "STATIC;SHARED;MODULE;THIN_LTO;NO_EXTRAS" "" "")
-
-  # cmake_parse_arguments produces ARG_STATIC/ARG_SHARED for the flags above
-  if(ARG_STATIC)
-    set(type STATIC)
-  elseif(ARG_SHARED)
-    set(type SHARED)
- else()
- set(type MODULE)
- endif()
-
- if("${_Python}" STREQUAL "Python")
- python_add_library(${target_name} ${type} WITH_SOABI ${ARG_UNPARSED_ARGUMENTS})
- elseif("${_Python}" STREQUAL "Python3")
- python3_add_library(${target_name} ${type} WITH_SOABI ${ARG_UNPARSED_ARGUMENTS})
- elseif("${_Python}" STREQUAL "Python2")
- python2_add_library(${target_name} ${type} WITH_SOABI ${ARG_UNPARSED_ARGUMENTS})
- else()
- message(FATAL_ERROR "Cannot detect FindPython version: ${_Python}")
- endif()
-
- target_link_libraries(${target_name} PRIVATE pybind11::headers)
-
- if(type STREQUAL "MODULE")
- target_link_libraries(${target_name} PRIVATE pybind11::module)
- else()
- target_link_libraries(${target_name} PRIVATE pybind11::embed)
- endif()
-
- if(MSVC)
- target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
- endif()
-
- if(DEFINED ${_Python}_VERSION AND ${_Python}_VERSION VERSION_LESS 3)
- target_link_libraries(${target_name} PRIVATE pybind11::python2_no_register)
- endif()
-
- set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden"
- CUDA_VISIBILITY_PRESET "hidden")
-
- if(ARG_NO_EXTRAS)
- return()
- endif()
-
- if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
- if(ARG_THIN_LTO)
- target_link_libraries(${target_name} PRIVATE pybind11::thin_lto)
- else()
- target_link_libraries(${target_name} PRIVATE pybind11::lto)
- endif()
- endif()
-
- if(NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo)
- # Strip unnecessary sections of the binary on Linux/Mac OS
- pybind11_strip(${target_name})
- endif()
-
- if(MSVC)
- target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
- endif()
-endfunction()
-
-function(pybind11_extension name)
- set_property(TARGET ${name} PROPERTY PREFIX "")
-
- if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
- set_property(TARGET ${name} PROPERTY SUFFIX ".pyd")
- endif()
-
- if(${_Python}_SOABI)
- get_property(
- suffix
- TARGET ${name}
- PROPERTY SUFFIX)
- if(NOT suffix)
- set(suffix "${CMAKE_SHARED_MODULE_SUFFIX}")
- endif()
- set_property(TARGET ${name} PROPERTY SUFFIX ".${${_Python}_SOABI}${suffix}")
- endif()
-endfunction()
diff --git a/spaces/CVPR/WALT/mmdet/utils/collect_env.py b/spaces/CVPR/WALT/mmdet/utils/collect_env.py
deleted file mode 100644
index 89c064accdb10abec4a03de04f601d27aab2da70..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/utils/collect_env.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from mmcv.utils import collect_env as collect_base_env
-from mmcv.utils import get_git_hash
-
-import mmdet
-
-
-def collect_env():
- """Collect the information of the running environments."""
- env_info = collect_base_env()
- env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
- return env_info
-
-
-if __name__ == '__main__':
- for name, val in collect_env().items():
- print(f'{name}: {val}')
diff --git a/spaces/CVPR/lama-example/saicinpainting/evaluation/evaluator.py b/spaces/CVPR/lama-example/saicinpainting/evaluation/evaluator.py
deleted file mode 100644
index aa9e80402633c08a580929b38a5cb695cb7171d8..0000000000000000000000000000000000000000
--- a/spaces/CVPR/lama-example/saicinpainting/evaluation/evaluator.py
+++ /dev/null
@@ -1,220 +0,0 @@
-import logging
-import math
-from typing import Dict
-
-import numpy as np
-import torch
-import torch.nn as nn
-import tqdm
-from torch.utils.data import DataLoader
-
-from saicinpainting.evaluation.utils import move_to_device
-
-LOGGER = logging.getLogger(__name__)
-
-
-class InpaintingEvaluator():
- def __init__(self, dataset, scores, area_grouping=True, bins=10, batch_size=32, device='cuda',
- integral_func=None, integral_title=None, clamp_image_range=None):
- """
- :param dataset: torch.utils.data.Dataset which contains images and masks
- :param scores: dict {score_name: EvaluatorScore object}
-        :param area_grouping: in addition to the overall scores, compute scores for groups of samples
-            defined by the share of the image area occluded by the mask
- :param bins: number of groups, partition is generated by np.linspace(0., 1., bins + 1)
- :param batch_size: batch_size for the dataloader
- :param device: device to use
- """
- self.scores = scores
- self.dataset = dataset
-
- self.area_grouping = area_grouping
- self.bins = bins
-
- self.device = torch.device(device)
-
- self.dataloader = DataLoader(self.dataset, shuffle=False, batch_size=batch_size)
-
- self.integral_func = integral_func
- self.integral_title = integral_title
- self.clamp_image_range = clamp_image_range
-
- def _get_bin_edges(self):
- bin_edges = np.linspace(0, 1, self.bins + 1)
-
- num_digits = max(0, math.ceil(math.log10(self.bins)) - 1)
- interval_names = []
- for idx_bin in range(self.bins):
- start_percent, end_percent = round(100 * bin_edges[idx_bin], num_digits), \
- round(100 * bin_edges[idx_bin + 1], num_digits)
- start_percent = '{:.{n}f}'.format(start_percent, n=num_digits)
- end_percent = '{:.{n}f}'.format(end_percent, n=num_digits)
- interval_names.append("{0}-{1}%".format(start_percent, end_percent))
-
- groups = []
- for batch in self.dataloader:
- mask = batch['mask']
- batch_size = mask.shape[0]
- area = mask.to(self.device).reshape(batch_size, -1).mean(dim=-1)
- bin_indices = np.searchsorted(bin_edges, area.detach().cpu().numpy(), side='right') - 1
- # corner case: when area is equal to 1, bin_indices should return bins - 1, not bins for that element
- bin_indices[bin_indices == self.bins] = self.bins - 1
- groups.append(bin_indices)
- groups = np.hstack(groups)
-
- return groups, interval_names
-
- def evaluate(self, model=None):
- """
- :param model: callable with signature (image_batch, mask_batch); should return inpainted_batch
- :return: dict with (score_name, group_type) as keys, where group_type can be either 'overall' or
- name of the particular group arranged by area of mask (e.g. '10-20%')
- and score statistics for the group as values.
- """
- results = dict()
- if self.area_grouping:
- groups, interval_names = self._get_bin_edges()
- else:
- groups = None
-
- for score_name, score in tqdm.auto.tqdm(self.scores.items(), desc='scores'):
- score.to(self.device)
- with torch.no_grad():
- score.reset()
- for batch in tqdm.auto.tqdm(self.dataloader, desc=score_name, leave=False):
- batch = move_to_device(batch, self.device)
- image_batch, mask_batch = batch['image'], batch['mask']
- if self.clamp_image_range is not None:
- image_batch = torch.clamp(image_batch,
- min=self.clamp_image_range[0],
- max=self.clamp_image_range[1])
- if model is None:
- assert 'inpainted' in batch, \
- 'Model is None, so we expected precomputed inpainting results at key "inpainted"'
- inpainted_batch = batch['inpainted']
- else:
- inpainted_batch = model(image_batch, mask_batch)
- score(inpainted_batch, image_batch, mask_batch)
- total_results, group_results = score.get_value(groups=groups)
-
- results[(score_name, 'total')] = total_results
- if groups is not None:
- for group_index, group_values in group_results.items():
- group_name = interval_names[group_index]
- results[(score_name, group_name)] = group_values
-
- if self.integral_func is not None:
- results[(self.integral_title, 'total')] = dict(mean=self.integral_func(results))
-
- return results
-
-
-def ssim_fid100_f1(metrics, fid_scale=100):
- ssim = metrics[('ssim', 'total')]['mean']
- fid = metrics[('fid', 'total')]['mean']
- fid_rel = max(0, fid_scale - fid) / fid_scale
- f1 = 2 * ssim * fid_rel / (ssim + fid_rel + 1e-3)
- return f1
-
-
-def lpips_fid100_f1(metrics, fid_scale=100):
- neg_lpips = 1 - metrics[('lpips', 'total')]['mean'] # invert, so bigger is better
- fid = metrics[('fid', 'total')]['mean']
- fid_rel = max(0, fid_scale - fid) / fid_scale
- f1 = 2 * neg_lpips * fid_rel / (neg_lpips + fid_rel + 1e-3)
- return f1
-
-
-
-class InpaintingEvaluatorOnline(nn.Module):
- def __init__(self, scores, bins=10, image_key='image', inpainted_key='inpainted',
- integral_func=None, integral_title=None, clamp_image_range=None):
- """
- :param scores: dict {score_name: EvaluatorScore object}
- :param bins: number of groups, partition is generated by np.linspace(0., 1., bins + 1)
- :param device: device to use
- """
- super().__init__()
- LOGGER.info(f'{type(self)} init called')
- self.scores = nn.ModuleDict(scores)
- self.image_key = image_key
- self.inpainted_key = inpainted_key
- self.bins_num = bins
- self.bin_edges = np.linspace(0, 1, self.bins_num + 1)
-
- num_digits = max(0, math.ceil(math.log10(self.bins_num)) - 1)
- self.interval_names = []
- for idx_bin in range(self.bins_num):
- start_percent, end_percent = round(100 * self.bin_edges[idx_bin], num_digits), \
- round(100 * self.bin_edges[idx_bin + 1], num_digits)
- start_percent = '{:.{n}f}'.format(start_percent, n=num_digits)
- end_percent = '{:.{n}f}'.format(end_percent, n=num_digits)
- self.interval_names.append("{0}-{1}%".format(start_percent, end_percent))
-
- self.groups = []
-
- self.integral_func = integral_func
- self.integral_title = integral_title
- self.clamp_image_range = clamp_image_range
-
- LOGGER.info(f'{type(self)} init done')
-
- def _get_bins(self, mask_batch):
- batch_size = mask_batch.shape[0]
- area = mask_batch.view(batch_size, -1).mean(dim=-1).detach().cpu().numpy()
- bin_indices = np.clip(np.searchsorted(self.bin_edges, area) - 1, 0, self.bins_num - 1)
- return bin_indices
-
- def forward(self, batch: Dict[str, torch.Tensor]):
- """
- Calculate and accumulate metrics for batch. To finalize evaluation and obtain final metrics, call evaluation_end
-        :param batch: batch dict with mandatory fields mask, image, inpainted (can be overridden by self.inpainted_key)
- """
- result = {}
- with torch.no_grad():
- image_batch, mask_batch, inpainted_batch = batch[self.image_key], batch['mask'], batch[self.inpainted_key]
- if self.clamp_image_range is not None:
- image_batch = torch.clamp(image_batch,
- min=self.clamp_image_range[0],
- max=self.clamp_image_range[1])
- self.groups.extend(self._get_bins(mask_batch))
-
- for score_name, score in self.scores.items():
- result[score_name] = score(inpainted_batch, image_batch, mask_batch)
- return result
-
- def process_batch(self, batch: Dict[str, torch.Tensor]):
- return self(batch)
-
- def evaluation_end(self, states=None):
- """:return: dict with (score_name, group_type) as keys, where group_type can be either 'overall' or
- name of the particular group arranged by area of mask (e.g. '10-20%')
- and score statistics for the group as values.
- """
- LOGGER.info(f'{type(self)}: evaluation_end called')
-
- self.groups = np.array(self.groups)
-
- results = {}
- for score_name, score in self.scores.items():
- LOGGER.info(f'Getting value of {score_name}')
- cur_states = [s[score_name] for s in states] if states is not None else None
- total_results, group_results = score.get_value(groups=self.groups, states=cur_states)
- LOGGER.info(f'Getting value of {score_name} done')
- results[(score_name, 'total')] = total_results
-
- for group_index, group_values in group_results.items():
- group_name = self.interval_names[group_index]
- results[(score_name, group_name)] = group_values
-
- if self.integral_func is not None:
- results[(self.integral_title, 'total')] = dict(mean=self.integral_func(results))
-
- LOGGER.info(f'{type(self)}: reset scores')
- self.groups = []
- for sc in self.scores.values():
- sc.reset()
- LOGGER.info(f'{type(self)}: reset scores done')
-
- LOGGER.info(f'{type(self)}: evaluation_end done')
- return results
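
Both evaluators group samples by the share of the image the mask covers, which reduces to one ``np.searchsorted`` over ``np.linspace`` bin edges plus a corner case for a fully masked image. A standalone sketch with synthetic areas:

```python
import numpy as np

bins = 10
bin_edges = np.linspace(0, 1, bins + 1)

# Synthetic mask-area shares, including the area == 1.0 corner case.
area = np.array([0.03, 0.17, 0.55, 1.0])

bin_indices = np.searchsorted(bin_edges, area, side='right') - 1
bin_indices[bin_indices == bins] = bins - 1  # map area == 1.0 into the last bin

interval_names = ['{:.0f}-{:.0f}%'.format(100 * bin_edges[i], 100 * bin_edges[i + 1])
                  for i in range(bins)]
print([interval_names[i] for i in bin_indices])
# ['0-10%', '10-20%', '50-60%', '90-100%']
```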
diff --git a/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/vision.cpp b/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/vision.cpp
deleted file mode 100644
index f6c049f7b4970b5ab88bf4bea5c5cf95897da0f7..0000000000000000000000000000000000000000
--- a/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/vision.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-
-#include <torch/extension.h>
-#include "ROIAlignRotated/ROIAlignRotated.h"
-#include "box_iou_rotated/box_iou_rotated.h"
-#include "cocoeval/cocoeval.h"
-#include "deformable/deform_conv.h"
-#include "nms_rotated/nms_rotated.h"
-
-namespace detectron2 {
-
-#if defined(WITH_CUDA) || defined(WITH_HIP)
-extern int get_cudart_version();
-#endif
-
-std::string get_cuda_version() {
-#if defined(WITH_CUDA) || defined(WITH_HIP)
- std::ostringstream oss;
-
-#if defined(WITH_CUDA)
- oss << "CUDA ";
-#else
- oss << "HIP ";
-#endif
-
- // copied from
- // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231
- auto printCudaStyleVersion = [&](int v) {
- oss << (v / 1000) << "." << (v / 10 % 100);
- if (v % 10 != 0) {
- oss << "." << (v % 10);
- }
- };
- printCudaStyleVersion(get_cudart_version());
- return oss.str();
-#else // neither CUDA nor HIP
- return std::string("not available");
-#endif
-}
-
-bool has_cuda() {
-#if defined(WITH_CUDA)
- return true;
-#else
- return false;
-#endif
-}
-
-// similar to
-// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp
-std::string get_compiler_version() {
- std::ostringstream ss;
-#if defined(__GNUC__)
-#ifndef __clang__
-
-#if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8))
-#error "GCC >= 4.9 is required!"
-#endif
-
- { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; }
-#endif
-#endif
-
-#if defined(__clang_major__)
- {
- ss << "clang " << __clang_major__ << "." << __clang_minor__ << "."
- << __clang_patchlevel__;
- }
-#endif
-
-#if defined(_MSC_VER)
- { ss << "MSVC " << _MSC_FULL_VER; }
-#endif
- return ss.str();
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("get_compiler_version", &get_compiler_version, "get_compiler_version");
- m.def("get_cuda_version", &get_cuda_version, "get_cuda_version");
- m.def("has_cuda", &has_cuda, "has_cuda");
-
- m.def("box_iou_rotated", &box_iou_rotated, "IoU for rotated boxes");
-
- m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward");
- m.def(
- "deform_conv_backward_input",
- &deform_conv_backward_input,
- "deform_conv_backward_input");
- m.def(
- "deform_conv_backward_filter",
- &deform_conv_backward_filter,
- "deform_conv_backward_filter");
- m.def(
- "modulated_deform_conv_forward",
- &modulated_deform_conv_forward,
- "modulated_deform_conv_forward");
- m.def(
- "modulated_deform_conv_backward",
- &modulated_deform_conv_backward,
- "modulated_deform_conv_backward");
-
- m.def("nms_rotated", &nms_rotated, "NMS for rotated boxes");
-
- m.def(
- "roi_align_rotated_forward",
- &ROIAlignRotated_forward,
- "Forward pass for Rotated ROI-Align Operator");
- m.def(
- "roi_align_rotated_backward",
- &ROIAlignRotated_backward,
- "Backward pass for Rotated ROI-Align Operator");
-
- m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate");
- m.def(
- "COCOevalEvaluateImages",
- &COCOeval::EvaluateImages,
- "COCOeval::EvaluateImages");
-  pybind11::class_<COCOeval::InstanceAnnotation>(m, "InstanceAnnotation")
-      .def(pybind11::init<uint64_t, double, double, bool, bool>());
-  pybind11::class_<COCOeval::ImageEvaluation>(m, "ImageEvaluation")
-      .def(pybind11::init<>());
-}
-
-#ifdef TORCH_LIBRARY
-TORCH_LIBRARY(detectron2, m) {
- m.def("nms_rotated", &nms_rotated);
-}
-#endif
-} // namespace detectron2
diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/local.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/local.py
deleted file mode 100644
index 803b6dc6ebb430285f423cda592fa3e902e9a4a6..0000000000000000000000000000000000000000
--- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/local.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from __future__ import annotations
-
-import dataclasses
-import os
-from typing import Any, List
-
-import numpy as np
-import orjson
-
-from autogpt.llm_utils import create_embedding_with_ada
-from autogpt.memory.base import MemoryProviderSingleton
-
-EMBED_DIM = 1536
-SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
-
-
-def create_default_embeddings():
- return np.zeros((0, EMBED_DIM)).astype(np.float32)
-
-
-@dataclasses.dataclass
-class CacheContent:
- texts: List[str] = dataclasses.field(default_factory=list)
- embeddings: np.ndarray = dataclasses.field(
- default_factory=create_default_embeddings
- )
-
-
-class LocalCache(MemoryProviderSingleton):
- """A class that stores the memory in a local file"""
-
- def __init__(self, cfg) -> None:
- """Initialize a class instance
-
- Args:
- cfg: Config object
-
- Returns:
- None
- """
- self.filename = f"{cfg.memory_index}.json"
- if os.path.exists(self.filename):
- try:
-                with open(self.filename, "r+b") as f:  # r+b: read without truncating the file
- file_content = f.read()
- if not file_content.strip():
- file_content = b"{}"
- f.write(file_content)
-
- loaded = orjson.loads(file_content)
- self.data = CacheContent(**loaded)
- except orjson.JSONDecodeError:
- print(f"Error: The file '{self.filename}' is not in JSON format.")
- self.data = CacheContent()
- else:
- print(
- f"Warning: The file '{self.filename}' does not exist. "
-                "Local memory will not be saved to a file."
- )
- self.data = CacheContent()
-
- def add(self, text: str):
- """
- Add text to our list of texts, add embedding as row to our
- embeddings-matrix
-
- Args:
- text: str
-
- Returns: None
- """
- if "Command Error:" in text:
- return ""
- self.data.texts.append(text)
-
- embedding = create_embedding_with_ada(text)
-
- vector = np.array(embedding).astype(np.float32)
- vector = vector[np.newaxis, :]
- self.data.embeddings = np.concatenate(
- [
- self.data.embeddings,
- vector,
- ],
- axis=0,
- )
-
- with open(self.filename, "wb") as f:
- out = orjson.dumps(self.data, option=SAVE_OPTIONS)
- f.write(out)
- return text
-
- def clear(self) -> str:
- """
-        Clears the local cache.
-
- Returns: A message indicating that the memory has been cleared.
- """
- self.data = CacheContent()
- return "Obliviated"
-
- def get(self, data: str) -> list[Any] | None:
- """
- Gets the data from the memory that is most relevant to the given data.
-
- Args:
- data: The data to compare to.
-
- Returns: The most relevant data.
- """
- return self.get_relevant(data, 1)
-
- def get_relevant(self, text: str, k: int) -> list[Any]:
-        """
-        Matrix-vector multiplication scores every row of the embeddings
-        matrix against the query, then the indices of the top-k scores
-        select the texts to return.
- Args:
- text: str
- k: int
-
- Returns: List[str]
- """
- embedding = create_embedding_with_ada(text)
-
- scores = np.dot(self.data.embeddings, embedding)
-
- top_k_indices = np.argsort(scores)[-k:][::-1]
-
- return [self.data.texts[i] for i in top_k_indices]
-
- def get_stats(self) -> tuple[int, tuple[int, ...]]:
- """
- Returns: The stats of the local cache.
- """
- return len(self.data.texts), self.data.embeddings.shape
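
Because the cache keeps all ada embeddings stacked in one matrix, ``get_relevant`` is a single matrix-vector product followed by an argsort. A sketch of that scoring step, with random vectors standing in for ``create_embedding_with_ada``:

```python
import numpy as np

EMBED_DIM = 1536
rng = np.random.default_rng(0)

texts = ['first memory', 'second memory', 'third memory']
# Random vectors stand in for the ada embeddings LocalCache would store.
embeddings = rng.normal(size=(len(texts), EMBED_DIM)).astype(np.float32)

# A query close to the second stored embedding.
query = embeddings[1] + 0.01 * rng.normal(size=EMBED_DIM)

scores = np.dot(embeddings, query)             # one score per stored text
top_k_indices = np.argsort(scores)[-2:][::-1]  # best two, highest first
print([texts[i] for i in top_k_indices])       # 'second memory' ranks first
```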
diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/webvid_datasets.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/webvid_datasets.py
deleted file mode 100644
index aaf6b9d6dff0d96b04d40a40c0051527f7d01842..0000000000000000000000000000000000000000
--- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/webvid_datasets.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import os
-from video_llama.datasets.datasets.base_dataset import BaseDataset
-from video_llama.datasets.datasets.caption_datasets import CaptionDataset
-import pandas as pd
-import decord
-from decord import VideoReader
-import random
-import torch
-from torch.utils.data.dataloader import default_collate
-class WebvidDataset(BaseDataset):
- def __init__(self, vis_processor, text_processor, vis_root, ann_root):
- """
-        vis_root (string): Root directory of videos (e.g. webvid_eval/video/)
-        ann_root (string): Root directory of annotations (e.g. webvid_eval/annotations/)
- split (string): val or test
- """
- super().__init__(vis_processor=vis_processor, text_processor=text_processor)
-
-
-        # Read all csv annotation files under ann_root
-
- ts_df = []
- for file_name in os.listdir(ann_root):
- if file_name.endswith('.csv'):
- df = pd.read_csv(os.path.join(ann_root, file_name))
- ts_df.append(df)
-
- merged_df = pd.concat(ts_df)
- self.annotation = merged_df
- self.vis_root = vis_root
- self.resize_size = 224
- self.num_frm = 8
- self.frm_sampling_strategy = 'headtail'
-
- def _get_video_path(self, sample):
- rel_video_fp = os.path.join(sample['page_dir'], str(sample['videoid']) + '.mp4')
- full_video_fp = os.path.join(self.vis_root, rel_video_fp)
- return full_video_fp
-
- def __getitem__(self, index):
- num_retries = 10 # skip error videos
- for _ in range(num_retries):
- sample = self.annotation.iloc[index]
- sample_dict = sample.to_dict()
- video_id = sample_dict['videoid']
-
- if 'name' in sample_dict.keys():
- text = sample_dict['name'].strip()
- else:
- raise NotImplementedError("Un-supported text annotation format.")
-
- # fetch video
- video_path = self._get_video_path(sample_dict)
- # if os.path.exists(video_path):
- try:
- video = self.vis_processor(video_path)
-            except Exception:
- print(f"Failed to load examples with video: {video_path}. "
- f"Will randomly sample an example as a replacement.")
- index = random.randint(0, len(self) - 1)
- continue
- caption = self.text_processor(text)
-
- # print(video.size())
- if video is None or caption is None \
- or video.size()!=torch.Size([3,self.vis_processor.n_frms,224,224]):
- print(f"Failed to load examples with video: {video_path}. "
- f"Will randomly sample an example as a replacement.")
- index = random.randint(0, len(self) - 1)
- continue
- else:
- break
- else:
- raise RuntimeError(f"Failed to fetch video after {num_retries} retries.")
- # "image_id" is kept to stay compatible with the COCO evaluation format
- return {
- "image": video,
- "text_input": caption,
- "type":'video',
- }
-
- def __len__(self):
- return len(self.annotation)
-
- # def collater(self, samples):
- # new_result = {}
- # new_result['image'] = default_collate( [sample["image"] for sample in samples])
- # new_result['text_input'] = default_collate( [sample["text_input"] for sample in samples])
- # return new_result
-
-class WebvidDatasetEvalDataset(BaseDataset):
- def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
- """
-        vis_root (string): Root directory of videos (e.g. webvid_eval/video/)
- ann_root (string): directory to store the annotation file
- split (string): val or test
- """
- super().__init__(vis_processor, text_processor, vis_root, ann_paths)
-
- def __getitem__(self, index):
-
- ann = self.annotation[index]
-
- vname = ann["video"]
- video_path = os.path.join(self.vis_root, vname)
-
- video = self.vis_processor(video_path)
-
- return {
- "video": video,
- "image_id": ann["image_id"],
- "instance_id": ann["instance_id"],
- }
-
-
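
The ``__getitem__`` retry loop above is a generic pattern for corpora with corrupt items: on a failed decode, resample a random index and try again, raising only after ``num_retries`` misses. A dataset-agnostic sketch with a simulated loader (every odd index "fails"):

```python
import random

random.seed(0)  # deterministic resampling for the sketch

def load_item(index):
    # Stand-in for self.vis_processor(video_path); every odd index "fails".
    if index % 2:
        raise IOError(f'corrupt video at index {index}')
    return f'video-{index}'

def get_with_retries(dataset_len, index, num_retries=10):
    for _ in range(num_retries):
        try:
            return load_item(index)
        except Exception:
            # Resample a random replacement index, as the dataset above does.
            index = random.randint(0, dataset_len - 1)
    raise RuntimeError(f'Failed to fetch video after {num_retries} retries.')

print(get_with_retries(dataset_len=100, index=3))  # 'video-<k>' for the first even k drawn
```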
diff --git a/spaces/DHEIVER/ThyroidTumorClassificationModel/app.py b/spaces/DHEIVER/ThyroidTumorClassificationModel/app.py
deleted file mode 100644
index f413ce114d8f2251e08f6099b659f60809c9ea00..0000000000000000000000000000000000000000
--- a/spaces/DHEIVER/ThyroidTumorClassificationModel/app.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import gradio as gr
-from transformers import AutoFeatureExtractor, AutoModelForImageClassification
-from PIL import Image
-import torch
-import datetime
-
-# Load the feature extractor and the model
-extractor = AutoFeatureExtractor.from_pretrained("SerdarHelli/ThyroidTumorClassificationModel")
-model = AutoModelForImageClassification.from_pretrained("SerdarHelli/ThyroidTumorClassificationModel")
-
-# Function to classify the image
-def classify_image(image):
-    # Preprocess the image using the extractor
- inputs = extractor(images=image, return_tensors="pt")
-
-    # Run the image through the model
- outputs = model(**inputs)
-
-    # Get the raw class scores (logits)
- logits = outputs.logits
-
-    # Compute the final probabilities with softmax
- probabilities = torch.softmax(logits, dim=1)
-
-    # Get the class with the highest probability
- predicted_class = torch.argmax(probabilities, dim=1).item()
-
-    # Custom class labels for this model
- class_labels = ["Sem Tumor", "Tumor"]
-
-    # Label of the predicted class
- predicted_label = class_labels[predicted_class]
-
-    # Get the current date and time
- current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-    # Format the output as HTML, including the date and time
-    result_html = f"""
-    <div>
-      <h2>Resultado da Classificação</h2>
-      <p><b>Classe Predita:</b> {predicted_label}</p>
-      <p><b>Data e Hora:</b> {current_time}</p>
-    </div>
-    """
-
-    # Return the result formatted as HTML
- return result_html
-
-# Create a Gradio interface with details about the thyroid tumor classifier
-iface = gr.Interface(
- fn=classify_image,
- inputs=gr.inputs.Image(),
- outputs=gr.outputs.HTML(), # Saída formatada com HTML
- title="Classificador de Tumor da Tireoide",
-    description="""
-    <p>Este é um classificador de imagens de tumores da tireoide.</p>
-    <p>Para usá-lo:</p>
-    <ol>
-      <li>Clique no botão 'Escolher Arquivo' para fazer o upload de uma imagem da tireoide.</li>
-      <li>Aguarde a classificação automática.</li>
-      <li>O resultado mostrará a classe predita e a data e hora da classificação.</li>
-    </ol>
-    <p>Este classificador é baseado em um modelo pré-treinado e pode ajudar a identificar a presença de tumores da tireoide em imagens médicas.</p>
-    """,
-)
-
-# Launch the Gradio interface
-iface.launch()
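
The inference chain in ``classify_image`` (logits -> softmax -> argmax -> label) can be checked without downloading the model; a sketch with a hypothetical logits tensor:

```python
import torch

# Hypothetical logits for one image over ["Sem Tumor", "Tumor"].
logits = torch.tensor([[0.3, 1.9]])

probabilities = torch.softmax(logits, dim=1)
predicted_class = torch.argmax(probabilities, dim=1).item()

class_labels = ["Sem Tumor", "Tumor"]
print(class_labels[predicted_class], probabilities[0, predicted_class].item())
# Tumor 0.832...
```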
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/save.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/save.py
deleted file mode 100644
index 90d36f14bc5ebf5cb1e07cb469191ed21e4b3f4b..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/utils/save.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import json
-import pathlib
-import warnings
-
-from .mimebundle import spec_to_mimebundle
-from ..vegalite.v5.data import data_transformers
-
-
-def write_file_or_filename(fp, content, mode="w", encoding=None):
- """Write content to fp, whether fp is a string, a pathlib Path or a
- file-like object"""
- if isinstance(fp, str) or isinstance(fp, pathlib.PurePath):
- with open(file=fp, mode=mode, encoding=encoding) as f:
- f.write(content)
- else:
- fp.write(content)
-
-
-def set_inspect_format_argument(format, fp, inline):
- """Inspect the format argument in the save function"""
- if format is None:
- if isinstance(fp, str):
- format = fp.split(".")[-1]
- elif isinstance(fp, pathlib.PurePath):
- format = fp.suffix.lstrip(".")
- else:
- raise ValueError(
- "must specify file format: "
- "['png', 'svg', 'pdf', 'html', 'json', 'vega']"
- )
-
- if format != "html" and inline:
- warnings.warn("inline argument ignored for non HTML formats.", stacklevel=1)
-
- return format
-
-
-def set_inspect_mode_argument(mode, embed_options, spec, vegalite_version):
- """Inspect the mode argument in the save function"""
- if mode is None:
- if "mode" in embed_options:
- mode = embed_options["mode"]
- elif "$schema" in spec:
- mode = spec["$schema"].split("/")[-2]
- else:
- mode = "vega-lite"
-
- if mode != "vega-lite":
- raise ValueError("mode must be 'vega-lite', " "not '{}'".format(mode))
-
- if mode == "vega-lite" and vegalite_version is None:
- raise ValueError("must specify vega-lite version")
-
- return mode
-
-
-def save(
- chart,
- fp,
- vega_version,
- vegaembed_version,
- format=None,
- mode=None,
- vegalite_version=None,
- embed_options=None,
- json_kwds=None,
- webdriver=None,
- scale_factor=1,
- engine=None,
- inline=False,
- **kwargs,
-):
- """Save a chart to file in a variety of formats
-
- Supported formats are [json, html, png, svg, pdf]
-
- Parameters
- ----------
- chart : alt.Chart
- the chart instance to save
- fp : string filename, pathlib.Path or file-like object
- file to which to write the chart.
- format : string (optional)
- the format to write: one of ['json', 'html', 'png', 'svg', 'pdf'].
- If not specified, the format will be determined from the filename.
- mode : string (optional)
- Must be 'vega-lite'. If not specified, then infer the mode from
- the '$schema' property of the spec, or the ``opt`` dictionary.
- If it's not specified in either of those places, then use 'vega-lite'.
- vega_version : string (optional)
- For html output, the version of vega.js to use
- vegalite_version : string (optional)
- For html output, the version of vegalite.js to use
- vegaembed_version : string (optional)
- For html output, the version of vegaembed.js to use
- embed_options : dict (optional)
- The vegaEmbed options dictionary. Default is {}
- (See https://github.com/vega/vega-embed for details)
- json_kwds : dict (optional)
- Additional keyword arguments are passed to the output method
- associated with the specified format.
- webdriver : string {'chrome' | 'firefox'} (optional)
- Webdriver to use for png or svg output
- scale_factor : float (optional)
- scale_factor to use to change size/resolution of png or svg output
- engine: string {'vl-convert', 'altair_saver'}
- the conversion engine to use for 'png', 'svg', and 'pdf' formats
- inline: bool (optional)
- If False (default), the required JavaScript libraries are loaded
- from a CDN location in the resulting html file.
- If True, the required JavaScript libraries are inlined into the resulting
- html file so that it will work without an internet connection.
- The altair_viewer package is required if True.
- **kwargs :
- additional kwargs passed to spec_to_mimebundle.
- """
- if json_kwds is None:
- json_kwds = {}
-
- if embed_options is None:
- embed_options = {}
-
- format = set_inspect_format_argument(format, fp, inline)
-
- # Temporarily turn off any data transformers so that all data is inlined
- # when calling chart.to_dict. This is relevant for vl-convert which cannot access
- # local json files which could be created by a json data transformer. Furthermore,
- # we don't exit the with statement until this function completed due to the issue
- # described at https://github.com/vega/vl-convert/issues/31
- with data_transformers.enable("default"), data_transformers.disable_max_rows():
- spec = chart.to_dict()
-
- mode = set_inspect_mode_argument(mode, embed_options, spec, vegalite_version)
-
- if format == "json":
- json_spec = json.dumps(spec, **json_kwds)
- write_file_or_filename(fp, json_spec, mode="w")
- elif format == "html":
- if inline:
- kwargs["template"] = "inline"
- mimebundle = spec_to_mimebundle(
- spec=spec,
- format=format,
- mode=mode,
- vega_version=vega_version,
- vegalite_version=vegalite_version,
- vegaembed_version=vegaembed_version,
- embed_options=embed_options,
- json_kwds=json_kwds,
- **kwargs,
- )
- write_file_or_filename(fp, mimebundle["text/html"], mode="w")
- elif format in ["png", "svg", "pdf", "vega"]:
- mimebundle = spec_to_mimebundle(
- spec=spec,
- format=format,
- mode=mode,
- vega_version=vega_version,
- vegalite_version=vegalite_version,
- vegaembed_version=vegaembed_version,
- webdriver=webdriver,
- scale_factor=scale_factor,
- engine=engine,
- **kwargs,
- )
- if format == "png":
- write_file_or_filename(fp, mimebundle["image/png"], mode="wb")
- elif format == "pdf":
- write_file_or_filename(fp, mimebundle["application/pdf"], mode="wb")
- else:
- encoding = kwargs.get("encoding", "utf-8")
- write_file_or_filename(
- fp, mimebundle["image/svg+xml"], mode="w", encoding=encoding
- )
- else:
- raise ValueError("Unsupported format: '{}'".format(format))
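
All of this is normally reached through ``Chart.save``, which forwards here; the common path is format inference from the file suffix. A short usage sketch (saving to png/svg/pdf additionally needs one of the conversion engines installed):

```python
import altair as alt
import pandas as pd

source = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 1, 6]})
chart = alt.Chart(source).mark_line().encode(x='x', y='y')

chart.save('chart.html')  # format inferred from the .html suffix
chart.save('chart.json')  # plain Vega-Lite spec
# chart.save('chart.png') # needs vl-convert-python or altair_saver installed
```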
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/tz/tz.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/tz/tz.py
deleted file mode 100644
index c67f56d4659f17aab4540dfd42511bb850871a77..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/tz/tz.py
+++ /dev/null
@@ -1,1849 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module offers timezone implementations subclassing the abstract
-:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
-files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
-etc), TZ environment string (in all known formats), given ranges (with help
-from relative deltas), local machine timezone, fixed offset timezone, and UTC
-timezone.
-"""
-import datetime
-import struct
-import time
-import sys
-import os
-import bisect
-import weakref
-from collections import OrderedDict
-
-import six
-from six import string_types
-from six.moves import _thread
-from ._common import tzname_in_python2, _tzinfo
-from ._common import tzrangebase, enfold
-from ._common import _validate_fromutc_inputs
-
-from ._factories import _TzSingleton, _TzOffsetFactory
-from ._factories import _TzStrFactory
-try:
- from .win import tzwin, tzwinlocal
-except ImportError:
- tzwin = tzwinlocal = None
-
-# For warning about rounding tzinfo
-from warnings import warn
-
-ZERO = datetime.timedelta(0)
-EPOCH = datetime.datetime.utcfromtimestamp(0)
-EPOCHORDINAL = EPOCH.toordinal()
-
-
-@six.add_metaclass(_TzSingleton)
-class tzutc(datetime.tzinfo):
- """
- This is a tzinfo object that represents the UTC time zone.
-
- **Examples:**
-
- .. doctest::
-
- >>> from datetime import *
- >>> from dateutil.tz import *
-
- >>> datetime.now()
- datetime.datetime(2003, 9, 27, 9, 40, 1, 521290)
-
- >>> datetime.now(tzutc())
- datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc())
-
- >>> datetime.now(tzutc()).tzname()
- 'UTC'
-
- .. versionchanged:: 2.7.0
- ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will
- always return the same object.
-
- .. doctest::
-
- >>> from dateutil.tz import tzutc, UTC
- >>> tzutc() is tzutc()
- True
- >>> tzutc() is UTC
- True
- """
- def utcoffset(self, dt):
- return ZERO
-
- def dst(self, dt):
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- return "UTC"
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- return False
-
- @_validate_fromutc_inputs
- def fromutc(self, dt):
- """
- Fast track version of fromutc() returns the original ``dt`` object for
- any valid :py:class:`datetime.datetime` object.
- """
- return dt
-
- def __eq__(self, other):
- if not isinstance(other, (tzutc, tzoffset)):
- return NotImplemented
-
- return (isinstance(other, tzutc) or
- (isinstance(other, tzoffset) and other._offset == ZERO))
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s()" % self.__class__.__name__
-
- __reduce__ = object.__reduce__
-
-
-#: Convenience constant providing a :class:`tzutc()` instance
-#:
-#: .. versionadded:: 2.7.0
-UTC = tzutc()
-
-
-@six.add_metaclass(_TzOffsetFactory)
-class tzoffset(datetime.tzinfo):
- """
- A simple class for representing a fixed offset from UTC.
-
- :param name:
- The timezone name, to be returned when ``tzname()`` is called.
- :param offset:
- The time zone offset in seconds, or (since version 2.6.0, represented
- as a :py:class:`datetime.timedelta` object).
- """
- def __init__(self, name, offset):
- self._name = name
-
- try:
- # Allow a timedelta
- offset = offset.total_seconds()
- except (TypeError, AttributeError):
- pass
-
- self._offset = datetime.timedelta(seconds=_get_supported_offset(offset))
-
- def utcoffset(self, dt):
- return self._offset
-
- def dst(self, dt):
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- return self._name
-
- @_validate_fromutc_inputs
- def fromutc(self, dt):
- return dt + self._offset
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- return False
-
- def __eq__(self, other):
- if not isinstance(other, tzoffset):
- return NotImplemented
-
- return self._offset == other._offset
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s(%s, %s)" % (self.__class__.__name__,
- repr(self._name),
- int(self._offset.total_seconds()))
-
- __reduce__ = object.__reduce__
-
-
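
Both classes above are fixed-offset zones, so conversions through them are exact and unambiguous; a quick usage sketch:

```python
from datetime import datetime
from dateutil import tz

utc = tz.tzutc()                    # singleton UTC instance
brst = tz.tzoffset('BRST', -10800)  # fixed -03:00 offset named 'BRST'

dt = datetime(2003, 9, 27, 12, 0, tzinfo=utc)
print(dt.astimezone(brst))  # 2003-09-27 09:00:00-03:00
print(tz.tzutc() is utc)    # True: tzutc() always returns the same object
```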
-class tzlocal(_tzinfo):
- """
- A :class:`tzinfo` subclass built around the ``time`` timezone functions.
- """
- def __init__(self):
- super(tzlocal, self).__init__()
-
- self._std_offset = datetime.timedelta(seconds=-time.timezone)
- if time.daylight:
- self._dst_offset = datetime.timedelta(seconds=-time.altzone)
- else:
- self._dst_offset = self._std_offset
-
- self._dst_saved = self._dst_offset - self._std_offset
- self._hasdst = bool(self._dst_saved)
- self._tznames = tuple(time.tzname)
-
- def utcoffset(self, dt):
- if dt is None and self._hasdst:
- return None
-
- if self._isdst(dt):
- return self._dst_offset
- else:
- return self._std_offset
-
- def dst(self, dt):
- if dt is None and self._hasdst:
- return None
-
- if self._isdst(dt):
- return self._dst_offset - self._std_offset
- else:
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- return self._tznames[self._isdst(dt)]
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- naive_dst = self._naive_is_dst(dt)
- return (not naive_dst and
- (naive_dst != self._naive_is_dst(dt - self._dst_saved)))
-
- def _naive_is_dst(self, dt):
- timestamp = _datetime_to_timestamp(dt)
- return time.localtime(timestamp + time.timezone).tm_isdst
-
- def _isdst(self, dt, fold_naive=True):
- # We can't use mktime here. It is unstable when deciding if
- # the hour near to a change is DST or not.
- #
- # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
- # dt.minute, dt.second, dt.weekday(), 0, -1))
- # return time.localtime(timestamp).tm_isdst
- #
- # The code above yields the following result:
- #
- # >>> import tz, datetime
- # >>> t = tz.tzlocal()
- # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
- # 'BRDT'
- # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
- # 'BRST'
- # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
- # 'BRST'
- # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
- # 'BRDT'
- # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
- # 'BRDT'
- #
- # Here is a more stable implementation:
- #
- if not self._hasdst:
- return False
-
- # Check for ambiguous times:
- dstval = self._naive_is_dst(dt)
- fold = getattr(dt, 'fold', None)
-
- if self.is_ambiguous(dt):
- if fold is not None:
- return not self._fold(dt)
- else:
- return True
-
- return dstval
-
- def __eq__(self, other):
- if isinstance(other, tzlocal):
- return (self._std_offset == other._std_offset and
- self._dst_offset == other._dst_offset)
- elif isinstance(other, tzutc):
- return (not self._hasdst and
- self._tznames[0] in {'UTC', 'GMT'} and
- self._std_offset == ZERO)
- elif isinstance(other, tzoffset):
- return (not self._hasdst and
- self._tznames[0] == other._name and
- self._std_offset == other._offset)
- else:
- return NotImplemented
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s()" % self.__class__.__name__
-
- __reduce__ = object.__reduce__
-
-
-class _ttinfo(object):
- __slots__ = ["offset", "delta", "isdst", "abbr",
- "isstd", "isgmt", "dstoffset"]
-
- def __init__(self):
- for attr in self.__slots__:
- setattr(self, attr, None)
-
- def __repr__(self):
- l = []
- for attr in self.__slots__:
- value = getattr(self, attr)
- if value is not None:
- l.append("%s=%s" % (attr, repr(value)))
- return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
-
- def __eq__(self, other):
- if not isinstance(other, _ttinfo):
- return NotImplemented
-
- return (self.offset == other.offset and
- self.delta == other.delta and
- self.isdst == other.isdst and
- self.abbr == other.abbr and
- self.isstd == other.isstd and
- self.isgmt == other.isgmt and
- self.dstoffset == other.dstoffset)
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __getstate__(self):
- state = {}
- for name in self.__slots__:
- state[name] = getattr(self, name, None)
- return state
-
- def __setstate__(self, state):
- for name in self.__slots__:
- if name in state:
- setattr(self, name, state[name])
-
-
-class _tzfile(object):
- """
- Lightweight class for holding the relevant transition and time zone
- information read from binary tzfiles.
- """
- attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list',
- 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
-
- def __init__(self, **kwargs):
- for attr in self.attrs:
- setattr(self, attr, kwargs.get(attr, None))
-
-
-class tzfile(_tzinfo):
- """
- This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
- format timezone files to extract current and historical zone information.
-
- :param fileobj:
- This can be an opened file stream or a file name that the time zone
- information can be read from.
-
- :param filename:
- This is an optional parameter specifying the source of the time zone
- information in the event that ``fileobj`` is a file object. If omitted
- and ``fileobj`` is a file stream, this parameter will be set either to
- ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
-
- See `Sources for Time Zone and Daylight Saving Time Data
-    <https://data.iana.org/time-zones/tz-link.html>`_ for more information.
-    Time zone files can be compiled from the `IANA Time Zone database files
-    <https://www.iana.org/time-zones>`_ with the `zic time zone compiler
-    <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
-
- .. note::
-
- Only construct a ``tzfile`` directly if you have a specific timezone
- file on disk that you want to read into a Python ``tzinfo`` object.
- If you want to get a ``tzfile`` representing a specific IANA zone,
- (e.g. ``'America/New_York'``), you should call
- :func:`dateutil.tz.gettz` with the zone identifier.
-
-
- **Examples:**
-
- Using the US Eastern time zone as an example, we can see that a ``tzfile``
- provides time zone information for the standard Daylight Saving offsets:
-
- .. testsetup:: tzfile
-
- from dateutil.tz import gettz
- from datetime import datetime
-
- .. doctest:: tzfile
-
- >>> NYC = gettz('America/New_York')
- >>> NYC
- tzfile('/usr/share/zoneinfo/America/New_York')
-
- >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST
- 2016-01-03 00:00:00-05:00
-
- >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT
- 2016-07-07 00:00:00-04:00
-
-
-    The ``tzfile`` structure contains a full history of the time zone,
- so historical dates will also have the right offsets. For example, before
- the adoption of the UTC standards, New York used local solar mean time:
-
- .. doctest:: tzfile
-
- >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT
- 1901-04-12 00:00:00-04:56
-
- And during World War II, New York was on "Eastern War Time", which was a
- state of permanent daylight saving time:
-
- .. doctest:: tzfile
-
- >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT
- 1944-02-07 00:00:00-04:00
-
- """
-
- def __init__(self, fileobj, filename=None):
- super(tzfile, self).__init__()
-
- file_opened_here = False
- if isinstance(fileobj, string_types):
- self._filename = fileobj
- fileobj = open(fileobj, 'rb')
- file_opened_here = True
- elif filename is not None:
- self._filename = filename
- elif hasattr(fileobj, "name"):
- self._filename = fileobj.name
- else:
- self._filename = repr(fileobj)
-
- if fileobj is not None:
- if not file_opened_here:
- fileobj = _nullcontext(fileobj)
-
- with fileobj as file_stream:
- tzobj = self._read_tzfile(file_stream)
-
- self._set_tzdata(tzobj)
-
- def _set_tzdata(self, tzobj):
- """ Set the time zone data of this object from a _tzfile object """
- # Copy the relevant attributes over as private attributes
- for attr in _tzfile.attrs:
- setattr(self, '_' + attr, getattr(tzobj, attr))
-
- def _read_tzfile(self, fileobj):
- out = _tzfile()
-
- # From tzfile(5):
- #
- # The time zone information files used by tzset(3)
- # begin with the magic characters "TZif" to identify
- # them as time zone information files, followed by
- # sixteen bytes reserved for future use, followed by
- # six four-byte values of type long, written in a
- # ``standard'' byte order (the high-order byte
- # of the value is written first).
- if fileobj.read(4).decode() != "TZif":
- raise ValueError("magic not found")
-
- fileobj.read(16)
-
- (
- # The number of UTC/local indicators stored in the file.
- ttisgmtcnt,
-
- # The number of standard/wall indicators stored in the file.
- ttisstdcnt,
-
- # The number of leap seconds for which data is
- # stored in the file.
- leapcnt,
-
- # The number of "transition times" for which data
- # is stored in the file.
- timecnt,
-
- # The number of "local time types" for which data
- # is stored in the file (must not be zero).
- typecnt,
-
- # The number of characters of "time zone
- # abbreviation strings" stored in the file.
- charcnt,
-
- ) = struct.unpack(">6l", fileobj.read(24))
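-
-        # (For reference, a sketch of the fixed-size header just parsed:
-        # 4 magic + 16 reserved + 6 * 4 count bytes = 44 bytes total; the six
-        # counts size the variable-length blocks that follow.)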
-
- # The above header is followed by tzh_timecnt four-byte
- # values of type long, sorted in ascending order.
- # These values are written in ``standard'' byte order.
- # Each is used as a transition time (as returned by
- # time(2)) at which the rules for computing local time
- # change.
-
- if timecnt:
- out.trans_list_utc = list(struct.unpack(">%dl" % timecnt,
- fileobj.read(timecnt*4)))
- else:
- out.trans_list_utc = []
-
- # Next come tzh_timecnt one-byte values of type unsigned
- # char; each one tells which of the different types of
- # ``local time'' types described in the file is associated
- # with the same-indexed transition time. These values
- # serve as indices into an array of ttinfo structures that
- # appears next in the file.
-
- if timecnt:
- out.trans_idx = struct.unpack(">%dB" % timecnt,
- fileobj.read(timecnt))
- else:
- out.trans_idx = []
-
- # Each ttinfo structure is written as a four-byte value
- # for tt_gmtoff of type long, in a standard byte
- # order, followed by a one-byte value for tt_isdst
- # and a one-byte value for tt_abbrind. In each
- # structure, tt_gmtoff gives the number of
- # seconds to be added to UTC, tt_isdst tells whether
- # tm_isdst should be set by localtime(3), and
- # tt_abbrind serves as an index into the array of
- # time zone abbreviation characters that follow the
- # ttinfo structure(s) in the file.
-
- ttinfo = []
-
- for i in range(typecnt):
- ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
-
- abbr = fileobj.read(charcnt).decode()
-
- # Then there are tzh_leapcnt pairs of four-byte
- # values, written in standard byte order; the
- # first value of each pair gives the time (as
- # returned by time(2)) at which a leap second
- # occurs; the second gives the total number of
- # leap seconds to be applied after the given time.
- # The pairs of values are sorted in ascending order
- # by time.
-
- # Not used, for now (but seek for correct file position)
- if leapcnt:
- fileobj.seek(leapcnt * 8, os.SEEK_CUR)
-
- # Then there are tzh_ttisstdcnt standard/wall
- # indicators, each stored as a one-byte value;
- # they tell whether the transition times associated
- # with local time types were specified as standard
- # time or wall clock time, and are used when
- # a time zone file is used in handling POSIX-style
- # time zone environment variables.
-
- if ttisstdcnt:
- isstd = struct.unpack(">%db" % ttisstdcnt,
- fileobj.read(ttisstdcnt))
-
- # Finally, there are tzh_ttisgmtcnt UTC/local
- # indicators, each stored as a one-byte value;
- # they tell whether the transition times associated
- # with local time types were specified as UTC or
- # local time, and are used when a time zone file
- # is used in handling POSIX-style time zone envi-
- # ronment variables.
-
- if ttisgmtcnt:
- isgmt = struct.unpack(">%db" % ttisgmtcnt,
- fileobj.read(ttisgmtcnt))
-
- # Build ttinfo list
- out.ttinfo_list = []
- for i in range(typecnt):
- gmtoff, isdst, abbrind = ttinfo[i]
- gmtoff = _get_supported_offset(gmtoff)
- tti = _ttinfo()
- tti.offset = gmtoff
- tti.dstoffset = datetime.timedelta(0)
- tti.delta = datetime.timedelta(seconds=gmtoff)
- tti.isdst = isdst
- tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
- tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
- tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
- out.ttinfo_list.append(tti)
-
- # Replace ttinfo indexes for ttinfo objects.
- out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
-
- # Set standard, dst, and before ttinfos. before will be
- # used when a given time is before any transitions,
- # and will be set to the first non-dst ttinfo, or to
- # the first dst, if all of them are dst.
- out.ttinfo_std = None
- out.ttinfo_dst = None
- out.ttinfo_before = None
- if out.ttinfo_list:
- if not out.trans_list_utc:
- out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
- else:
- for i in range(timecnt-1, -1, -1):
- tti = out.trans_idx[i]
- if not out.ttinfo_std and not tti.isdst:
- out.ttinfo_std = tti
- elif not out.ttinfo_dst and tti.isdst:
- out.ttinfo_dst = tti
-
- if out.ttinfo_std and out.ttinfo_dst:
- break
- else:
- if out.ttinfo_dst and not out.ttinfo_std:
- out.ttinfo_std = out.ttinfo_dst
-
- for tti in out.ttinfo_list:
- if not tti.isdst:
- out.ttinfo_before = tti
- break
- else:
- out.ttinfo_before = out.ttinfo_list[0]
-
- # Now fix transition times to become relative to wall time.
- #
- # I'm not sure about this. In my tests, the tz source file
-        # is set up to wall time, and in the binary file isstd and
- # isgmt are off, so it should be in wall time. OTOH, it's
- # always in gmt time. Let me know if you have comments
- # about this.
- lastdst = None
- lastoffset = None
- lastdstoffset = None
- lastbaseoffset = None
- out.trans_list = []
-
- for i, tti in enumerate(out.trans_idx):
- offset = tti.offset
- dstoffset = 0
-
- if lastdst is not None:
- if tti.isdst:
- if not lastdst:
- dstoffset = offset - lastoffset
-
- if not dstoffset and lastdstoffset:
- dstoffset = lastdstoffset
-
- tti.dstoffset = datetime.timedelta(seconds=dstoffset)
- lastdstoffset = dstoffset
-
- # If a time zone changes its base offset during a DST transition,
- # then you need to adjust by the previous base offset to get the
- # transition time in local time. Otherwise you use the current
- # base offset. Ideally, I would have some mathematical proof of
- # why this is true, but I haven't really thought about it enough.
- baseoffset = offset - dstoffset
- adjustment = baseoffset
- if (lastbaseoffset is not None and baseoffset != lastbaseoffset
- and tti.isdst != lastdst):
- # The base DST has changed
- adjustment = lastbaseoffset
-
- lastdst = tti.isdst
- lastoffset = offset
- lastbaseoffset = baseoffset
-
- out.trans_list.append(out.trans_list_utc[i] + adjustment)
-
- out.trans_idx = tuple(out.trans_idx)
- out.trans_list = tuple(out.trans_list)
- out.trans_list_utc = tuple(out.trans_list_utc)
-
- return out
-
- def _find_last_transition(self, dt, in_utc=False):
- # If there's no list, there are no transitions to find
- if not self._trans_list:
- return None
-
- timestamp = _datetime_to_timestamp(dt)
-
- # Find where the timestamp fits in the transition list - if the
- # timestamp is a transition time, it's part of the "after" period.
- trans_list = self._trans_list_utc if in_utc else self._trans_list
- idx = bisect.bisect_right(trans_list, timestamp)
-
- # We want to know when the previous transition was, so subtract off 1
- return idx - 1
-
- def _get_ttinfo(self, idx):
- # For no list or after the last transition, default to _ttinfo_std
- if idx is None or (idx + 1) >= len(self._trans_list):
- return self._ttinfo_std
-
- # If there is a list and the time is before it, return _ttinfo_before
- if idx < 0:
- return self._ttinfo_before
-
- return self._trans_idx[idx]
-
- def _find_ttinfo(self, dt):
- idx = self._resolve_ambiguous_time(dt)
-
- return self._get_ttinfo(idx)
-
- def fromutc(self, dt):
- """
- The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.
-
- :param dt:
- A :py:class:`datetime.datetime` object.
-
- :raises TypeError:
- Raised if ``dt`` is not a :py:class:`datetime.datetime` object.
-
- :raises ValueError:
- Raised if this is called with a ``dt`` which does not have this
- ``tzinfo`` attached.
-
- :return:
- Returns a :py:class:`datetime.datetime` object representing the
- wall time in ``self``'s time zone.
- """
- # These isinstance checks are in datetime.tzinfo, so we'll preserve
- # them, even if we don't care about duck typing.
- if not isinstance(dt, datetime.datetime):
- raise TypeError("fromutc() requires a datetime argument")
-
- if dt.tzinfo is not self:
- raise ValueError("dt.tzinfo is not self")
-
- # First treat UTC as wall time and get the transition we're in.
- idx = self._find_last_transition(dt, in_utc=True)
- tti = self._get_ttinfo(idx)
-
- dt_out = dt + datetime.timedelta(seconds=tti.offset)
-
- fold = self.is_ambiguous(dt_out, idx=idx)
-
- return enfold(dt_out, fold=int(fold))
-
- def is_ambiguous(self, dt, idx=None):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- if idx is None:
- idx = self._find_last_transition(dt)
-
- # Calculate the difference in offsets from current to previous
- timestamp = _datetime_to_timestamp(dt)
- tti = self._get_ttinfo(idx)
-
- if idx is None or idx <= 0:
- return False
-
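-        # For example (a sketch): at a fall-back transition from UTC-4 to
-        # UTC-5, od == (-4 * 3600) - (-5 * 3600) == 3600, so wall times in
-        # [tt, tt + 3600) occur twice and are reported as ambiguous.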
- od = self._get_ttinfo(idx - 1).offset - tti.offset
- tt = self._trans_list[idx] # Transition time
-
- return timestamp < tt + od
-
- def _resolve_ambiguous_time(self, dt):
- idx = self._find_last_transition(dt)
-
- # If we have no transitions, return the index
- _fold = self._fold(dt)
- if idx is None or idx == 0:
- return idx
-
- # If it's ambiguous and we're in a fold, shift to a different index.
- idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
-
- return idx - idx_offset
-
- def utcoffset(self, dt):
- if dt is None:
- return None
-
- if not self._ttinfo_std:
- return ZERO
-
- return self._find_ttinfo(dt).delta
-
- def dst(self, dt):
- if dt is None:
- return None
-
- if not self._ttinfo_dst:
- return ZERO
-
- tti = self._find_ttinfo(dt)
-
- if not tti.isdst:
- return ZERO
-
- # The documentation says that utcoffset()-dst() must
- # be constant for every dt.
- return tti.dstoffset
-
- @tzname_in_python2
- def tzname(self, dt):
- if not self._ttinfo_std or dt is None:
- return None
- return self._find_ttinfo(dt).abbr
-
- def __eq__(self, other):
- if not isinstance(other, tzfile):
- return NotImplemented
- return (self._trans_list == other._trans_list and
- self._trans_idx == other._trans_idx and
- self._ttinfo_list == other._ttinfo_list)
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
-
- def __reduce__(self):
- return self.__reduce_ex__(None)
-
- def __reduce_ex__(self, protocol):
- return (self.__class__, (None, self._filename), self.__dict__)
-
-
-class tzrange(tzrangebase):
- """
- The ``tzrange`` object is a time zone specified by a set of offsets and
- abbreviations, equivalent to the way the ``TZ`` variable can be specified
- in POSIX-like systems, but using Python delta objects to specify DST
- start, end and offsets.
-
- :param stdabbr:
- The abbreviation for standard time (e.g. ``'EST'``).
-
- :param stdoffset:
- An integer or :class:`datetime.timedelta` object or equivalent
- specifying the base offset from UTC.
-
- If unspecified, +00:00 is used.
-
- :param dstabbr:
- The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
-
- If specified, with no other DST information, DST is assumed to occur
-        and the default behavior of ``dstoffset``, ``start`` and ``end`` is
- used. If unspecified and no other DST information is specified, it
- is assumed that this zone has no DST.
-
-        If this is unspecified and other DST information *is* specified,
- DST occurs in the zone but the time zone abbreviation is left
- unchanged.
-
- :param dstoffset:
-        An integer or :class:`datetime.timedelta` object or equivalent
- specifying the UTC offset during DST. If unspecified and any other DST
- information is specified, it is assumed to be the STD offset +1 hour.
-
- :param start:
- A :class:`relativedelta.relativedelta` object or equivalent specifying
- the time and time of year that daylight savings time starts. To
- specify, for example, that DST starts at 2AM on the 2nd Sunday in
- March, pass:
-
- ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
-
- If unspecified and any other DST information is specified, the default
- value is 2 AM on the first Sunday in April.
-
- :param end:
- A :class:`relativedelta.relativedelta` object or equivalent
- representing the time and time of year that daylight savings time
- ends, with the same specification method as in ``start``. One note is
- that this should point to the first time in the *standard* zone, so if
- a transition occurs at 2AM in the DST zone and the clocks are set back
- 1 hour to 1AM, set the ``hours`` parameter to +1.
-
-
- **Examples:**
-
- .. testsetup:: tzrange
-
- from dateutil.tz import tzrange, tzstr
-
- .. doctest:: tzrange
-
- >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
- True
-
- >>> from dateutil.relativedelta import *
- >>> range1 = tzrange("EST", -18000, "EDT")
- >>> range2 = tzrange("EST", -18000, "EDT", -14400,
- ... relativedelta(hours=+2, month=4, day=1,
- ... weekday=SU(+1)),
- ... relativedelta(hours=+1, month=10, day=31,
- ... weekday=SU(-1)))
- >>> tzstr('EST5EDT') == range1 == range2
- True
-
- """
- def __init__(self, stdabbr, stdoffset=None,
- dstabbr=None, dstoffset=None,
- start=None, end=None):
-
- global relativedelta
- from dateutil import relativedelta
-
- self._std_abbr = stdabbr
- self._dst_abbr = dstabbr
-
- try:
- stdoffset = stdoffset.total_seconds()
- except (TypeError, AttributeError):
- pass
-
- try:
- dstoffset = dstoffset.total_seconds()
- except (TypeError, AttributeError):
- pass
-
- if stdoffset is not None:
- self._std_offset = datetime.timedelta(seconds=stdoffset)
- else:
- self._std_offset = ZERO
-
- if dstoffset is not None:
- self._dst_offset = datetime.timedelta(seconds=dstoffset)
- elif dstabbr and stdoffset is not None:
- self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
- else:
- self._dst_offset = ZERO
-
- if dstabbr and start is None:
- self._start_delta = relativedelta.relativedelta(
- hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
- else:
- self._start_delta = start
-
- if dstabbr and end is None:
- self._end_delta = relativedelta.relativedelta(
- hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
- else:
- self._end_delta = end
-
- self._dst_base_offset_ = self._dst_offset - self._std_offset
- self.hasdst = bool(self._start_delta)
-
- def transitions(self, year):
- """
- For a given year, get the DST on and off transition times, expressed
- always on the standard time side. For zones with no transitions, this
- function returns ``None``.
-
- :param year:
- The year whose transitions you would like to query.
-
- :return:
- Returns a :class:`tuple` of :class:`datetime.datetime` objects,
- ``(dston, dstoff)`` for zones with an annual DST transition, or
- ``None`` for fixed offset zones.
- """
- if not self.hasdst:
- return None
-
- base_year = datetime.datetime(year, 1, 1)
-
- start = base_year + self._start_delta
- end = base_year + self._end_delta
-
- return (start, end)
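-
-    # Illustrative call (a sketch using the default first-Sunday-in-April /
-    # last-Sunday-in-October rules documented above):
-    #
-    #     tzrange("EST", -18000, "EDT").transitions(2003)
-    #     # -> (datetime(2003, 4, 6, 2, 0), datetime(2003, 10, 26, 1, 0))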
-
- def __eq__(self, other):
- if not isinstance(other, tzrange):
- return NotImplemented
-
- return (self._std_abbr == other._std_abbr and
- self._dst_abbr == other._dst_abbr and
- self._std_offset == other._std_offset and
- self._dst_offset == other._dst_offset and
- self._start_delta == other._start_delta and
- self._end_delta == other._end_delta)
-
- @property
- def _dst_base_offset(self):
- return self._dst_base_offset_
-
-
-@six.add_metaclass(_TzStrFactory)
-class tzstr(tzrange):
- """
- ``tzstr`` objects are time zone objects specified by a time-zone string as
- it would be passed to a ``TZ`` variable on POSIX-style systems (see
- the `GNU C Library: TZ Variable`_ for more details).
-
- There is one notable exception, which is that POSIX-style time zones use an
- inverted offset format, so normally ``GMT+3`` would be parsed as an offset
- 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
- offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
- behavior, pass a ``True`` value to ``posix_offset``.
-
- The :class:`tzrange` object provides the same functionality, but is
-    specified using :class:`relativedelta.relativedelta` objects rather than
- strings.
-
- :param s:
- A time zone string in ``TZ`` variable format. This can be a
- :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x:
- :class:`unicode`) or a stream emitting unicode characters
- (e.g. :class:`StringIO`).
-
- :param posix_offset:
- Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
- ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
- POSIX standard.
-
- .. caution::
-
- Prior to version 2.7.0, this function also supported time zones
- in the format:
-
- * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600``
- * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600``
-
- This format is non-standard and has been deprecated; this function
-        will raise a :class:`DeprecatedTzFormatWarning` until
- support is removed in a future version.
-
- .. _`GNU C Library: TZ Variable`:
- https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
- """
- def __init__(self, s, posix_offset=False):
- global parser
- from dateutil.parser import _parser as parser
-
- self._s = s
-
- res = parser._parsetz(s)
- if res is None or res.any_unused_tokens:
- raise ValueError("unknown string format")
-
- # Here we break the compatibility with the TZ variable handling.
- # GMT-3 actually *means* the timezone -3.
- if res.stdabbr in ("GMT", "UTC") and not posix_offset:
- res.stdoffset *= -1
-
- # We must initialize it first, since _delta() needs
- # _std_offset and _dst_offset set. Use False in start/end
- # to avoid building it two times.
- tzrange.__init__(self, res.stdabbr, res.stdoffset,
- res.dstabbr, res.dstoffset,
- start=False, end=False)
-
- if not res.dstabbr:
- self._start_delta = None
- self._end_delta = None
- else:
- self._start_delta = self._delta(res.start)
- if self._start_delta:
- self._end_delta = self._delta(res.end, isend=1)
-
- self.hasdst = bool(self._start_delta)
-
- def _delta(self, x, isend=0):
- from dateutil import relativedelta
- kwargs = {}
- if x.month is not None:
- kwargs["month"] = x.month
-            if x.weekday is not None:
-                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
-                if x.week > 0:
-                    kwargs["day"] = 1
-                else:
-                    kwargs["day"] = 31
-            elif x.day:
-                kwargs["day"] = x.day
- elif x.yday is not None:
- kwargs["yearday"] = x.yday
- elif x.jyday is not None:
- kwargs["nlyearday"] = x.jyday
- if not kwargs:
- # Default is to start on first sunday of april, and end
- # on last sunday of october.
- if not isend:
- kwargs["month"] = 4
- kwargs["day"] = 1
- kwargs["weekday"] = relativedelta.SU(+1)
- else:
- kwargs["month"] = 10
- kwargs["day"] = 31
- kwargs["weekday"] = relativedelta.SU(-1)
- if x.time is not None:
- kwargs["seconds"] = x.time
- else:
- # Default is 2AM.
- kwargs["seconds"] = 7200
- if isend:
- # Convert to standard time, to follow the documented way
- # of working with the extra hour. See the documentation
- # of the tzinfo class.
- delta = self._dst_offset - self._std_offset
- kwargs["seconds"] -= delta.seconds + delta.days * 86400
- return relativedelta.relativedelta(**kwargs)
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, repr(self._s))
-
-
-class _tzicalvtzcomp(object):
- def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
- tzname=None, rrule=None):
- self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
- self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
- self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
- self.isdst = isdst
- self.tzname = tzname
- self.rrule = rrule
-
-
-class _tzicalvtz(_tzinfo):
- def __init__(self, tzid, comps=[]):
- super(_tzicalvtz, self).__init__()
-
- self._tzid = tzid
- self._comps = comps
- self._cachedate = []
- self._cachecomp = []
- self._cache_lock = _thread.allocate_lock()
-
- def _find_comp(self, dt):
- if len(self._comps) == 1:
- return self._comps[0]
-
- dt = dt.replace(tzinfo=None)
-
- try:
- with self._cache_lock:
- return self._cachecomp[self._cachedate.index(
- (dt, self._fold(dt)))]
- except ValueError:
- pass
-
- lastcompdt = None
- lastcomp = None
-
- for comp in self._comps:
- compdt = self._find_compdt(comp, dt)
-
- if compdt and (not lastcompdt or lastcompdt < compdt):
- lastcompdt = compdt
- lastcomp = comp
-
- if not lastcomp:
- # RFC says nothing about what to do when a given
- # time is before the first onset date. We'll look for the
- # first standard component, or the first component, if
- # none is found.
- for comp in self._comps:
- if not comp.isdst:
- lastcomp = comp
- break
- else:
-                lastcomp = self._comps[0]
-
- with self._cache_lock:
- self._cachedate.insert(0, (dt, self._fold(dt)))
- self._cachecomp.insert(0, lastcomp)
-
- if len(self._cachedate) > 10:
- self._cachedate.pop()
- self._cachecomp.pop()
-
- return lastcomp
-
- def _find_compdt(self, comp, dt):
- if comp.tzoffsetdiff < ZERO and self._fold(dt):
- dt -= comp.tzoffsetdiff
-
- compdt = comp.rrule.before(dt, inc=True)
-
- return compdt
-
- def utcoffset(self, dt):
- if dt is None:
- return None
-
- return self._find_comp(dt).tzoffsetto
-
- def dst(self, dt):
- comp = self._find_comp(dt)
- if comp.isdst:
- return comp.tzoffsetdiff
- else:
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- return self._find_comp(dt).tzname
-
- def __repr__(self):
- return "" % repr(self._tzid)
-
- __reduce__ = object.__reduce__
-
-
-class tzical(object):
- """
- This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
- as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects.
-
- :param `fileobj`:
- A file or stream in iCalendar format, which should be UTF-8 encoded
- with CRLF endings.
-
- .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545
- """
- def __init__(self, fileobj):
- global rrule
- from dateutil import rrule
-
- if isinstance(fileobj, string_types):
- self._s = fileobj
- # ical should be encoded in UTF-8 with CRLF
- fileobj = open(fileobj, 'r')
- else:
- self._s = getattr(fileobj, 'name', repr(fileobj))
- fileobj = _nullcontext(fileobj)
-
- self._vtz = {}
-
- with fileobj as fobj:
- self._parse_rfc(fobj.read())
-
- def keys(self):
- """
- Retrieves the available time zones as a list.
- """
- return list(self._vtz.keys())
-
- def get(self, tzid=None):
- """
- Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
-
- :param tzid:
- If there is exactly one time zone available, omitting ``tzid``
- or passing :py:const:`None` value returns it. Otherwise a valid
- key (which can be retrieved from :func:`keys`) is required.
-
- :raises ValueError:
-            Raised if ``tzid`` is not specified but there is either more
-            than one zone or no zone defined.
-
- :returns:
- Returns either a :py:class:`datetime.tzinfo` object representing
- the relevant time zone or :py:const:`None` if the ``tzid`` was
- not found.
- """
- if tzid is None:
- if len(self._vtz) == 0:
- raise ValueError("no timezones defined")
- elif len(self._vtz) > 1:
- raise ValueError("more than one timezone available")
- tzid = next(iter(self._vtz))
-
- return self._vtz.get(tzid)
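-
-    # Typical usage (a sketch; assumes 'example.ics' defines one VTIMEZONE):
-    #
-    #     tz = tzical('example.ics').get()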
-
- def _parse_offset(self, s):
- s = s.strip()
- if not s:
- raise ValueError("empty offset")
- if s[0] in ('+', '-'):
- signal = (-1, +1)[s[0] == '+']
- s = s[1:]
- else:
- signal = +1
- if len(s) == 4:
- return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
- elif len(s) == 6:
- return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
- else:
- raise ValueError("invalid offset: " + s)
-
- def _parse_rfc(self, s):
- lines = s.splitlines()
- if not lines:
- raise ValueError("empty string")
-
- # Unfold
- i = 0
- while i < len(lines):
- line = lines[i].rstrip()
- if not line:
- del lines[i]
- elif i > 0 and line[0] == " ":
- lines[i-1] += line[1:]
- del lines[i]
- else:
- i += 1
-
- tzid = None
- comps = []
- invtz = False
- comptype = None
- for line in lines:
- if not line:
- continue
- name, value = line.split(':', 1)
- parms = name.split(';')
- if not parms:
- raise ValueError("empty property name")
- name = parms[0].upper()
- parms = parms[1:]
- if invtz:
- if name == "BEGIN":
- if value in ("STANDARD", "DAYLIGHT"):
- # Process component
- pass
- else:
- raise ValueError("unknown component: "+value)
- comptype = value
- founddtstart = False
- tzoffsetfrom = None
- tzoffsetto = None
- rrulelines = []
- tzname = None
- elif name == "END":
- if value == "VTIMEZONE":
- if comptype:
- raise ValueError("component not closed: "+comptype)
- if not tzid:
- raise ValueError("mandatory TZID not found")
- if not comps:
- raise ValueError(
- "at least one component is needed")
- # Process vtimezone
- self._vtz[tzid] = _tzicalvtz(tzid, comps)
- invtz = False
- elif value == comptype:
- if not founddtstart:
- raise ValueError("mandatory DTSTART not found")
- if tzoffsetfrom is None:
- raise ValueError(
- "mandatory TZOFFSETFROM not found")
- if tzoffsetto is None:
- raise ValueError(
- "mandatory TZOFFSETFROM not found")
- # Process component
- rr = None
- if rrulelines:
- rr = rrule.rrulestr("\n".join(rrulelines),
- compatible=True,
- ignoretz=True,
- cache=True)
- comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
- (comptype == "DAYLIGHT"),
- tzname, rr)
- comps.append(comp)
- comptype = None
- else:
- raise ValueError("invalid component end: "+value)
- elif comptype:
- if name == "DTSTART":
- # DTSTART in VTIMEZONE takes a subset of valid RRULE
- # values under RFC 5545.
- for parm in parms:
- if parm != 'VALUE=DATE-TIME':
- msg = ('Unsupported DTSTART param in ' +
- 'VTIMEZONE: ' + parm)
- raise ValueError(msg)
- rrulelines.append(line)
- founddtstart = True
- elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
- rrulelines.append(line)
- elif name == "TZOFFSETFROM":
- if parms:
- raise ValueError(
- "unsupported %s parm: %s " % (name, parms[0]))
- tzoffsetfrom = self._parse_offset(value)
- elif name == "TZOFFSETTO":
- if parms:
- raise ValueError(
- "unsupported TZOFFSETTO parm: "+parms[0])
- tzoffsetto = self._parse_offset(value)
- elif name == "TZNAME":
- if parms:
- raise ValueError(
- "unsupported TZNAME parm: "+parms[0])
- tzname = value
- elif name == "COMMENT":
- pass
- else:
- raise ValueError("unsupported property: "+name)
- else:
- if name == "TZID":
- if parms:
- raise ValueError(
- "unsupported TZID parm: "+parms[0])
- tzid = value
- elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
- pass
- else:
- raise ValueError("unsupported property: "+name)
- elif name == "BEGIN" and value == "VTIMEZONE":
- tzid = None
- comps = []
- invtz = True
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, repr(self._s))
-
-
-if sys.platform != "win32":
- TZFILES = ["/etc/localtime", "localtime"]
- TZPATHS = ["/usr/share/zoneinfo",
- "/usr/lib/zoneinfo",
- "/usr/share/lib/zoneinfo",
- "/etc/zoneinfo"]
-else:
- TZFILES = []
- TZPATHS = []
-
-
-def __get_gettz():
- tzlocal_classes = (tzlocal,)
- if tzwinlocal is not None:
- tzlocal_classes += (tzwinlocal,)
-
- class GettzFunc(object):
- """
- Retrieve a time zone object from a string representation
-
- This function is intended to retrieve the :py:class:`tzinfo` subclass
- that best represents the time zone that would be used if a POSIX
- `TZ variable`_ were set to the same value.
-
- If no argument or an empty string is passed to ``gettz``, local time
- is returned:
-
- .. code-block:: python3
-
- >>> gettz()
- tzfile('/etc/localtime')
-
- This function is also the preferred way to map IANA tz database keys
- to :class:`tzfile` objects:
-
- .. code-block:: python3
-
- >>> gettz('Pacific/Kiritimati')
- tzfile('/usr/share/zoneinfo/Pacific/Kiritimati')
-
- On Windows, the standard is extended to include the Windows-specific
- zone names provided by the operating system:
-
- .. code-block:: python3
-
- >>> gettz('Egypt Standard Time')
- tzwin('Egypt Standard Time')
-
- Passing a GNU ``TZ`` style string time zone specification returns a
- :class:`tzstr` object:
-
- .. code-block:: python3
-
- >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
- tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
-
- :param name:
- A time zone name (IANA, or, on Windows, Windows keys), location of
- a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone
- specifier. An empty string, no argument or ``None`` is interpreted
- as local time.
-
- :return:
- Returns an instance of one of ``dateutil``'s :py:class:`tzinfo`
- subclasses.
-
- .. versionchanged:: 2.7.0
-
- After version 2.7.0, any two calls to ``gettz`` using the same
- input strings will return the same object:
-
- .. code-block:: python3
-
- >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago')
- True
-
- In addition to improving performance, this ensures that
- `"same zone" semantics`_ are used for datetimes in the same zone.
-
-
- .. _`TZ variable`:
- https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
-
- .. _`"same zone" semantics`:
- https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html
- """
- def __init__(self):
-
- self.__instances = weakref.WeakValueDictionary()
- self.__strong_cache_size = 8
- self.__strong_cache = OrderedDict()
- self._cache_lock = _thread.allocate_lock()
-
- def __call__(self, name=None):
- with self._cache_lock:
- rv = self.__instances.get(name, None)
-
- if rv is None:
- rv = self.nocache(name=name)
- if not (name is None
- or isinstance(rv, tzlocal_classes)
- or rv is None):
- # tzlocal is slightly more complicated than the other
- # time zone providers because it depends on environment
- # at construction time, so don't cache that.
- #
- # We also cannot store weak references to None, so we
- # will also not store that.
- self.__instances[name] = rv
- else:
- # No need for strong caching, return immediately
- return rv
-
- self.__strong_cache[name] = self.__strong_cache.pop(name, rv)
-
- if len(self.__strong_cache) > self.__strong_cache_size:
- self.__strong_cache.popitem(last=False)
-
- return rv
-
- def set_cache_size(self, size):
- with self._cache_lock:
- self.__strong_cache_size = size
- while len(self.__strong_cache) > size:
- self.__strong_cache.popitem(last=False)
-
- def cache_clear(self):
- with self._cache_lock:
- self.__instances = weakref.WeakValueDictionary()
- self.__strong_cache.clear()
-
- @staticmethod
- def nocache(name=None):
- """A non-cached version of gettz"""
- tz = None
- if not name:
- try:
- name = os.environ["TZ"]
- except KeyError:
- pass
- if name is None or name in ("", ":"):
- for filepath in TZFILES:
- if not os.path.isabs(filepath):
- filename = filepath
- for path in TZPATHS:
- filepath = os.path.join(path, filename)
- if os.path.isfile(filepath):
- break
- else:
- continue
- if os.path.isfile(filepath):
- try:
- tz = tzfile(filepath)
- break
- except (IOError, OSError, ValueError):
- pass
- else:
- tz = tzlocal()
- else:
- try:
- if name.startswith(":"):
- name = name[1:]
- except TypeError as e:
- if isinstance(name, bytes):
- new_msg = "gettz argument should be str, not bytes"
- six.raise_from(TypeError(new_msg), e)
- else:
- raise
- if os.path.isabs(name):
- if os.path.isfile(name):
- tz = tzfile(name)
- else:
- tz = None
- else:
- for path in TZPATHS:
- filepath = os.path.join(path, name)
- if not os.path.isfile(filepath):
- filepath = filepath.replace(' ', '_')
- if not os.path.isfile(filepath):
- continue
- try:
- tz = tzfile(filepath)
- break
- except (IOError, OSError, ValueError):
- pass
- else:
- tz = None
- if tzwin is not None:
- try:
- tz = tzwin(name)
- except (WindowsError, UnicodeEncodeError):
- # UnicodeEncodeError is for Python 2.7 compat
- tz = None
-
- if not tz:
- from dateutil.zoneinfo import get_zonefile_instance
- tz = get_zonefile_instance().get(name)
-
- if not tz:
- for c in name:
- # name is not a tzstr unless it has at least
- # one offset. For short values of "name", an
- # explicit for loop seems to be the fastest way
-                # to determine if a string contains a digit.
- if c in "0123456789":
- try:
- tz = tzstr(name)
- except ValueError:
- pass
- break
- else:
- if name in ("GMT", "UTC"):
- tz = UTC
- elif name in time.tzname:
- tz = tzlocal()
- return tz
-
- return GettzFunc()
-
-
-gettz = __get_gettz()
-del __get_gettz
-
-
-def datetime_exists(dt, tz=None):
- """
- Given a datetime and a time zone, determine whether or not a given datetime
- would fall in a gap.
-
- :param dt:
- A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
- is provided.)
-
- :param tz:
- A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
- ``None`` or not provided, the datetime's own time zone will be used.
-
- :return:
-        Returns a boolean value indicating whether or not the "wall time"
-        exists in ``tz``.
-
- .. versionadded:: 2.7.0
- """
- if tz is None:
- if dt.tzinfo is None:
- raise ValueError('Datetime is naive and no time zone provided.')
- tz = dt.tzinfo
-
- dt = dt.replace(tzinfo=None)
-
- # This is essentially a test of whether or not the datetime can survive
- # a round trip to UTC.
- dt_rt = dt.replace(tzinfo=tz).astimezone(UTC).astimezone(tz)
- dt_rt = dt_rt.replace(tzinfo=None)
-
- return dt == dt_rt
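-
-# For example (a sketch; 2:30 AM on 2011-03-13 falls in the US Eastern
-# spring-forward gap):
-#
-#     >>> from datetime import datetime
-#     >>> from dateutil import tz
-#     >>> tz.datetime_exists(datetime(2011, 3, 13, 2, 30), tz.gettz('America/New_York'))
-#     False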
-
-
-def datetime_ambiguous(dt, tz=None):
- """
- Given a datetime and a time zone, determine whether or not a given datetime
-    is ambiguous (i.e. if there are two times differentiated only by their DST
- status).
-
- :param dt:
- A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
- is provided.)
-
- :param tz:
- A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
- ``None`` or not provided, the datetime's own time zone will be used.
-
- :return:
-        Returns a boolean value indicating whether or not the "wall time" is
-        ambiguous in ``tz``.
-
- .. versionadded:: 2.6.0
- """
- if tz is None:
- if dt.tzinfo is None:
- raise ValueError('Datetime is naive and no time zone provided.')
-
- tz = dt.tzinfo
-
- # If a time zone defines its own "is_ambiguous" function, we'll use that.
- is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
- if is_ambiguous_fn is not None:
- try:
- return tz.is_ambiguous(dt)
- except Exception:
- pass
-
- # If it doesn't come out and tell us it's ambiguous, we'll just check if
- # the fold attribute has any effect on this particular date and time.
- dt = dt.replace(tzinfo=tz)
- wall_0 = enfold(dt, fold=0)
- wall_1 = enfold(dt, fold=1)
-
- same_offset = wall_0.utcoffset() == wall_1.utcoffset()
- same_dst = wall_0.dst() == wall_1.dst()
-
- return not (same_offset and same_dst)
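-
-# For example (a sketch; New York clocks fell back from 2:00 to 1:00 AM on
-# 2011-11-06, so 1:30 AM occurred twice):
-#
-#     >>> tz.datetime_ambiguous(datetime(2011, 11, 6, 1, 30), tz.gettz('America/New_York'))
-#     True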
-
-
-def resolve_imaginary(dt):
- """
- Given a datetime that may be imaginary, return an existing datetime.
-
- This function assumes that an imaginary datetime represents what the
- wall time would be in a zone had the offset transition not occurred, so
- it will always fall forward by the transition's change in offset.
-
- .. doctest::
-
- >>> from dateutil import tz
- >>> from datetime import datetime
- >>> NYC = tz.gettz('America/New_York')
- >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC)))
- 2017-03-12 03:30:00-04:00
-
- >>> KIR = tz.gettz('Pacific/Kiritimati')
- >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR)))
- 1995-01-02 12:30:00+14:00
-
- As a note, :func:`datetime.astimezone` is guaranteed to produce a valid,
- existing datetime, so a round-trip to and from UTC is sufficient to get
-    an extant datetime; however, this generally "falls back" to an earlier time
- rather than falling forward to the STD side (though no guarantees are made
- about this behavior).
-
- :param dt:
- A :class:`datetime.datetime` which may or may not exist.
-
- :return:
- Returns an existing :class:`datetime.datetime`. If ``dt`` was not
- imaginary, the datetime returned is guaranteed to be the same object
- passed to the function.
-
- .. versionadded:: 2.7.0
- """
- if dt.tzinfo is not None and not datetime_exists(dt):
-
- curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset()
- old_offset = (dt - datetime.timedelta(hours=24)).utcoffset()
-
- dt += curr_offset - old_offset
-
- return dt
-
-
-def _datetime_to_timestamp(dt):
- """
- Convert a :class:`datetime.datetime` object to an epoch timestamp in
- seconds since January 1, 1970, ignoring the time zone.
- """
- return (dt.replace(tzinfo=None) - EPOCH).total_seconds()
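-
-# e.g. _datetime_to_timestamp(datetime.datetime(1970, 1, 2)) == 86400.0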
-
-
-if sys.version_info >= (3, 6):
- def _get_supported_offset(second_offset):
- return second_offset
-else:
- def _get_supported_offset(second_offset):
-        # For Python versions before 3.6, round to the nearest full minute,
-        # since datetime doesn't accept sub-minute time zone offsets. Check
-        # http://python.org/sf/1447945 or https://bugs.python.org/issue5288
-        # for more information.
- old_offset = second_offset
- calculated_offset = 60 * ((second_offset + 30) // 60)
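-        # e.g. an LMT offset of -17762 s (-4:56:02) rounds to -17760 s (-4:56)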
- return calculated_offset
-
-
-try:
- # Python 3.7 feature
- from contextlib import nullcontext as _nullcontext
-except ImportError:
- class _nullcontext(object):
- """
- Class for wrapping contexts so that they are passed through in a
- with statement.
- """
- def __init__(self, context):
- self.context = context
-
- def __enter__(self):
- return self.context
-
- def __exit__(*args, **kwargs):
- pass
-
-# vim:ts=4:sw=4:et
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/interpretation.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/interpretation.py
deleted file mode 100644
index fc476d99b813cd3ce1933112c7f002433ee36ebe..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/interpretation.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""gr.Interpretation() component"""
-
-from __future__ import annotations
-
-from typing import Any, Literal
-
-from gradio_client.documentation import document, set_documentation_group
-from gradio_client.serializing import SimpleSerializable
-
-from gradio.components.base import Component, _Keywords
-
-set_documentation_group("component")
-
-
-@document()
-class Interpretation(Component, SimpleSerializable):
- """
- Used to create an interpretation widget for a component.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {dict} with keys "original" and "interpretation".
-
- Guides: custom-interpretations-with-blocks
- """
-
- def __init__(
- self,
- component: Component,
- *,
- visible: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- component: Which component to show in the interpretation widget.
- visible: Whether or not the interpretation is visible.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- Component.__init__(
- self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs
- )
- self.component = component
-
- def get_config(self):
- return {
- "component": self.component.get_block_name(),
- "component_props": self.component.get_config(),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- visible: bool | None = None,
- ):
- return {
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
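-
-
-# A postprocessed value is a dict such as (a sketch; values illustrative):
-#     {"original": "good movie", "interpretation": [["good", 0.7], ["movie", 0.1]]}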
diff --git a/spaces/Dabs/Floyd-Steinberg-Dithering/app.py b/spaces/Dabs/Floyd-Steinberg-Dithering/app.py
deleted file mode 100644
index 86d93068c5353ab7104714f0a685ec23d50c50e8..0000000000000000000000000000000000000000
--- a/spaces/Dabs/Floyd-Steinberg-Dithering/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import numpy as np
-import gradio as gr
-
-def quantize(val, factor):
- quantized = (np.round(factor * np.array(val / 255)) * (255 / factor)).astype(int)
- return quantized
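-
-# e.g. with factor=1 each channel snaps to 0 or 255 (a sketch):
-#     quantize(100, 1) -> 0;  quantize(200, 1) -> 255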
-
-
-def fsd(factor, input_img):
-    img_arr = np.asarray(input_img)
-    # Work in float so diffusing negative errors can't wrap around in uint8.
-    new_img = np.copy(img_arr).astype(float)
-
-    for y in range(img_arr.shape[1] - 1):
-        # Start at 1 so the x - 1 neighbour below can't wrap to the last row.
-        for x in range(1, img_arr.shape[0] - 1):
-            old_pixel = new_img[x, y].copy()
-            new_pixel = quantize(old_pixel, factor)
-            new_img[x, y] = new_pixel
-
-            # Distribute the quantization error to the unvisited neighbours
-            # with the classic 7/16, 3/16, 5/16, 1/16 weights.
-            quant_error = old_pixel - new_pixel
-            new_img[x + 1][y] = new_img[x + 1][y] + quant_error * 7 / 16
-            new_img[x - 1][y + 1] = new_img[x - 1][y + 1] + quant_error * 3 / 16
-            new_img[x][y + 1] = new_img[x][y + 1] + quant_error * 5 / 16
-            new_img[x + 1][y + 1] = new_img[x + 1][y + 1] + quant_error * 1 / 16
-    return np.clip(new_img, 0, 255).astype(np.uint8)
-
-
-iface = gr.Interface(fsd,
- [gr.inputs.Slider(1, 10, 1),
- "image"],
- "pil",
- title="Floyd Steinberg dithering",
- description="Floyd Steinberg dithering algorithm")
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/DaleChen/AutoGPT/ui/api.py b/spaces/DaleChen/AutoGPT/ui/api.py
deleted file mode 100644
index 3b46ad32148b23f06c6eb64c88708fc2bf92e4dc..0000000000000000000000000000000000000000
--- a/spaces/DaleChen/AutoGPT/ui/api.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import os, sys
-import utils
-import uuid
-import json
-import subprocess, threading
-
-FILE_DIR = os.path.dirname(os.path.abspath(__file__))
-REPO_DIR = os.path.dirname(FILE_DIR)
-STATE_DIR = os.path.join(FILE_DIR, "state")
-sys.path.append(REPO_DIR)
-if not os.path.exists(STATE_DIR):
- os.mkdir(STATE_DIR)
-import time
-
-
-def get_openai_api_key():
- return os.getenv("OPENAI_API_KEY")
-
-
-running_apis = []
-
-
-def get_state(state_file):
- with open(state_file, "r") as f:
- state = json.load(f)
- return state
-
-
-def set_state(state_file, state):
- with open(state_file, "w") as f:
- json.dump(state, f)
-
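-# e.g. (a sketch): set_state('state.json', {'messages': []}) writes the file
-# and get_state('state.json') then returns {'messages': []}.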
-
-class AutoAPI:
- def __init__(self, openai_key, ai_name, ai_role, top_5_goals):
- self.openai_key = openai_key
- hex = uuid.uuid4().hex
- print(hex)
- self.state_file = os.path.join(STATE_DIR, f"state_{hex}.json")
- self.log_file = os.path.join(STATE_DIR, f"log_{hex}.json")
-
- newline = "\n"
- with open(os.path.join(REPO_DIR, "ai_settings.yaml"), "w") as f:
- f.write(
- f"""ai_goals:
-{newline.join([f'- {goal[0]}' for goal in top_5_goals if goal[0]])}
-ai_name: {ai_name}
-ai_role: {ai_role}
-"""
- )
- state = {
- "pending_input": None,
- "awaiting_input": False,
- "messages": [],
- "last_message_read_index": -1,
- }
- set_state(self.state_file, state)
-
- with open(self.log_file, "w") as f:
- subprocess.Popen(
- [
- "python",
- os.path.join(REPO_DIR, "ui", "api.py"),
- openai_key,
- self.state_file,
- ],
- cwd=REPO_DIR,
- stdout=f,
- stderr=f,
- )
-
- def send_message(self, message="Y"):
- state = get_state(self.state_file)
- state["pending_input"] = message
- state["awaiting_input"] = False
- set_state(self.state_file, state)
-
- def get_chatbot_response(self):
- while True:
- state = get_state(self.state_file)
- if (
- state["awaiting_input"]
- and state["last_message_read_index"] >= len(state["messages"]) - 1
- ):
- break
- if state["last_message_read_index"] >= len(state["messages"]) - 1:
- time.sleep(1)
- else:
- state["last_message_read_index"] += 1
- title, content = state["messages"][state["last_message_read_index"]]
- yield (f"**{title.strip()}** " if title else "") + utils.remove_color(
- content
- ).replace("\n", " ")
- set_state(self.state_file, state)
-
-
-if __name__ == "__main__":
- print(sys.argv)
- _, openai_key, state_file = sys.argv
- os.environ["OPENAI_API_KEY"] = openai_key
- import autogpt.config.config
- from autogpt.logs import logger
- from autogpt.cli import main
- import autogpt.utils
- from autogpt.spinner import Spinner
-
- def add_message(title, content):
- state = get_state(state_file)
- state["messages"].append((title, content))
- set_state(state_file, state)
-
- def typewriter_log(title="", title_color="", content="", *args, **kwargs):
- add_message(title, content)
-
- def warn(message, title="", *args, **kwargs):
- add_message(title, message)
-
- def error(title, message="", *args, **kwargs):
- add_message(title, message)
-
- def clean_input(prompt=""):
- add_message(None, prompt)
- state = get_state(state_file)
- state["awaiting_input"] = True
- set_state(state_file, state)
- while state["pending_input"] is None:
- state = get_state(state_file)
- print("Waiting for input...")
- time.sleep(1)
- print("Got input")
- pending_input = state["pending_input"]
- state["pending_input"] = None
- set_state(state_file, state)
- return pending_input
-
- def spinner_start():
- add_message(None, "Thinking...")
-
- logger.typewriter_log = typewriter_log
- logger.warn = warn
- logger.error = error
- autogpt.utils.clean_input = clean_input
- Spinner.spin = spinner_start
-
- sys.argv = sys.argv[:1]
- main()
diff --git a/spaces/Daniel-Saeedi/auto-debias/app.py b/spaces/Daniel-Saeedi/auto-debias/app.py
deleted file mode 100644
index 86e26ffa198cb61ecf20a956348f74e080dc6bca..0000000000000000000000000000000000000000
--- a/spaces/Daniel-Saeedi/auto-debias/app.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-
-import gc
-
-# Download models
-bert_debiased = pipeline('fill-mask', model='Daniel-Saeedi/auto-debias-gender-bert-base-uncased')
-bert_original = pipeline('fill-mask', model='bert-base-uncased')
-
-albert_debiased = pipeline('fill-mask', model='Daniel-Saeedi/auto-debias-albert-base-v2-race')
-albert_original = pipeline('fill-mask', model='albert-base-v2')
-
-def make_result(unmask):
-    # NOTE: the original HTML markup was stripped in extraction; the
-    # <div>/<p> wrappers below are assumed reconstructions.
-    html = '<div>'
-
-    for word in unmask:
-        html += '<p>{} - Score: {}</p>'.format(word['token_str'], word['score'])
-
-    html += '</div>'
- return html
-
-
-
-def fill_mask(stmt,model):
- if model == 'bert-base-uncased-gender-debiased':
- return "
-Note: it takes about 30 seconds to infer 3D pose on Hugginface Spaces without
-self-contacts and 2.5 minutes with self-contacts (uncheck it if the input character
-sketch does not have self-contacts).
-
-'''
-
-
-def prepare():
- filename = "models_smplx_v1_1.zip"
- smpl_path = hf_hub_download(
- repo_id=REPO_ID,
- repo_type="model",
- filename=filename,
- use_auth_token=API_TOKEN,
- cache_dir=ASSET_DIR,
- )
- if not (ASSET_DIR / filename).is_file():
- shutil.copy(smpl_path, ASSET_DIR)
-
- subprocess.run("bash ./scripts/download.sh".split())
- subprocess.run("bash ./scripts/prepare.sh".split())
-
-
-def main():
- prepare()
-
- save_dir = Path(SAVE_DIR)
- save_dir.mkdir(parents=True, exist_ok=True)
-
- def pose(img_path, use_cos=True, use_angle_transf=True, use_contacts=False, use_natural=True):
- if use_cos == False:
- use_angle_transf = False
-
- cmd = CMD.format(save_dir, img_path)
- if use_cos:
- cmd = cmd + " --use-cos"
- if use_angle_transf:
- cmd = cmd + " --use-angle-transf"
- if use_contacts:
- cmd = cmd + " --use-contacts"
- if use_natural:
- cmd = cmd + " --use-natural"
-
- out_dir = (save_dir / Path(img_path).name).with_suffix("")
- mesh_path = out_dir / "us.glb"
-
- if not mesh_path.is_file():
- subprocess.call(cmd.split())
-
- return str(mesh_path)
-
- examples = []
- use_contacts = torch.cuda.is_available()
- for img_path in Path("./data/images").glob("*"):
- examples.append([str(img_path), True, True, use_contacts, True])
-
- demo = gr.Interface(
- fn=pose,
- inputs=[
- gr.Image(type="filepath", label="Image"),
- gr.Checkbox(value=True, label="Bone lenghts"),
- gr.Checkbox(value=True, label="Foreshortening"),
- gr.Checkbox(value=use_contacts, label="Self-contacts", interactive=use_contacts),
- gr.Checkbox(value=True, label="Pose naturalness"),
- ],
- outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL 3D pose"),
- examples=examples[:1], # 5] + examples[6:6 + 4],
- title=TITLE,
- description=DESCRIPTION,
- )
-
- demo.launch()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/SJTU-CL/argugpt-detector/app.py b/spaces/SJTU-CL/argugpt-detector/app.py
deleted file mode 100644
index 977417513d79f1848ecd4f396b4547de935a8386..0000000000000000000000000000000000000000
--- a/spaces/SJTU-CL/argugpt-detector/app.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import nltk
-nltk.download('punkt')
-
-import pandas as pd
-import gradio as gr
-
-from nltk import sent_tokenize
-from transformers import pipeline
-from gradio.themes.utils.colors import red, green
-
-detector = pipeline(task='text-classification', model='SJTU-CL/RoBERTa-large-ArguGPT-sent')
-
-color_map = {
- '0%': green.c400,
- '10%': green.c300,
- '20%': green.c200,
- '30%': green.c100,
- '40%': green.c50,
- '50%': red.c50,
- '60%': red.c100,
- '70%': red.c200,
- '80%': red.c300,
- '90%': red.c400,
- '100%': red.c500
-}
-
-
-def predict_doc(doc):
- sents = sent_tokenize(doc)
- data = {'sentence': [], 'label': [], 'score': []}
- res = []
- for sent in sents:
- prob = predict_one_sent(sent)
-
- data['sentence'].append(sent)
- data['score'].append(round(prob, 4))
- if prob <= 0.5:
- data['label'].append('Human')
- else: data['label'].append('Machine')
-
- if prob < 0.1: label = '0%'
- elif prob < 0.2: label = '10%'
- elif prob < 0.3: label = '20%'
- elif prob < 0.4: label = '30%'
- elif prob < 0.5: label = '40%'
- elif prob < 0.6: label = '50%'
- elif prob < 0.7: label = '60%'
- elif prob < 0.8: label = '70%'
- elif prob < 0.9: label = '80%'
- elif prob < 1: label = '90%'
- else: label = '100%'
- res.append((sent, label))
-
- df = pd.DataFrame(data)
- df.to_csv('result.csv')
- overall_score = df.score.mean()
- sum_str = ''
- if overall_score <= 0.5: overall_label = 'Human'
- else: overall_label = 'Machine'
- sum_str = f'The essay is probably written by {overall_label}. The probability of being generated by AI is {overall_score}'
-
- return sum_str, res, df, 'result.csv'
-
-
-def predict_one_sent(sent):
- '''
- convert to prob
- LABEL_1, 0.66 -> 0.66
- LABEL_0, 0.66 -> 0.34
- '''
- res = detector(sent)[0]
- org_label, prob = res['label'], res['score']
- if org_label == 'LABEL_0': prob = 1 - prob
- return prob
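-
-# e.g. (a sketch): a detector output of [{'label': 'LABEL_0', 'score': 0.9}]
-# maps to a machine-authorship probability of 0.1.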
-
-
-with gr.Blocks() as demo:
- with gr.Row():
- with gr.Column():
- text_in = gr.Textbox(
- lines=5,
- label='Essay input',
- info='Please enter the essay in the textbox'
- )
- btn = gr.Button('Predict who writes this essay!')
-
- sent_res = gr.Highlight(
- label='Labeled Result'
- ).style(color_map=color_map)
-
- with gr.Row():
- summary = gr.Text(
- label='Result summary'
- )
- csv_f = gr.File(
- label='CSV file storing data with all sentences.'
- )
-
- tab = gr.DataFrame(
- label='Table with Probability Score',
- max_rows=100
- )
- btn.click(predict_doc, inputs=[text_in], outputs=[summary, sent_res, tab, csv_f], api_name='predict_doc')
-
-demo.launch()
-
diff --git a/spaces/SLU-CSCI5750-SP2022/homework03_DigitClassificationKNN/app.py b/spaces/SLU-CSCI5750-SP2022/homework03_DigitClassificationKNN/app.py
deleted file mode 100644
index 73b238933a4f420eb05ed9fa5c2eb0fc37ad5404..0000000000000000000000000000000000000000
--- a/spaces/SLU-CSCI5750-SP2022/homework03_DigitClassificationKNN/app.py
+++ /dev/null
@@ -1,146 +0,0 @@
-## CSCI4750/5750: homework03 submission
-## load the dataset
-def hw03_derive_MNIST_train_test_data():
- from sklearn.datasets import fetch_openml
- import numpy as np
- mnist = fetch_openml('mnist_784', version=1, as_frame=False)
- X, y = mnist["data"], mnist["target"]
- X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
-    y_train = y_train.astype(int) # convert to int (np.int was removed in NumPy 1.24)
-    y_test = y_test.astype(int) # convert to int
- return X_train, X_test, y_train, y_test
-
-X_train, X_test, y_train, y_test = hw03_derive_MNIST_train_test_data()
-print("X_train.shape: ", X_train.shape)
-print("X_test.shape: ", X_test.shape)
-print("y_train.shape: ", y_train.shape)
-print("y_test.shape: ", y_test.shape)
-
-train_features = X_train
-train_labels = y_train
-test_feature = X_test[0]
-K = 3
-print("train_features: ",train_features.shape)
-print("train_labels: ",train_labels.shape)
-print("test_feature: ",test_feature.shape)
-
-# Practice 5: deploy our KNN classifier to web application, with multiple outputs
-
-import scipy.spatial
-import scipy.stats
-import gradio as gr
-import numpy as np
-import cv2
-import os
-
-def get_sample_images(num_images):
- sample_images = []
- for i in range(num_images):
- test_feature = X_test[i]
-        test_feature_2d = test_feature.reshape(28, 28)
-
- # Make it unsigned integers:
- data = test_feature_2d.astype(np.uint8)
-
- outdir = "images_folder"
- img_path = os.path.join(outdir, 'local_%05d.png' % (i,))
- if not os.path.exists(outdir):
- os.mkdir(outdir)
- cv2.imwrite(img_path, data)
-
- sample_images.append([img_path,int(np.random.choice([7,9,11,13,15,24]))]) # ["image path", "K"]
- return sample_images
-
-# EXTRA: adapted from https://github.com/ageron/handson-ml2/blob/master/03_classification.ipynb
-def plot_digits(instances, images_per_row=3):
- import matplotlib.pyplot as plt
- import matplotlib as mpl
- size = 28
- images_per_row = min(len(instances), images_per_row)
- # This is equivalent to n_rows = ceil(len(instances) / images_per_row):
- n_rows = (len(instances) - 1) // images_per_row + 1
-
- n = len(instances)
-
- fig = plt.figure(figsize=(15,8))
- for i in range(len(instances)):
- # Debug, plot figure
- fig.add_subplot(n_rows, images_per_row, i + 1)
- #print(instances[i])
- plt.imshow(instances[i].reshape(size,size), cmap = mpl.cm.binary)
- plt.axis("off")
- plt.title("Neighbor "+str(i+1), size=20)
- fig.tight_layout()
-
- plt.savefig('results.png', dpi=300)
- return 'results.png'
-
-
-## machine learning classifier
-def KNN_predict(train_features, train_labels, test_feature, K):
- label_record = []
- for i in range(len(train_features)):
- train_point_feature = train_features[i]
- test_point_feature = test_feature
- ### (1) calculate distance between test feature and each of training data points
-
- # get distance for data point i
- dis = scipy.spatial.distance.euclidean(train_point_feature, test_point_feature)
-
- # collect lable for datapoint i
- y = train_labels[i]
- label_record.append((dis, y, train_point_feature))
-
- # sort data points by distance
- from operator import itemgetter
- sorted_labels = sorted(label_record,key=itemgetter(0))
- # get major class from top K neighbors
- major_class = []
- neighbor_imgs = []
- for k in range(K):
- major_class.append(sorted_labels[k][1])
-
- # at most 24 neighbors for visualization
-        if k < 24:
- neighbor_feature = sorted_labels[k][2]
- neighbor_imgs.append(neighbor_feature)
-
- ### get final prediction
- final_prediction = scipy.stats.mode(major_class).mode[0]
-
- ### get neighbor images and save to local
- neighbor_imgs =np.array(neighbor_imgs)
- image_path = plot_digits(neighbor_imgs, images_per_row=6)
-
- return final_prediction, image_path
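-
-# Illustrative call (a sketch; uses the globals loaded above):
-#     pred, neighbors_png = KNN_predict(train_features, train_labels, X_test[0], K=3)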
-
-### main function for gradio to call to classify image
-def call_our_KNN(test_image, K=7):
-    test_image_flatten = test_image.reshape(28 * 28)  # 1-D vector, as scipy's euclidean expects
- y_pred_each, image_path = KNN_predict(train_features, train_labels, test_image_flatten, int(K))
- return y_pred_each, image_path
-
-
-### generate several example cases
-sample_images = get_sample_images(10)
-
-### configure inputs/outputs
-set_image = gr.inputs.Image(shape=(28, 28), image_mode='L')
-set_K = gr.inputs.Slider(1, 24, step=1, default=7)
-
-set_label = gr.outputs.Textbox(label="Predicted Digit")
-set_out_images = gr.outputs.Image(label="Closest Neighbors")
-
-
-### configure gradio, detailed can be found at https://www.gradio.app/docs/#i_slider
-interface = gr.Interface(fn=call_our_KNN,
- inputs=[set_image, set_K],
- outputs=[set_label,set_out_images],
- examples_per_page = 2,
- examples = sample_images,
- title="CSCI4750/5750(hw03): Digit classification using KNN algorithm",
- description= "Click examples below for a quick demo",
- theme = 'huggingface',
- layout = 'vertical',
- live=True
- )
-interface.launch(debug=True)
\ No newline at end of file
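
The per-point `scipy.spatial.distance.euclidean` loop above makes N Python-level calls per query, which is slow at MNIST scale. A minimal vectorized sketch of the same prediction, under the same assumptions as the app (`train_features` is an `(N, 784)` array, `train_labels` an `(N,)` label array; the function name is illustrative, not part of the app):

```python
import numpy as np
from collections import Counter

def knn_predict_vectorized(train_features, train_labels, test_feature, k=7):
    # All N distances in one broadcast instead of N scipy calls.
    dists = np.linalg.norm(train_features - test_feature.reshape(1, -1), axis=1)
    nearest = np.argsort(dists)[:k]                    # indices of the k closest points
    votes = Counter(np.asarray(train_labels)[nearest].tolist())
    return votes.most_common(1)[0][0]                  # majority label
```
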
diff --git a/spaces/Sacpapa/Zoidberg/README.md b/spaces/Sacpapa/Zoidberg/README.md
deleted file mode 100644
index 67fa1285477cce8e50eeded3ee5372176461c1d5..0000000000000000000000000000000000000000
--- a/spaces/Sacpapa/Zoidberg/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Zoidberg
-emoji: 🐠
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Salesforce/BLIP/data/flickr30k_dataset.py b/spaces/Salesforce/BLIP/data/flickr30k_dataset.py
deleted file mode 100644
index 018ab387014ddaf554c4d3184cfc0e2ba8b2d487..0000000000000000000000000000000000000000
--- a/spaces/Salesforce/BLIP/data/flickr30k_dataset.py
+++ /dev/null
@@ -1,93 +0,0 @@
-import os
-import json
-
-from torch.utils.data import Dataset
-from torchvision.datasets.utils import download_url
-
-from PIL import Image
-
-from data.utils import pre_caption
-
-class flickr30k_train(Dataset):
- def __init__(self, transform, image_root, ann_root, max_words=30, prompt=''):
- '''
- image_root (string): Root directory of images (e.g. flickr30k/)
- ann_root (string): directory to store the annotation file
- '''
- url = 'https://storage.googleapis.com/sfr-vision-language-research/datasets/flickr30k_train.json'
- filename = 'flickr30k_train.json'
-
- download_url(url,ann_root)
-
- self.annotation = json.load(open(os.path.join(ann_root,filename),'r'))
- self.transform = transform
- self.image_root = image_root
- self.max_words = max_words
- self.prompt = prompt
-
- self.img_ids = {}
- n = 0
- for ann in self.annotation:
- img_id = ann['image_id']
- if img_id not in self.img_ids.keys():
- self.img_ids[img_id] = n
- n += 1
-
- def __len__(self):
- return len(self.annotation)
-
- def __getitem__(self, index):
-
- ann = self.annotation[index]
-
- image_path = os.path.join(self.image_root,ann['image'])
- image = Image.open(image_path).convert('RGB')
- image = self.transform(image)
-
- caption = self.prompt+pre_caption(ann['caption'], self.max_words)
-
- return image, caption, self.img_ids[ann['image_id']]
-
-
-class flickr30k_retrieval_eval(Dataset):
- def __init__(self, transform, image_root, ann_root, split, max_words=30):
- '''
- image_root (string): Root directory of images (e.g. flickr30k/)
- ann_root (string): directory to store the annotation file
- split (string): val or test
- '''
- urls = {'val':'https://storage.googleapis.com/sfr-vision-language-research/datasets/flickr30k_val.json',
- 'test':'https://storage.googleapis.com/sfr-vision-language-research/datasets/flickr30k_test.json'}
- filenames = {'val':'flickr30k_val.json','test':'flickr30k_test.json'}
-
- download_url(urls[split],ann_root)
-
- self.annotation = json.load(open(os.path.join(ann_root,filenames[split]),'r'))
- self.transform = transform
- self.image_root = image_root
-
- self.text = []
- self.image = []
- self.txt2img = {}
- self.img2txt = {}
-
- txt_id = 0
- for img_id, ann in enumerate(self.annotation):
- self.image.append(ann['image'])
- self.img2txt[img_id] = []
- for i, caption in enumerate(ann['caption']):
- self.text.append(pre_caption(caption,max_words))
- self.img2txt[img_id].append(txt_id)
- self.txt2img[txt_id] = img_id
- txt_id += 1
-
- def __len__(self):
- return len(self.annotation)
-
- def __getitem__(self, index):
-
- image_path = os.path.join(self.image_root, self.annotation[index]['image'])
- image = Image.open(image_path).convert('RGB')
- image = self.transform(image)
-
- return image, index
\ No newline at end of file
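
The `txt2img` and `img2txt` maps built by `flickr30k_retrieval_eval` carry the ground truth for retrieval metrics: each caption index maps to its source image index and vice versa. As a hedged illustration of how such a map is typically scored (toy similarity matrix; none of this code is from the BLIP repo):

```python
import numpy as np

def text_to_image_recall_at_1(sim, txt2img):
    """sim: (num_texts, num_images) similarities; txt2img: caption index -> image index."""
    best = sim.argmax(axis=1)                     # most similar image per caption
    hits = sum(int(best[t] == img) for t, img in txt2img.items())
    return hits / len(txt2img)

# Toy check: 3 captions over 2 images; captions 1 and 2 belong to image 1.
sim = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
print(text_to_image_recall_at_1(sim, {0: 0, 1: 1, 2: 1}))  # 2/3: caption 2 misses
```
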
diff --git a/spaces/Sanathkumar1603/hackathon/app/main.py b/spaces/Sanathkumar1603/hackathon/app/main.py
deleted file mode 100644
index fdabbf687ca922516ef16da33b010e5402f48861..0000000000000000000000000000000000000000
--- a/spaces/Sanathkumar1603/hackathon/app/main.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import sys
-from pathlib import Path
-sys.path.append(str(Path(__file__).resolve().parent.parent))
-#print(sys.path)
-from typing import Any
-
-from fastapi import FastAPI, Request, APIRouter, File, UploadFile
-from fastapi.staticfiles import StaticFiles
-from fastapi.templating import Jinja2Templates
-from fastapi.middleware.cors import CORSMiddleware
-from app.config import settings
-from app import __version__
-from app.Hackathon_setup import face_recognition, exp_recognition
-
-import numpy as np
-from PIL import Image
-
-
-app = FastAPI(
- title=settings.PROJECT_NAME, openapi_url=f"{settings.API_V1_STR}/openapi.json"
-)
-
-# To store files uploaded by users
-app.mount("/static", StaticFiles(directory="app/static"), name="static")
-
-# To access Templates directory
-templates = Jinja2Templates(directory="app/templates")
-
-simi_filename1 = None
-simi_filename2 = None
-face_rec_filename = None
-expr_rec_filename = None
-
-
-#################################### Home Page endpoints #################################################
-@app.get("/")
-async def root(request: Request):
- return templates.TemplateResponse("index.html", {'request': request,})
-
-
-#################################### Face Similarity endpoints #################################################
-@app.get("/similarity/")
-async def similarity_root(request: Request):
- return templates.TemplateResponse("similarity.html", {'request': request,})
-
-
-@app.post("/predict_similarity/")
-async def create_upload_files(request: Request, file1: UploadFile = File(...), file2: UploadFile = File(...)):
- global simi_filename1
- global simi_filename2
-
- if 'image' in file1.content_type:
- contents = await file1.read()
- simi_filename1 = 'app/static/' + file1.filename
- with open(simi_filename1, 'wb') as f:
- f.write(contents)
-
- if 'image' in file2.content_type:
- contents = await file2.read()
- simi_filename2 = 'app/static/' + file2.filename
- with open(simi_filename2, 'wb') as f:
- f.write(contents)
-
- img1 = Image.open(simi_filename1)
- img1 = np.array(img1).reshape(img1.size[1], img1.size[0], 3).astype(np.uint8)
-
- img2 = Image.open(simi_filename2)
- img2 = np.array(img2).reshape(img2.size[1], img2.size[0], 3).astype(np.uint8)
-
- result = face_recognition.get_similarity(img1, img2)
- #print(result)
-
- return templates.TemplateResponse("predict_similarity.html", {"request": request,
- "result": np.round(result, 3),
- "simi_filename1": '../static/'+file1.filename,
- "simi_filename2": '../static/'+file2.filename,})
-
-
-#################################### Face Recognition endpoints #################################################
-@app.get("/face_recognition/")
-async def face_recognition_root(request: Request):
- return templates.TemplateResponse("face_recognition.html", {'request': request,})
-
-
-@app.post("/predict_face_recognition/")
-async def create_upload_files(request: Request, file3: UploadFile = File(...)):
- global face_rec_filename
-
- if 'image' in file3.content_type:
- contents = await file3.read()
- face_rec_filename = 'app/static/' + file3.filename
- with open(face_rec_filename, 'wb') as f:
- f.write(contents)
-
- img1 = Image.open(face_rec_filename)
- img1 = np.array(img1).reshape(img1.size[1], img1.size[0], 3).astype(np.uint8)
-
- result = face_recognition.get_face_class(img1)
- print(result)
-
- return templates.TemplateResponse("predict_face_recognition.html", {"request": request,
- "result": result,
- "face_rec_filename": '../static/'+file3.filename,})
-
-
-#################################### Expression Recognition endpoints #################################################
-@app.get("/expr_recognition/")
-async def expr_recognition_root(request: Request):
- return templates.TemplateResponse("expr_recognition.html", {'request': request,})
-
-
-@app.post("/predict_expr_recognition/")
-async def create_upload_files(request: Request, file4: UploadFile = File(...)):
- global expr_rec_filename
-
- if 'image' in file4.content_type:
- contents = await file4.read()
- expr_rec_filename = 'app/static/' + file4.filename
- with open(expr_rec_filename, 'wb') as f:
- f.write(contents)
-
- img1 = Image.open(expr_rec_filename)
- img1 = np.array(img1).reshape(img1.size[1], img1.size[0], 3).astype(np.uint8)
-
- result = exp_recognition.get_expression(img1)
- print(result)
-
- return templates.TemplateResponse("predict_expr_recognition.html", {"request": request,
- "result": result,
- "expr_rec_filename": '../static/'+file4.filename,})
-
-
-
-# Set all CORS enabled origins
-if settings.BACKEND_CORS_ORIGINS:
- app.add_middleware(
- CORSMiddleware,
- allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
- allow_credentials=True,
- allow_methods=["*"],
- allow_headers=["*"],
- )
-
-
-# Start app
-if __name__ == "__main__":
- import uvicorn
- uvicorn.run(app, host="0.0.0.0", port=8001)
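
The endpoints above persist each upload under `app/static/` and then re-open it with PIL, which the app needs because it serves the images back in its templates. Where the file does not need to be served, the upload can be decoded in memory instead. A minimal sketch (the `/predict/` route and response shape are illustrative assumptions, not part of this app):

```python
from io import BytesIO

import numpy as np
from fastapi import FastAPI, File, UploadFile
from PIL import Image

app = FastAPI()

@app.post("/predict/")
async def predict(file: UploadFile = File(...)):
    contents = await file.read()
    # Decode in memory instead of round-tripping through app/static/.
    img = np.array(Image.open(BytesIO(contents)).convert("RGB"), dtype=np.uint8)
    return {"shape": list(img.shape)}  # stand-in for a real model call
```
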
diff --git a/spaces/ServerX/PorcoDiaz/infer/modules/ipex/__init__.py.py b/spaces/ServerX/PorcoDiaz/infer/modules/ipex/__init__.py.py
deleted file mode 100644
index 9f53b2d3f7025b2d71369dababa4e6f2a4affc48..0000000000000000000000000000000000000000
--- a/spaces/ServerX/PorcoDiaz/infer/modules/ipex/__init__.py.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import os
-import sys
-import contextlib
-import torch
-import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
-from .hijacks import ipex_hijacks
-from .attention import attention_init
-
-# pylint: disable=protected-access, missing-function-docstring, line-too-long
-
-def ipex_init(): # pylint: disable=too-many-statements
- try:
- #Replace cuda with xpu:
- torch.cuda.current_device = torch.xpu.current_device
- torch.cuda.current_stream = torch.xpu.current_stream
- torch.cuda.device = torch.xpu.device
- torch.cuda.device_count = torch.xpu.device_count
- torch.cuda.device_of = torch.xpu.device_of
- torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard
- torch.cuda.get_device_name = torch.xpu.get_device_name
- torch.cuda.get_device_properties = torch.xpu.get_device_properties
- torch.cuda.init = torch.xpu.init
- torch.cuda.is_available = torch.xpu.is_available
- torch.cuda.is_initialized = torch.xpu.is_initialized
- torch.cuda.is_current_stream_capturing = lambda: False
- torch.cuda.set_device = torch.xpu.set_device
- torch.cuda.stream = torch.xpu.stream
- torch.cuda.synchronize = torch.xpu.synchronize
- torch.cuda.Event = torch.xpu.Event
- torch.cuda.Stream = torch.xpu.Stream
- torch.cuda.FloatTensor = torch.xpu.FloatTensor
- torch.Tensor.cuda = torch.Tensor.xpu
- torch.Tensor.is_cuda = torch.Tensor.is_xpu
- torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock
- torch.cuda._initialized = torch.xpu.lazy_init._initialized
- torch.cuda._lazy_seed_tracker = torch.xpu.lazy_init._lazy_seed_tracker
- torch.cuda._queued_calls = torch.xpu.lazy_init._queued_calls
- torch.cuda._tls = torch.xpu.lazy_init._tls
- torch.cuda.threading = torch.xpu.lazy_init.threading
- torch.cuda.traceback = torch.xpu.lazy_init.traceback
- torch.cuda.Optional = torch.xpu.Optional
- torch.cuda.__cached__ = torch.xpu.__cached__
- torch.cuda.__loader__ = torch.xpu.__loader__
- torch.cuda.ComplexFloatStorage = torch.xpu.ComplexFloatStorage
- torch.cuda.Tuple = torch.xpu.Tuple
- torch.cuda.streams = torch.xpu.streams
- torch.cuda._lazy_new = torch.xpu._lazy_new
- torch.cuda.FloatStorage = torch.xpu.FloatStorage
- torch.cuda.Any = torch.xpu.Any
- torch.cuda.__doc__ = torch.xpu.__doc__
- torch.cuda.default_generators = torch.xpu.default_generators
- torch.cuda.HalfTensor = torch.xpu.HalfTensor
- torch.cuda._get_device_index = torch.xpu._get_device_index
- torch.cuda.__path__ = torch.xpu.__path__
- torch.cuda.Device = torch.xpu.Device
- torch.cuda.IntTensor = torch.xpu.IntTensor
- torch.cuda.ByteStorage = torch.xpu.ByteStorage
- torch.cuda.set_stream = torch.xpu.set_stream
- torch.cuda.BoolStorage = torch.xpu.BoolStorage
- torch.cuda.os = torch.xpu.os
- torch.cuda.torch = torch.xpu.torch
- torch.cuda.BFloat16Storage = torch.xpu.BFloat16Storage
- torch.cuda.Union = torch.xpu.Union
- torch.cuda.DoubleTensor = torch.xpu.DoubleTensor
- torch.cuda.ShortTensor = torch.xpu.ShortTensor
- torch.cuda.LongTensor = torch.xpu.LongTensor
- torch.cuda.IntStorage = torch.xpu.IntStorage
- torch.cuda.LongStorage = torch.xpu.LongStorage
- torch.cuda.__annotations__ = torch.xpu.__annotations__
- torch.cuda.__package__ = torch.xpu.__package__
- torch.cuda.__builtins__ = torch.xpu.__builtins__
- torch.cuda.CharTensor = torch.xpu.CharTensor
- torch.cuda.List = torch.xpu.List
- torch.cuda._lazy_init = torch.xpu._lazy_init
- torch.cuda.BFloat16Tensor = torch.xpu.BFloat16Tensor
- torch.cuda.DoubleStorage = torch.xpu.DoubleStorage
- torch.cuda.ByteTensor = torch.xpu.ByteTensor
- torch.cuda.StreamContext = torch.xpu.StreamContext
- torch.cuda.ComplexDoubleStorage = torch.xpu.ComplexDoubleStorage
- torch.cuda.ShortStorage = torch.xpu.ShortStorage
- torch.cuda._lazy_call = torch.xpu._lazy_call
- torch.cuda.HalfStorage = torch.xpu.HalfStorage
- torch.cuda.random = torch.xpu.random
- torch.cuda._device = torch.xpu._device
- torch.cuda.classproperty = torch.xpu.classproperty
- torch.cuda.__name__ = torch.xpu.__name__
- torch.cuda._device_t = torch.xpu._device_t
- torch.cuda.warnings = torch.xpu.warnings
- torch.cuda.__spec__ = torch.xpu.__spec__
- torch.cuda.BoolTensor = torch.xpu.BoolTensor
- torch.cuda.CharStorage = torch.xpu.CharStorage
- torch.cuda.__file__ = torch.xpu.__file__
- torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork
- #torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing
-
- #Memory:
- torch.cuda.memory = torch.xpu.memory
- if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read():
- torch.xpu.empty_cache = lambda: None
- torch.cuda.empty_cache = torch.xpu.empty_cache
- torch.cuda.memory_stats = torch.xpu.memory_stats
- torch.cuda.memory_summary = torch.xpu.memory_summary
- torch.cuda.memory_snapshot = torch.xpu.memory_snapshot
- torch.cuda.memory_allocated = torch.xpu.memory_allocated
- torch.cuda.max_memory_allocated = torch.xpu.max_memory_allocated
- torch.cuda.memory_reserved = torch.xpu.memory_reserved
- torch.cuda.memory_cached = torch.xpu.memory_reserved
- torch.cuda.max_memory_reserved = torch.xpu.max_memory_reserved
- torch.cuda.max_memory_cached = torch.xpu.max_memory_reserved
- torch.cuda.reset_peak_memory_stats = torch.xpu.reset_peak_memory_stats
- torch.cuda.reset_max_memory_cached = torch.xpu.reset_peak_memory_stats
- torch.cuda.reset_max_memory_allocated = torch.xpu.reset_peak_memory_stats
- torch.cuda.memory_stats_as_nested_dict = torch.xpu.memory_stats_as_nested_dict
- torch.cuda.reset_accumulated_memory_stats = torch.xpu.reset_accumulated_memory_stats
-
- #RNG:
- torch.cuda.get_rng_state = torch.xpu.get_rng_state
- torch.cuda.get_rng_state_all = torch.xpu.get_rng_state_all
- torch.cuda.set_rng_state = torch.xpu.set_rng_state
- torch.cuda.set_rng_state_all = torch.xpu.set_rng_state_all
- torch.cuda.manual_seed = torch.xpu.manual_seed
- torch.cuda.manual_seed_all = torch.xpu.manual_seed_all
- torch.cuda.seed = torch.xpu.seed
- torch.cuda.seed_all = torch.xpu.seed_all
- torch.cuda.initial_seed = torch.xpu.initial_seed
-
- #AMP:
- torch.cuda.amp = torch.xpu.amp
- if not hasattr(torch.cuda.amp, "common"):
- torch.cuda.amp.common = contextlib.nullcontext()
- torch.cuda.amp.common.amp_definitely_not_available = lambda: False
- try:
- torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
- except Exception: # pylint: disable=broad-exception-caught
- try:
- from .gradscaler import gradscaler_init # pylint: disable=import-outside-toplevel, import-error
- gradscaler_init()
- torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
- except Exception: # pylint: disable=broad-exception-caught
- torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
-
- #C
- torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentStream
- ipex._C._DeviceProperties.major = 2023
- ipex._C._DeviceProperties.minor = 2
-
- #Fix functions with ipex:
- torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_allocated(device)), torch.xpu.get_device_properties(device).total_memory]
- torch._utils._get_available_device_type = lambda: "xpu"
- torch.has_cuda = True
- torch.cuda.has_half = True
- torch.cuda.is_bf16_supported = lambda *args, **kwargs: True
- torch.cuda.is_fp16_supported = lambda *args, **kwargs: True
- torch.version.cuda = "11.7"
- torch.cuda.get_device_capability = lambda *args, **kwargs: [11,7]
- torch.cuda.get_device_properties.major = 11
- torch.cuda.get_device_properties.minor = 7
- torch.cuda.ipc_collect = lambda *args, **kwargs: None
- torch.cuda.utilization = lambda *args, **kwargs: 0
-
- ipex_hijacks()
- attention_init()
- except Exception as e:
- return False, e
- return True, None
\ No newline at end of file
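
The net effect of `ipex_init()` is that code written against the `torch.cuda` API runs unmodified on Intel XPUs, since every aliased attribute now dispatches to `torch.xpu`. A hedged usage sketch (it assumes the module above is importable from the repo root and that an XPU-enabled PyTorch build with `intel_extension_for_pytorch` is installed):

```python
import torch
from infer.modules.ipex import ipex_init  # the module above; assumes repo root on sys.path

ok, err = ipex_init()
if ok:
    # Downstream code keeps its CUDA idioms; they now dispatch to torch.xpu.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.randn(4, 4, device=device)
    print(x.device)
else:
    print("IPEX init failed:", err)
```
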
diff --git a/spaces/Silentlin/DiffSinger/checkpoints/cleaner.py b/spaces/Silentlin/DiffSinger/checkpoints/cleaner.py
deleted file mode 100644
index 285ee8304af35d49d237d99147830dbfd0206eb1..0000000000000000000000000000000000000000
--- a/spaces/Silentlin/DiffSinger/checkpoints/cleaner.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import sys
-import torch
-
-if __name__ == '__main__':
- ckpt_path = sys.argv[1]
- checkpoint = torch.load(ckpt_path, map_location='cpu')
- checkpoint = {'state_dict': checkpoint['state_dict']}
- torch.save(checkpoint, ckpt_path, _use_new_zipfile_serialization=False)
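
The cleaner keeps only the `state_dict` entry, dropping optimizer state and other training bookkeeping that usually dominates checkpoint size. A quick sketch of the same idea with a size check (the paths are placeholders, not files from this repo):

```python
import os
import torch

src, dst = "model.ckpt", "model.slim.ckpt"  # placeholder paths
ckpt = torch.load(src, map_location="cpu")
print("original keys:", sorted(ckpt.keys()))  # typically state_dict, optimizer_states, ...
torch.save({"state_dict": ckpt["state_dict"]}, dst,
           _use_new_zipfile_serialization=False)
print("MB before/after:", os.path.getsize(src) >> 20, "/", os.path.getsize(dst) >> 20)
```
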
diff --git a/spaces/SrRaptor/Imagy/app.py b/spaces/SrRaptor/Imagy/app.py
deleted file mode 100644
index 2759270b58ebcd3831bc26a0a53af30b91bce129..0000000000000000000000000000000000000000
--- a/spaces/SrRaptor/Imagy/app.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from matplotlib.pyplot import title
-from yaml import Mark
-from music import music_gen  # Function to generate music based on ABC files
-import gradio as gr
-import os
-from MC.markov_chain import main_markov #Function to generate music based on midi files
-
-keysignature = ["C","G","D","A","No selection"]
-difficulty = ["beginner","intermediate","expert"]
-timesignature = ['3/4','4/4','2/2','2/4']
-
-# output = gr.Gallery() if GlobalUIGallery else "image"
-
-# interface = gr.Interface(fn = music_gen,
-# inputs=[gr.Radio(difficulty,label="Difficulty"),
-# gr.Radio(timesignature,label="Time Signature"),
-# gr.Dropdown(keysignature,label="Key Signature")],
-# outputs = [gr.Gallery(label="Sheet Music"),gr.Audio(label="Audio")],
-# title="Sheet Music Generation for Sight-Reading",
-# description="TO be added")
-# interface.launch(inline=False)
-
-with gr.Blocks() as demo:
-
- gr.Markdown("""
- ## Sight-reading generator for sheet music.
-    Markov models generate sheet music based on input parameters
-    chosen by the user.
- """)
- with gr.Tabs():
- with gr.TabItem("ABC Model"):
- gr.Markdown("N-grams model using ABC data as training data.")
- with gr.Row():
- with gr.Column():
- difficulty_input_abc = gr.Radio(difficulty,label="Difficulty") #input
- time_sig_input_abc = gr.Radio(timesignature,label="Time Signature") #input
- key_sig_input_abc = gr.Dropdown(keysignature,label="Key Signature") #input
- with gr.Row():
- abc_button = gr.Button("Create Music!!") #Submit
-
- with gr.Column():
- output_gallery_abc = gr.Gallery(label="Sheet Music")
- output_audio_abc = gr.Audio(label="Audio")
- with gr.TabItem("MIDI Model"):
-            gr.Markdown("Markov model using MIDI data as training data.")
- with gr.Row():
- with gr.Column():
- # difficulty_input_midi = gr.Radio(difficulty,label="Difficulty") #input
- time_sig_input_midi = gr.Radio(timesignature,label="Time Signature") #input
- # key_sig_input_midi = gr.Dropdown(keysignature,label="Key Signature") #input
- with gr.Row():
- midi_button = gr.Button("Create Music!!") #Submit
-
- with gr.Column():
- output_gallery_midi = gr.Gallery(label="Sheet Music")
- output_audio_midi = gr.Audio(label="Audio")
-
- abc_button.click(music_gen, inputs=[difficulty_input_abc,time_sig_input_abc,key_sig_input_abc], outputs=[output_gallery_abc,output_audio_abc])
- midi_button.click(main_markov, inputs= time_sig_input_midi, outputs=[output_gallery_midi,output_audio_midi])
-
-if __name__ == "__main__":
- demo.launch()
-
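
The description mentions Markov models trained on ABC and MIDI data; the repo's `music_gen` and `main_markov` implementations are not shown here. Purely as a self-contained illustration of the underlying idea (toy training data, not the repo's code), a first-order Markov chain over note symbols looks like this:

```python
import random
from collections import defaultdict

def train_markov(sequences):
    """First-order Markov model: note -> list of observed successors."""
    table = defaultdict(list)
    for seq in sequences:
        for a, b in zip(seq, seq[1:]):
            table[a].append(b)
    return table

def generate(table, start, length=8, seed=0):
    random.seed(seed)
    out = [start]
    for _ in range(length - 1):
        successors = table.get(out[-1])
        if not successors:
            break                      # dead end: no observed successor
        out.append(random.choice(successors))
    return out

melodies = [["C", "D", "E", "C"], ["C", "E", "G", "E", "C"]]
print(generate(train_markov(melodies), start="C"))
```
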
diff --git a/spaces/Sudhansu/05GR-Image-To-Multilingual-OCR/README.md b/spaces/Sudhansu/05GR-Image-To-Multilingual-OCR/README.md
deleted file mode 100644
index da63e86a0583ed698f144c8d5f57080149764d34..0000000000000000000000000000000000000000
--- a/spaces/Sudhansu/05GR-Image-To-Multilingual-OCR/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 05GR Image To Multilingual OCR
-emoji: 👁
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/dbapi/cursor.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/dbapi/cursor.py
deleted file mode 100644
index b8f23452ac6922713dd45c86201787bf5fd735e6..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/dbapi/cursor.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import logging
-import re
-
-from typing import Optional, Sequence
-
-from clickhouse_connect.datatypes.registry import get_from_name
-from clickhouse_connect.driver.common import unescape_identifier
-from clickhouse_connect.driver.exceptions import ProgrammingError
-from clickhouse_connect.driver import Client
-from clickhouse_connect.driver.parser import parse_callable
-from clickhouse_connect.driver.query import remove_sql_comments
-
-logger = logging.getLogger(__name__)
-
-insert_re = re.compile(r'^\s*INSERT\s+INTO\s+(.*$)', re.IGNORECASE)
-str_type = get_from_name('String')
-int_type = get_from_name('Int32')
-
-
-class Cursor:
- """
- See :ref:`https://peps.python.org/pep-0249/`
- """
-
- def __init__(self, client: Client):
- self.client = client
- self.arraysize = 1
- self.data: Optional[Sequence] = None
- self.names = []
- self.types = []
- self._rowcount = 0
- self._ix = 0
-
- def check_valid(self):
- if self.data is None:
- raise ProgrammingError('Cursor is not valid')
-
- @property
- def description(self):
- return [(n, t, None, None, None, None, True) for n, t in zip(self.names, self.types)]
-
- @property
- def rowcount(self):
- return self._rowcount
-
- def close(self):
- self.data = None
-
- def execute(self, operation: str, parameters=None):
- query_result = self.client.query(operation, parameters)
- self.data = query_result.result_set
- self._rowcount = len(self.data)
- if query_result.column_names:
- self.names = query_result.column_names
- self.types = [x.name for x in query_result.column_types]
- elif self.data:
- self.names = [f'col_{x}' for x in range(len(self.data[0]))]
- self.types = [x.__class__ for x in self.data[0]]
-
- def _try_bulk_insert(self, operation: str, data):
- match = insert_re.match(remove_sql_comments(operation))
- if not match:
- return False
- temp = match.group(1)
- table_end = min(temp.find(' '), temp.find('('))
- table = temp[:table_end].strip()
- temp = temp[table_end:].strip()
- if temp[0] == '(':
- _, op_columns, temp = parse_callable(temp)
- else:
- op_columns = None
- if 'VALUES' not in temp.upper():
- return False
- col_names = list(data[0].keys())
- if op_columns and {unescape_identifier(x) for x in op_columns} != set(col_names):
- return False # Data sent in doesn't match the columns in the insert statement
- data_values = [list(row.values()) for row in data]
- self.client.insert(table, data_values, col_names)
- self.data = []
- return True
-
- def executemany(self, operation, parameters):
- if not parameters or self._try_bulk_insert(operation, parameters):
- return
- self.data = []
- try:
- for param_row in parameters:
- query_result = self.client.query(operation, param_row)
- self.data.extend(query_result.result_set)
- if self.names or self.types:
- if query_result.column_names != self.names:
- logger.warning('Inconsistent column names %s : %s for operation %s in cursor executemany',
- self.names, query_result.column_names, operation)
- else:
- self.names = query_result.column_names
- self.types = query_result.column_types
- except TypeError as ex:
- raise ProgrammingError(f'Invalid parameters {parameters} passed to cursor executemany') from ex
- self._rowcount = len(self.data)
-
- def fetchall(self):
- self.check_valid()
- ret = self.data
- self._ix = self._rowcount
- return ret
-
- def fetchone(self):
- self.check_valid()
- if self._ix >= self._rowcount:
- return None
- val = self.data[self._ix]
- self._ix += 1
- return val
-
-    def fetchmany(self, size: int = -1):
-        self.check_valid()
-        if size < 0:
-            size = self.arraysize
-        end = min(self._ix + size, self._rowcount)  # return at most `size` rows (PEP 249)
-        ret = self.data[self._ix: end]
-        self._ix = end
-        return ret
-
- def nextset(self):
- raise NotImplementedError
-
- def callproc(self, *args, **kwargs):
- raise NotImplementedError
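
`executemany` above short-circuits into a native bulk insert whenever `_try_bulk_insert` recognizes the statement; the gate is the `insert_re` pattern from the top of the file, whose captured group is then parsed for the table name and column list. A small standalone demonstration of what that regex matches:

```python
import re

insert_re = re.compile(r'^\s*INSERT\s+INTO\s+(.*$)', re.IGNORECASE)

for sql in (
    "INSERT INTO metrics (ts, value) VALUES",
    "  insert into db.events VALUES",
    "SELECT 1",  # not an insert: the bulk path is skipped
):
    m = insert_re.match(sql)
    print(repr(sql), "->", m.group(1) if m else None)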
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/cfg.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/cfg.py
deleted file mode 100644
index 5e2f32290360abeca395fbb6ebb17e723b49fd3f..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/cfg.py
+++ /dev/null
@@ -1,463 +0,0 @@
-import sys
-
-# alias to keep the 'bytecode' variable free
-from _pydevd_frame_eval.vendored import bytecode as _bytecode
-from _pydevd_frame_eval.vendored.bytecode.concrete import ConcreteInstr
-from _pydevd_frame_eval.vendored.bytecode.flags import CompilerFlags
-from _pydevd_frame_eval.vendored.bytecode.instr import Label, SetLineno, Instr
-
-
-class BasicBlock(_bytecode._InstrList):
- def __init__(self, instructions=None):
- # a BasicBlock object, or None
- self.next_block = None
- if instructions:
- super().__init__(instructions)
-
- def __iter__(self):
- index = 0
- while index < len(self):
- instr = self[index]
- index += 1
-
- if not isinstance(instr, (SetLineno, Instr)):
- raise ValueError(
- "BasicBlock must only contain SetLineno and Instr objects, "
- "but %s was found" % instr.__class__.__name__
- )
-
- if isinstance(instr, Instr) and instr.has_jump():
- if index < len(self):
- raise ValueError(
-                        "Only the last instruction of a basic block can be a jump"
- )
-
-                if not isinstance(instr.arg, BasicBlock):
-                    raise ValueError(
-                        "Jump target must be a BasicBlock, got %s"
-                        % type(instr.arg).__name__
-                    )
-
- yield instr
-
- def __getitem__(self, index):
- value = super().__getitem__(index)
- if isinstance(index, slice):
- value = type(self)(value)
- value.next_block = self.next_block
-
- return value
-
- def copy(self):
- new = type(self)(super().copy())
- new.next_block = self.next_block
- return new
-
- def legalize(self, first_lineno):
- """Check that all the element of the list are valid and remove SetLineno."""
- lineno_pos = []
- set_lineno = None
- current_lineno = first_lineno
-
- for pos, instr in enumerate(self):
- if isinstance(instr, SetLineno):
- set_lineno = current_lineno = instr.lineno
- lineno_pos.append(pos)
- continue
- if set_lineno is not None:
- instr.lineno = set_lineno
- elif instr.lineno is None:
- instr.lineno = current_lineno
- else:
- current_lineno = instr.lineno
-
- for i in reversed(lineno_pos):
- del self[i]
-
- return current_lineno
-
- def get_jump(self):
- if not self:
- return None
-
- last_instr = self[-1]
- if not (isinstance(last_instr, Instr) and last_instr.has_jump()):
- return None
-
- target_block = last_instr.arg
- assert isinstance(target_block, BasicBlock)
- return target_block
-
-
-def _compute_stack_size(block, size, maxsize, *, check_pre_and_post=True):
- """Generator used to reduce the use of function stacks.
-
-    This avoids nested recursion and allows more cases to be handled.
-
- HOW-TO:
- Following the methods of Trampoline
- (see https://en.wikipedia.org/wiki/Trampoline_(computing)),
-
- We yield either:
-
-    - the arguments that would be used in the recursive calls, i.e.,
-      'yield block, size, maxsize' instead of making a recursive call
-      '_compute_stack_size(block, size, maxsize)', if we encounter an
-      instruction jumping to another block or if the block is linked to
-      another one (i.e. `next_block` is set)
-    - the required stack size if we went through all the instructions
-      or encountered an unconditional jump.
-
- In the first case, the calling function is then responsible for creating a
- new generator with those arguments, iterating over it till exhaustion to
- determine the stacksize required by the block and resuming this function
- with the determined stacksize.
-
- """
- # If the block is currently being visited (seen = True) or if it was visited
- # previously by using a larger starting size than the one in use, return the
- # maxsize.
- if block.seen or block.startsize >= size:
- yield maxsize
-
- def update_size(pre_delta, post_delta, size, maxsize):
- size += pre_delta
- if size < 0:
- msg = "Failed to compute stacksize, got negative size"
- raise RuntimeError(msg)
- size += post_delta
- maxsize = max(maxsize, size)
- return size, maxsize
-
- # Prevent recursive visit of block if two blocks are nested (jump from one
- # to the other).
- block.seen = True
- block.startsize = size
-
- for instr in block:
-
- # Ignore SetLineno
- if isinstance(instr, SetLineno):
- continue
-
- # For instructions with a jump first compute the stacksize required when the
- # jump is taken.
- if instr.has_jump():
- effect = (
- instr.pre_and_post_stack_effect(jump=True)
- if check_pre_and_post
- else (instr.stack_effect(jump=True), 0)
- )
- taken_size, maxsize = update_size(*effect, size, maxsize)
-            # Yield the parameters required to compute the stacksize required
-            # by the block the jump points to, and resume once we know
-            # the maxsize.
- maxsize = yield instr.arg, taken_size, maxsize
-
- # For unconditional jumps abort early since the other instruction will
- # never be seen.
- if instr.is_uncond_jump():
- block.seen = False
- yield maxsize
-
- # jump=False: non-taken path of jumps, or any non-jump
- effect = (
- instr.pre_and_post_stack_effect(jump=False)
- if check_pre_and_post
- else (instr.stack_effect(jump=False), 0)
- )
- size, maxsize = update_size(*effect, size, maxsize)
-
- if block.next_block:
- maxsize = yield block.next_block, size, maxsize
-
- block.seen = False
- yield maxsize
-
-
-class ControlFlowGraph(_bytecode.BaseBytecode):
- def __init__(self):
- super().__init__()
- self._blocks = []
- self._block_index = {}
- self.argnames = []
-
- self.add_block()
-
- def legalize(self):
- """Legalize all blocks."""
- current_lineno = self.first_lineno
- for block in self._blocks:
- current_lineno = block.legalize(current_lineno)
-
- def get_block_index(self, block):
- try:
- return self._block_index[id(block)]
- except KeyError:
- raise ValueError("the block is not part of this bytecode")
-
- def _add_block(self, block):
- block_index = len(self._blocks)
- self._blocks.append(block)
- self._block_index[id(block)] = block_index
-
- def add_block(self, instructions=None):
- block = BasicBlock(instructions)
- self._add_block(block)
- return block
-
- def compute_stacksize(self, *, check_pre_and_post=True):
- """Compute the stack size by iterating through the blocks
-
-        The implementation makes use of a generator function to avoid issues
-        with deeply nested recursion.
-
- """
- # In the absence of any block return 0
- if not self:
- return 0
-
-        # Ensure that previous calculations do not impact this one.
- for block in self:
- block.seen = False
- block.startsize = -32768 # INT_MIN
-
-        # Starting with Python 3.10, generators and coroutines start with one
-        # object on the stack (None; sending anything else is an error).
- initial_stack_size = 0
- if sys.version_info >= (3, 10) and self.flags & (
- CompilerFlags.GENERATOR
- | CompilerFlags.COROUTINE
- | CompilerFlags.ASYNC_GENERATOR
- ):
- initial_stack_size = 1
-
-        # Create a generator/coroutine responsible for dealing with the first block
- coro = _compute_stack_size(
- self[0], initial_stack_size, 0, check_pre_and_post=check_pre_and_post
- )
-
-        # Create a list of generators that have not yet been exhausted
- coroutines = []
-
- push_coroutine = coroutines.append
- pop_coroutine = coroutines.pop
- args = None
-
- try:
- while True:
- args = coro.send(None)
-
-                # Consume the stored generators as long as they return a simple
-                # integer that is to be used to resume the last stored generator.
- while isinstance(args, int):
- coro = pop_coroutine()
- args = coro.send(args)
-
-                # Otherwise we enter a new block, so store the generator in use
-                # and create a new one to process the new block.
- push_coroutine(coro)
- coro = _compute_stack_size(*args, check_pre_and_post=check_pre_and_post)
-
- except IndexError:
-            # The exception occurs when all the generators have been exhausted,
-            # in which case the last yielded value is the stacksize.
- assert args is not None
- return args
-
- def __repr__(self):
-        return "<ControlFlowGraph block#=%s>" % len(self._blocks)
-
- def get_instructions(self):
- instructions = []
- jumps = []
-
- for block in self:
- target_block = block.get_jump()
- if target_block is not None:
- instr = block[-1]
- instr = ConcreteInstr(instr.name, 0, lineno=instr.lineno)
- jumps.append((target_block, instr))
-
- instructions.extend(block[:-1])
- instructions.append(instr)
- else:
- instructions.extend(block)
-
- for target_block, instr in jumps:
- instr.arg = self.get_block_index(target_block)
-
- return instructions
-
- def __eq__(self, other):
- if type(self) != type(other):
- return False
-
- if self.argnames != other.argnames:
- return False
-
- instrs1 = self.get_instructions()
- instrs2 = other.get_instructions()
- if instrs1 != instrs2:
- return False
- # FIXME: compare block.next_block
-
- return super().__eq__(other)
-
- def __len__(self):
- return len(self._blocks)
-
- def __iter__(self):
- return iter(self._blocks)
-
- def __getitem__(self, index):
- if isinstance(index, BasicBlock):
- index = self.get_block_index(index)
- return self._blocks[index]
-
- def __delitem__(self, index):
- if isinstance(index, BasicBlock):
- index = self.get_block_index(index)
- block = self._blocks[index]
- del self._blocks[index]
- del self._block_index[id(block)]
- for index in range(index, len(self)):
- block = self._blocks[index]
- self._block_index[id(block)] -= 1
-
- def split_block(self, block, index):
- if not isinstance(block, BasicBlock):
- raise TypeError("expected block")
- block_index = self.get_block_index(block)
-
- if index < 0:
- raise ValueError("index must be positive")
-
- block = self._blocks[block_index]
- if index == 0:
- return block
-
- if index > len(block):
- raise ValueError("index out of the block")
-
- instructions = block[index:]
- if not instructions:
- if block_index + 1 < len(self):
- return self[block_index + 1]
-
- del block[index:]
-
- block2 = BasicBlock(instructions)
- block.next_block = block2
-
- for block in self[block_index + 1 :]:
- self._block_index[id(block)] += 1
-
- self._blocks.insert(block_index + 1, block2)
- self._block_index[id(block2)] = block_index + 1
-
- return block2
-
- @staticmethod
- def from_bytecode(bytecode):
- # label => instruction index
- label_to_block_index = {}
- jumps = []
- block_starts = {}
- for index, instr in enumerate(bytecode):
- if isinstance(instr, Label):
- label_to_block_index[instr] = index
- else:
- if isinstance(instr, Instr) and isinstance(instr.arg, Label):
- jumps.append((index, instr.arg))
-
- for target_index, target_label in jumps:
- target_index = label_to_block_index[target_label]
- block_starts[target_index] = target_label
-
- bytecode_blocks = _bytecode.ControlFlowGraph()
- bytecode_blocks._copy_attr_from(bytecode)
- bytecode_blocks.argnames = list(bytecode.argnames)
-
- # copy instructions, convert labels to block labels
- block = bytecode_blocks[0]
- labels = {}
- jumps = []
- for index, instr in enumerate(bytecode):
- if index in block_starts:
- old_label = block_starts[index]
- if index != 0:
- new_block = bytecode_blocks.add_block()
- if not block[-1].is_final():
- block.next_block = new_block
- block = new_block
- if old_label is not None:
- labels[old_label] = block
- elif block and isinstance(block[-1], Instr):
- if block[-1].is_final():
- block = bytecode_blocks.add_block()
- elif block[-1].has_jump():
- new_block = bytecode_blocks.add_block()
- block.next_block = new_block
- block = new_block
-
- if isinstance(instr, Label):
- continue
-
- # don't copy SetLineno objects
- if isinstance(instr, Instr):
- instr = instr.copy()
- if isinstance(instr.arg, Label):
- jumps.append(instr)
- block.append(instr)
-
- for instr in jumps:
- label = instr.arg
- instr.arg = labels[label]
-
- return bytecode_blocks
-
- def to_bytecode(self):
- """Convert to Bytecode."""
-
- used_blocks = set()
- for block in self:
- target_block = block.get_jump()
- if target_block is not None:
- used_blocks.add(id(target_block))
-
- labels = {}
- jumps = []
- instructions = []
-
- for block in self:
- if id(block) in used_blocks:
- new_label = Label()
- labels[id(block)] = new_label
- instructions.append(new_label)
-
- for instr in block:
- # don't copy SetLineno objects
- if isinstance(instr, Instr):
- instr = instr.copy()
- if isinstance(instr.arg, BasicBlock):
- jumps.append(instr)
- instructions.append(instr)
-
- # Map to new labels
- for instr in jumps:
- instr.arg = labels[id(instr.arg)]
-
- bytecode = _bytecode.Bytecode()
- bytecode._copy_attr_from(self)
- bytecode.argnames = list(self.argnames)
- bytecode[:] = instructions
-
- return bytecode
-
- def to_code(self, stacksize=None):
- """Convert to code."""
- if stacksize is None:
- stacksize = self.compute_stacksize()
- bc = self.to_bytecode()
- return bc.to_code(stacksize=stacksize)
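
`_compute_stack_size` is an instance of the trampoline pattern its docstring cites: instead of recursing, a generator yields the would-be call arguments up to a driver loop that keeps an explicit stack of suspended generators. A minimal self-contained sketch of the same pattern, computing factorials rather than stack sizes (purely illustrative):

```python
def fact(n):
    # Instead of recursing, yield the argument tuple for the "recursive call",
    # then yield the final result once the driver sends the sub-result back.
    if n <= 1:
        yield 1
    else:
        sub = yield (n - 1,)
        yield n * sub

def trampoline(gen_fn, *args):
    stack = []                       # suspended caller generators
    gen = gen_fn(*args)
    value = None
    while True:
        out = gen.send(value)
        if isinstance(out, tuple):   # a "recursive call": push caller, descend
            stack.append(gen)
            gen = gen_fn(*out)
            value = None             # a fresh generator must be primed with None
        elif stack:                  # a result: resume the suspended caller with it
            gen = stack.pop()
            value = out
        else:                        # result at the outermost level: done
            return out

print(trampoline(fact, 5))  # 120
```
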
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/ntdll.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/ntdll.py
deleted file mode 100644
index 39037661d1cbf3d86b0c42dc7c0465459ee13799..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/ntdll.py
+++ /dev/null
@@ -1,539 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2009-2014, Mario Vilas
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice,this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-"""
-Wrapper for ntdll.dll in ctypes.
-"""
-
-__revision__ = "$Id$"
-
-from winappdbg.win32.defines import *
-
-#==============================================================================
-# This is used later on to calculate the list of exported symbols.
-_all = None
-_all = set(vars().keys())
-_all.add('peb_teb')
-#==============================================================================
-
-from winappdbg.win32.peb_teb import *
-
-#--- Types --------------------------------------------------------------------
-
-SYSDBG_COMMAND = DWORD
-PROCESSINFOCLASS = DWORD
-THREADINFOCLASS = DWORD
-FILE_INFORMATION_CLASS = DWORD
-
-#--- Constants ----------------------------------------------------------------
-
-# DEP flags for ProcessExecuteFlags
-MEM_EXECUTE_OPTION_ENABLE = 1
-MEM_EXECUTE_OPTION_DISABLE = 2
-MEM_EXECUTE_OPTION_ATL7_THUNK_EMULATION = 4
-MEM_EXECUTE_OPTION_PERMANENT = 8
-
-# SYSTEM_INFORMATION_CLASS
-# http://www.informit.com/articles/article.aspx?p=22442&seqNum=4
-SystemBasicInformation = 1 # 0x002C
-SystemProcessorInformation = 2 # 0x000C
-SystemPerformanceInformation = 3 # 0x0138
-SystemTimeInformation = 4 # 0x0020
-SystemPathInformation = 5 # not implemented
-SystemProcessInformation = 6 # 0x00F8 + per process
-SystemCallInformation = 7 # 0x0018 + (n * 0x0004)
-SystemConfigurationInformation = 8 # 0x0018
-SystemProcessorCounters = 9 # 0x0030 per cpu
-SystemGlobalFlag = 10 # 0x0004
-SystemInfo10 = 11 # not implemented
-SystemModuleInformation = 12 # 0x0004 + (n * 0x011C)
-SystemLockInformation = 13 # 0x0004 + (n * 0x0024)
-SystemInfo13 = 14 # not implemented
-SystemPagedPoolInformation = 15 # checked build only
-SystemNonPagedPoolInformation = 16 # checked build only
-SystemHandleInformation = 17 # 0x0004 + (n * 0x0010)
-SystemObjectInformation = 18 # 0x0038+ + (n * 0x0030+)
-SystemPagefileInformation = 19 # 0x0018+ per page file
-SystemInstemulInformation = 20 # 0x0088
-SystemInfo20 = 21 # invalid info class
-SystemCacheInformation = 22 # 0x0024
-SystemPoolTagInformation = 23 # 0x0004 + (n * 0x001C)
-SystemProcessorStatistics = 24 # 0x0000, or 0x0018 per cpu
-SystemDpcInformation = 25 # 0x0014
-SystemMemoryUsageInformation1 = 26 # checked build only
-SystemLoadImage = 27 # 0x0018, set mode only
-SystemUnloadImage = 28 # 0x0004, set mode only
-SystemTimeAdjustmentInformation = 29 # 0x000C, 0x0008 writeable
-SystemMemoryUsageInformation2 = 30 # checked build only
-SystemInfo30 = 31 # checked build only
-SystemInfo31 = 32 # checked build only
-SystemCrashDumpInformation = 33 # 0x0004
-SystemExceptionInformation = 34 # 0x0010
-SystemCrashDumpStateInformation = 35 # 0x0008
-SystemDebuggerInformation = 36 # 0x0002
-SystemThreadSwitchInformation = 37 # 0x0030
-SystemRegistryQuotaInformation = 38 # 0x000C
-SystemLoadDriver = 39 # 0x0008, set mode only
-SystemPrioritySeparationInformation = 40 # 0x0004, set mode only
-SystemInfo40 = 41 # not implemented
-SystemInfo41 = 42 # not implemented
-SystemInfo42 = 43 # invalid info class
-SystemInfo43 = 44 # invalid info class
-SystemTimeZoneInformation = 45 # 0x00AC
-SystemLookasideInformation = 46 # n * 0x0020
-# info classes specific to Windows 2000
-# WTS = Windows Terminal Server
-SystemSetTimeSlipEvent = 47 # set mode only
-SystemCreateSession = 48 # WTS, set mode only
-SystemDeleteSession = 49 # WTS, set mode only
-SystemInfo49 = 50 # invalid info class
-SystemRangeStartInformation = 51 # 0x0004
-SystemVerifierInformation = 52 # 0x0068
-SystemAddVerifier = 53 # set mode only
-SystemSessionProcessesInformation = 54 # WTS
-
-# NtQueryInformationProcess constants (from MSDN)
-##ProcessBasicInformation = 0
-##ProcessDebugPort = 7
-##ProcessWow64Information = 26
-##ProcessImageFileName = 27
-
-# PROCESS_INFORMATION_CLASS
-# http://undocumented.ntinternals.net/UserMode/Undocumented%20Functions/NT%20Objects/Process/PROCESS_INFORMATION_CLASS.html
-ProcessBasicInformation = 0
-ProcessQuotaLimits = 1
-ProcessIoCounters = 2
-ProcessVmCounters = 3
-ProcessTimes = 4
-ProcessBasePriority = 5
-ProcessRaisePriority = 6
-ProcessDebugPort = 7
-ProcessExceptionPort = 8
-ProcessAccessToken = 9
-ProcessLdtInformation = 10
-ProcessLdtSize = 11
-ProcessDefaultHardErrorMode = 12
-ProcessIoPortHandlers = 13
-ProcessPooledUsageAndLimits = 14
-ProcessWorkingSetWatch = 15
-ProcessUserModeIOPL = 16
-ProcessEnableAlignmentFaultFixup = 17
-ProcessPriorityClass = 18
-ProcessWx86Information = 19
-ProcessHandleCount = 20
-ProcessAffinityMask = 21
-ProcessPriorityBoost = 22
-
-ProcessWow64Information = 26
-ProcessImageFileName = 27
-
-# http://www.codeproject.com/KB/security/AntiReverseEngineering.aspx
-ProcessDebugObjectHandle = 30
-
-ProcessExecuteFlags = 34
-
-# THREAD_INFORMATION_CLASS
-ThreadBasicInformation = 0
-ThreadTimes = 1
-ThreadPriority = 2
-ThreadBasePriority = 3
-ThreadAffinityMask = 4
-ThreadImpersonationToken = 5
-ThreadDescriptorTableEntry = 6
-ThreadEnableAlignmentFaultFixup = 7
-ThreadEventPair = 8
-ThreadQuerySetWin32StartAddress = 9
-ThreadZeroTlsCell = 10
-ThreadPerformanceCount = 11
-ThreadAmILastThread = 12
-ThreadIdealProcessor = 13
-ThreadPriorityBoost = 14
-ThreadSetTlsArrayAddress = 15
-ThreadIsIoPending = 16
-ThreadHideFromDebugger = 17
-
-# OBJECT_INFORMATION_CLASS
-ObjectBasicInformation = 0
-ObjectNameInformation = 1
-ObjectTypeInformation = 2
-ObjectAllTypesInformation = 3
-ObjectHandleInformation = 4
-
-# FILE_INFORMATION_CLASS
-FileDirectoryInformation = 1
-FileFullDirectoryInformation = 2
-FileBothDirectoryInformation = 3
-FileBasicInformation = 4
-FileStandardInformation = 5
-FileInternalInformation = 6
-FileEaInformation = 7
-FileAccessInformation = 8
-FileNameInformation = 9
-FileRenameInformation = 10
-FileLinkInformation = 11
-FileNamesInformation = 12
-FileDispositionInformation = 13
-FilePositionInformation = 14
-FileFullEaInformation = 15
-FileModeInformation = 16
-FileAlignmentInformation = 17
-FileAllInformation = 18
-FileAllocationInformation = 19
-FileEndOfFileInformation = 20
-FileAlternateNameInformation = 21
-FileStreamInformation = 22
-FilePipeInformation = 23
-FilePipeLocalInformation = 24
-FilePipeRemoteInformation = 25
-FileMailslotQueryInformation = 26
-FileMailslotSetInformation = 27
-FileCompressionInformation = 28
-FileCopyOnWriteInformation = 29
-FileCompletionInformation = 30
-FileMoveClusterInformation = 31
-FileQuotaInformation = 32
-FileReparsePointInformation = 33
-FileNetworkOpenInformation = 34
-FileObjectIdInformation = 35
-FileTrackingInformation = 36
-FileOleDirectoryInformation = 37
-FileContentIndexInformation = 38
-FileInheritContentIndexInformation = 37
-FileOleInformation = 39
-FileMaximumInformation = 40
-
-# From http://www.nirsoft.net/kernel_struct/vista/EXCEPTION_DISPOSITION.html
-# typedef enum _EXCEPTION_DISPOSITION
-# {
-# ExceptionContinueExecution = 0,
-# ExceptionContinueSearch = 1,
-# ExceptionNestedException = 2,
-# ExceptionCollidedUnwind = 3
-# } EXCEPTION_DISPOSITION;
-ExceptionContinueExecution = 0
-ExceptionContinueSearch = 1
-ExceptionNestedException = 2
-ExceptionCollidedUnwind = 3
-
-#--- PROCESS_BASIC_INFORMATION structure --------------------------------------
-
-# From MSDN:
-#
-# typedef struct _PROCESS_BASIC_INFORMATION {
-# PVOID Reserved1;
-# PPEB PebBaseAddress;
-# PVOID Reserved2[2];
-# ULONG_PTR UniqueProcessId;
-# PVOID Reserved3;
-# } PROCESS_BASIC_INFORMATION;
-##class PROCESS_BASIC_INFORMATION(Structure):
-## _fields_ = [
-## ("Reserved1", PVOID),
-## ("PebBaseAddress", PPEB),
-## ("Reserved2", PVOID * 2),
-## ("UniqueProcessId", ULONG_PTR),
-## ("Reserved3", PVOID),
-##]
-
-# From http://catch22.net/tuts/tips2
-# (Only valid for 32 bits)
-#
-# typedef struct
-# {
-# ULONG ExitStatus;
-# PVOID PebBaseAddress;
-# ULONG AffinityMask;
-# ULONG BasePriority;
-# ULONG_PTR UniqueProcessId;
-# ULONG_PTR InheritedFromUniqueProcessId;
-# } PROCESS_BASIC_INFORMATION;
-
-# My own definition follows:
-class PROCESS_BASIC_INFORMATION(Structure):
- _fields_ = [
- ("ExitStatus", SIZE_T),
- ("PebBaseAddress", PVOID), # PPEB
- ("AffinityMask", KAFFINITY),
- ("BasePriority", SDWORD),
- ("UniqueProcessId", ULONG_PTR),
- ("InheritedFromUniqueProcessId", ULONG_PTR),
-]
-
-#--- THREAD_BASIC_INFORMATION structure ---------------------------------------
-
-# From http://undocumented.ntinternals.net/UserMode/Structures/THREAD_BASIC_INFORMATION.html
-#
-# typedef struct _THREAD_BASIC_INFORMATION {
-# NTSTATUS ExitStatus;
-# PVOID TebBaseAddress;
-# CLIENT_ID ClientId;
-# KAFFINITY AffinityMask;
-# KPRIORITY Priority;
-# KPRIORITY BasePriority;
-# } THREAD_BASIC_INFORMATION, *PTHREAD_BASIC_INFORMATION;
-class THREAD_BASIC_INFORMATION(Structure):
- _fields_ = [
- ("ExitStatus", NTSTATUS),
- ("TebBaseAddress", PVOID), # PTEB
- ("ClientId", CLIENT_ID),
- ("AffinityMask", KAFFINITY),
- ("Priority", SDWORD),
- ("BasePriority", SDWORD),
-]
-
-#--- FILE_NAME_INFORMATION structure ------------------------------------------
-
-# typedef struct _FILE_NAME_INFORMATION {
-# ULONG FileNameLength;
-# WCHAR FileName[1];
-# } FILE_NAME_INFORMATION, *PFILE_NAME_INFORMATION;
-class FILE_NAME_INFORMATION(Structure):
- _fields_ = [
- ("FileNameLength", ULONG),
- ("FileName", WCHAR * 1),
- ]
-
-#--- SYSDBG_MSR structure and constants ---------------------------------------
-
-SysDbgReadMsr = 16
-SysDbgWriteMsr = 17
-
-class SYSDBG_MSR(Structure):
- _fields_ = [
- ("Address", ULONG),
- ("Data", ULONGLONG),
-]
-
-#--- IO_STATUS_BLOCK structure ------------------------------------------------
-
-# typedef struct _IO_STATUS_BLOCK {
-# union {
-# NTSTATUS Status;
-# PVOID Pointer;
-# };
-# ULONG_PTR Information;
-# } IO_STATUS_BLOCK, *PIO_STATUS_BLOCK;
-class IO_STATUS_BLOCK(Structure):
- _fields_ = [
- ("Status", NTSTATUS),
- ("Information", ULONG_PTR),
- ]
- def __get_Pointer(self):
- return PVOID(self.Status)
- def __set_Pointer(self, ptr):
- self.Status = ptr.value
- Pointer = property(__get_Pointer, __set_Pointer)
-
-PIO_STATUS_BLOCK = POINTER(IO_STATUS_BLOCK)
-
-#--- ntdll.dll ----------------------------------------------------------------
-
-# ULONG WINAPI RtlNtStatusToDosError(
-# __in NTSTATUS Status
-# );
-def RtlNtStatusToDosError(Status):
- _RtlNtStatusToDosError = windll.ntdll.RtlNtStatusToDosError
- _RtlNtStatusToDosError.argtypes = [NTSTATUS]
- _RtlNtStatusToDosError.restype = ULONG
- return _RtlNtStatusToDosError(Status)
-
-# NTSYSAPI NTSTATUS NTAPI NtSystemDebugControl(
-# IN SYSDBG_COMMAND Command,
-# IN PVOID InputBuffer OPTIONAL,
-# IN ULONG InputBufferLength,
-# OUT PVOID OutputBuffer OPTIONAL,
-# IN ULONG OutputBufferLength,
-# OUT PULONG ReturnLength OPTIONAL
-# );
-def NtSystemDebugControl(Command, InputBuffer = None, InputBufferLength = None, OutputBuffer = None, OutputBufferLength = None):
- _NtSystemDebugControl = windll.ntdll.NtSystemDebugControl
- _NtSystemDebugControl.argtypes = [SYSDBG_COMMAND, PVOID, ULONG, PVOID, ULONG, PULONG]
- _NtSystemDebugControl.restype = NTSTATUS
-
- # Validate the input buffer
- if InputBuffer is None:
- if InputBufferLength is None:
- InputBufferLength = 0
- else:
- raise ValueError(
- "Invalid call to NtSystemDebugControl: "
- "input buffer length given but no input buffer!")
- else:
- if InputBufferLength is None:
- InputBufferLength = sizeof(InputBuffer)
- InputBuffer = byref(InputBuffer)
-
- # Validate the output buffer
- if OutputBuffer is None:
- if OutputBufferLength is None:
- OutputBufferLength = 0
- else:
- OutputBuffer = ctypes.create_string_buffer("", OutputBufferLength)
- elif OutputBufferLength is None:
- OutputBufferLength = sizeof(OutputBuffer)
-
- # Make the call (with an output buffer)
- if OutputBuffer is not None:
- ReturnLength = ULONG(0)
- ntstatus = _NtSystemDebugControl(Command, InputBuffer, InputBufferLength, byref(OutputBuffer), OutputBufferLength, byref(ReturnLength))
- if ntstatus != 0:
- raise ctypes.WinError( RtlNtStatusToDosError(ntstatus) )
- ReturnLength = ReturnLength.value
- if ReturnLength != OutputBufferLength:
- raise ctypes.WinError(ERROR_BAD_LENGTH)
- return OutputBuffer, ReturnLength
-
- # Make the call (without an output buffer)
- ntstatus = _NtSystemDebugControl(Command, InputBuffer, InputBufferLength, OutputBuffer, OutputBufferLength, None)
- if ntstatus != 0:
- raise ctypes.WinError( RtlNtStatusToDosError(ntstatus) )
-
-ZwSystemDebugControl = NtSystemDebugControl
-
-# NTSTATUS WINAPI NtQueryInformationProcess(
-# __in HANDLE ProcessHandle,
-# __in PROCESSINFOCLASS ProcessInformationClass,
-# __out PVOID ProcessInformation,
-# __in ULONG ProcessInformationLength,
-# __out_opt PULONG ReturnLength
-# );
-def NtQueryInformationProcess(ProcessHandle, ProcessInformationClass, ProcessInformationLength = None):
- _NtQueryInformationProcess = windll.ntdll.NtQueryInformationProcess
- _NtQueryInformationProcess.argtypes = [HANDLE, PROCESSINFOCLASS, PVOID, ULONG, PULONG]
- _NtQueryInformationProcess.restype = NTSTATUS
- if ProcessInformationLength is not None:
- ProcessInformation = ctypes.create_string_buffer("", ProcessInformationLength)
- else:
- if ProcessInformationClass == ProcessBasicInformation:
- ProcessInformation = PROCESS_BASIC_INFORMATION()
- ProcessInformationLength = sizeof(PROCESS_BASIC_INFORMATION)
- elif ProcessInformationClass == ProcessImageFileName:
- unicode_buffer = ctypes.create_unicode_buffer(u"", 0x1000)
- ProcessInformation = UNICODE_STRING(0, 0x1000, addressof(unicode_buffer))
- ProcessInformationLength = sizeof(UNICODE_STRING)
- elif ProcessInformationClass in (ProcessDebugPort, ProcessWow64Information, ProcessWx86Information, ProcessHandleCount, ProcessPriorityBoost):
- ProcessInformation = DWORD()
- ProcessInformationLength = sizeof(DWORD)
- else:
- raise Exception("Unknown ProcessInformationClass, use an explicit ProcessInformationLength value instead")
- ReturnLength = ULONG(0)
- ntstatus = _NtQueryInformationProcess(ProcessHandle, ProcessInformationClass, byref(ProcessInformation), ProcessInformationLength, byref(ReturnLength))
- if ntstatus != 0:
- raise ctypes.WinError( RtlNtStatusToDosError(ntstatus) )
- if ProcessInformationClass == ProcessBasicInformation:
- retval = ProcessInformation
- elif ProcessInformationClass in (ProcessDebugPort, ProcessWow64Information, ProcessWx86Information, ProcessHandleCount, ProcessPriorityBoost):
- retval = ProcessInformation.value
- elif ProcessInformationClass == ProcessImageFileName:
- vptr = ctypes.c_void_p(ProcessInformation.Buffer)
- cptr = ctypes.cast( vptr, ctypes.c_wchar * ProcessInformation.Length )
- retval = cptr.contents.raw
- else:
- retval = ProcessInformation.raw[:ReturnLength.value]
- return retval
-
-ZwQueryInformationProcess = NtQueryInformationProcess
-
-# NTSTATUS WINAPI NtQueryInformationThread(
-# __in HANDLE ThreadHandle,
-# __in THREADINFOCLASS ThreadInformationClass,
-# __out PVOID ThreadInformation,
-# __in ULONG ThreadInformationLength,
-# __out_opt PULONG ReturnLength
-# );
-def NtQueryInformationThread(ThreadHandle, ThreadInformationClass, ThreadInformationLength = None):
- _NtQueryInformationThread = windll.ntdll.NtQueryInformationThread
- _NtQueryInformationThread.argtypes = [HANDLE, THREADINFOCLASS, PVOID, ULONG, PULONG]
- _NtQueryInformationThread.restype = NTSTATUS
- if ThreadInformationLength is not None:
- ThreadInformation = ctypes.create_string_buffer("", ThreadInformationLength)
- else:
- if ThreadInformationClass == ThreadBasicInformation:
- ThreadInformation = THREAD_BASIC_INFORMATION()
- elif ThreadInformationClass == ThreadHideFromDebugger:
- ThreadInformation = BOOLEAN()
- elif ThreadInformationClass == ThreadQuerySetWin32StartAddress:
- ThreadInformation = PVOID()
- elif ThreadInformationClass in (ThreadAmILastThread, ThreadPriorityBoost):
- ThreadInformation = DWORD()
- elif ThreadInformationClass == ThreadPerformanceCount:
- ThreadInformation = LONGLONG() # LARGE_INTEGER
- else:
- raise Exception("Unknown ThreadInformationClass, use an explicit ThreadInformationLength value instead")
- ThreadInformationLength = sizeof(ThreadInformation)
- ReturnLength = ULONG(0)
- ntstatus = _NtQueryInformationThread(ThreadHandle, ThreadInformationClass, byref(ThreadInformation), ThreadInformationLength, byref(ReturnLength))
- if ntstatus != 0:
- raise ctypes.WinError( RtlNtStatusToDosError(ntstatus) )
- if ThreadInformationClass == ThreadBasicInformation:
- retval = ThreadInformation
- elif ThreadInformationClass == ThreadHideFromDebugger:
- retval = bool(ThreadInformation.value)
- elif ThreadInformationClass in (ThreadQuerySetWin32StartAddress, ThreadAmILastThread, ThreadPriorityBoost, ThreadPerformanceCount):
- retval = ThreadInformation.value
- else:
- retval = ThreadInformation.raw[:ReturnLength.value]
- return retval
-
-ZwQueryInformationThread = NtQueryInformationThread
-
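-# Illustrative usage sketch (not part of the original module):
-#
-#     # True if the thread was hidden from debuggers via NtSetInformationThread
-#     hidden = NtQueryInformationThread(hThread, ThreadHideFromDebugger)
-#
-#     # Win32 start address of the thread, as an integer
-#     start = NtQueryInformationThread(hThread, ThreadQuerySetWin32StartAddress)
-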
-# NTSTATUS
-# NtQueryInformationFile(
-# IN HANDLE FileHandle,
-# OUT PIO_STATUS_BLOCK IoStatusBlock,
-# OUT PVOID FileInformation,
-# IN ULONG Length,
-# IN FILE_INFORMATION_CLASS FileInformationClass
-# );
-def NtQueryInformationFile(FileHandle, FileInformationClass, FileInformation, Length):
- _NtQueryInformationFile = windll.ntdll.NtQueryInformationFile
- _NtQueryInformationFile.argtypes = [HANDLE, PIO_STATUS_BLOCK, PVOID, ULONG, DWORD]
- _NtQueryInformationFile.restype = NTSTATUS
- IoStatusBlock = IO_STATUS_BLOCK()
- ntstatus = _NtQueryInformationFile(FileHandle, byref(IoStatusBlock), byref(FileInformation), Length, FileInformationClass)
- if ntstatus != 0:
- raise ctypes.WinError( RtlNtStatusToDosError(ntstatus) )
- return IoStatusBlock
-
-ZwQueryInformationFile = NtQueryInformationFile
-
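-# Illustrative usage sketch (not part of the original module; the
-# FILE_BASIC_INFORMATION structure and the FileBasicInformation constant are
-# assumed to be defined elsewhere in this module family):
-#
-#     info = FILE_BASIC_INFORMATION()
-#     NtQueryInformationFile(hFile, FileBasicInformation, info, sizeof(info))
-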
-# DWORD STDCALL CsrGetProcessId (VOID);
-def CsrGetProcessId():
- _CsrGetProcessId = windll.ntdll.CsrGetProcessId
- _CsrGetProcessId.argtypes = []
- _CsrGetProcessId.restype = DWORD
- return _CsrGetProcessId()
-
-#==============================================================================
-# This calculates the list of exported symbols.
-_all = set(vars().keys()).difference(_all)
-__all__ = [_x for _x in _all if not _x.startswith('_')]
-__all__.sort()
-#==============================================================================
diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/proposal_generator/rrpn.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/proposal_generator/rrpn.py
deleted file mode 100644
index 8535dcd992bc4a83ea05d285f0ec5fae1271f41d..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/proposal_generator/rrpn.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import itertools
-import logging
-from typing import Dict, List
-import torch
-
-from annotator.oneformer.detectron2.config import configurable
-from annotator.oneformer.detectron2.layers import ShapeSpec, batched_nms_rotated, cat
-from annotator.oneformer.detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
-from annotator.oneformer.detectron2.utils.memory import retry_if_cuda_oom
-
-from ..box_regression import Box2BoxTransformRotated
-from .build import PROPOSAL_GENERATOR_REGISTRY
-from .proposal_utils import _is_tracing
-from .rpn import RPN
-
-logger = logging.getLogger(__name__)
-
-
-def find_top_rrpn_proposals(
- proposals,
- pred_objectness_logits,
- image_sizes,
- nms_thresh,
- pre_nms_topk,
- post_nms_topk,
- min_box_size,
- training,
-):
- """
- For each feature map, select the `pre_nms_topk` highest scoring proposals,
-    apply NMS, clip proposals, and remove small boxes. If `training` is True,
-    return the `post_nms_topk` highest scoring proposals among all feature maps;
-    otherwise, return the highest `post_nms_topk` scoring proposals for each
-    feature map.
-
- Args:
- proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5).
- All proposal predictions on the feature maps.
- pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
- image_sizes (list[tuple]): sizes (h, w) for each image
- nms_thresh (float): IoU threshold to use for NMS
- pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
- When RRPN is run on multiple feature maps (as in FPN) this number is per
- feature map.
- post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
- When RRPN is run on multiple feature maps (as in FPN) this number is total,
- over all feature maps.
- min_box_size(float): minimum proposal box side length in pixels (absolute units wrt
- input images).
- training (bool): True if proposals are to be used in training, otherwise False.
- This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
- comment.
-
- Returns:
- proposals (list[Instances]): list of N Instances. The i-th Instances
- stores post_nms_topk object proposals for image i.
- """
- num_images = len(image_sizes)
- device = proposals[0].device
-
- # 1. Select top-k anchor for every level and every image
- topk_scores = [] # #lvl Tensor, each of shape N x topk
- topk_proposals = []
- level_ids = [] # #lvl Tensor, each of shape (topk,)
- batch_idx = torch.arange(num_images, device=device)
- for level_id, proposals_i, logits_i in zip(
- itertools.count(), proposals, pred_objectness_logits
- ):
- Hi_Wi_A = logits_i.shape[1]
- if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing
- num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
- else:
- num_proposals_i = min(Hi_Wi_A, pre_nms_topk)
-
- topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
-
- # each is N x topk
- topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 5
-
- topk_proposals.append(topk_proposals_i)
- topk_scores.append(topk_scores_i)
- level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
-
- # 2. Concat all levels together
- topk_scores = cat(topk_scores, dim=1)
- topk_proposals = cat(topk_proposals, dim=1)
- level_ids = cat(level_ids, dim=0)
-
- # 3. For each image, run a per-level NMS, and choose topk results.
- results = []
- for n, image_size in enumerate(image_sizes):
- boxes = RotatedBoxes(topk_proposals[n])
- scores_per_img = topk_scores[n]
- lvl = level_ids
-
- valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
- if not valid_mask.all():
- if training:
- raise FloatingPointError(
- "Predicted boxes or scores contain Inf/NaN. Training has diverged."
- )
- boxes = boxes[valid_mask]
- scores_per_img = scores_per_img[valid_mask]
- lvl = lvl[valid_mask]
- boxes.clip(image_size)
-
- # filter empty boxes
- keep = boxes.nonempty(threshold=min_box_size)
- if _is_tracing() or keep.sum().item() != len(boxes):
- boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], lvl[keep])
-
- keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh)
- # In Detectron1, there was different behavior during training vs. testing.
- # (https://github.com/facebookresearch/Detectron/issues/459)
- # During training, topk is over the proposals from *all* images in the training batch.
- # During testing, it is over the proposals for each image separately.
- # As a result, the training behavior becomes batch-dependent,
- # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
- # This bug is addressed in Detectron2 to make the behavior independent of batch size.
- keep = keep[:post_nms_topk]
-
- res = Instances(image_size)
- res.proposal_boxes = boxes[keep]
- res.objectness_logits = scores_per_img[keep]
- results.append(res)
- return results
-
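-# Illustrative sketch (not from the original file): with one feature map, two
-# images and 1000 rotated anchors each, the call below keeps the top
-# `pre_nms_topk` boxes per map, runs rotated NMS, and returns one `Instances`
-# holding at most `post_nms_topk` (x, y, w, h, angle) proposals per image:
-#
-#   instances = find_top_rrpn_proposals(
-#       [torch.randn(2, 1000, 5)],   # proposals per feature map
-#       [torch.randn(2, 1000)],      # objectness logits per feature map
-#       image_sizes=[(480, 640), (480, 640)],
-#       nms_thresh=0.7, pre_nms_topk=500, post_nms_topk=100,
-#       min_box_size=0.0, training=False,
-#   )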
-
-@PROPOSAL_GENERATOR_REGISTRY.register()
-class RRPN(RPN):
- """
- Rotated Region Proposal Network described in :paper:`RRPN`.
- """
-
- @configurable
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- if self.anchor_boundary_thresh >= 0:
- raise NotImplementedError(
- "anchor_boundary_thresh is a legacy option not implemented for RRPN."
- )
-
- @classmethod
- def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
- ret = super().from_config(cfg, input_shape)
- ret["box2box_transform"] = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
- return ret
-
- @torch.no_grad()
- def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]):
- """
- Args:
- anchors (list[RotatedBoxes]): anchors for each feature map.
- gt_instances: the ground-truth instances for each image.
-
- Returns:
- list[Tensor]:
- List of #img tensors. i-th element is a vector of labels whose length is
- the total number of anchors across feature maps. Label values are in {-1, 0, 1},
- with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
- list[Tensor]:
- i-th element is a Nx5 tensor, where N is the total number of anchors across
- feature maps. The values are the matched gt boxes for each anchor.
- Values are undefined for those anchors not labeled as 1.
- """
- anchors = RotatedBoxes.cat(anchors)
-
- gt_boxes = [x.gt_boxes for x in gt_instances]
- del gt_instances
-
- gt_labels = []
- matched_gt_boxes = []
- for gt_boxes_i in gt_boxes:
- """
- gt_boxes_i: ground-truth boxes for i-th image
- """
- match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors)
- matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
- # Matching is memory-expensive and may result in CPU tensors. But the result is small
- gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
-
- # A vector of labels (-1, 0, 1) for each anchor
- gt_labels_i = self._subsample_labels(gt_labels_i)
-
- if len(gt_boxes_i) == 0:
- # These values won't be used anyway since the anchor is labeled as background
- matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
- else:
- # TODO wasted indexing computation for ignored boxes
- matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
-
- gt_labels.append(gt_labels_i) # N,AHW
- matched_gt_boxes.append(matched_gt_boxes_i)
- return gt_labels, matched_gt_boxes
-
- @torch.no_grad()
- def predict_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, image_sizes):
- pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
- return find_top_rrpn_proposals(
- pred_proposals,
- pred_objectness_logits,
- image_sizes,
- self.nms_thresh,
- self.pre_nms_topk[self.training],
- self.post_nms_topk[self.training],
- self.min_box_size,
- self.training,
- )
diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/utils/misc.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/utils/misc.py
deleted file mode 100644
index f2bca7733278c3a4b1f145bd7e5da23683b74961..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/utils/misc.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py
-"""
-Misc functions, including distributed helpers.
-
-Mostly copy-paste from torchvision references.
-"""
-from typing import List, Optional
-
-import torch
-import torch.distributed as dist
-import torchvision
-from torch import Tensor
-import warnings
-import torch.nn.functional as F
-import math
-
-def inverse_sigmoid(x, eps=1e-3):
- x = x.clamp(min=0, max=1)
- x1 = x.clamp(min=eps)
- x2 = (1 - x).clamp(min=eps)
- return torch.log(x1/x2)
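-# e.g. inverse_sigmoid(torch.sigmoid(torch.tensor([1.5]))) ≈ tensor([1.5000]);
-# the eps clamps keep the result finite as x approaches 0 or 1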
-
-def _no_grad_trunc_normal_(tensor, mean, std, a, b):
- # Cut & paste from PyTorch official master until it's in a few official releases - RW
- # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
- def norm_cdf(x):
- # Computes standard normal cumulative distribution function
- return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
- if (mean < a - 2 * std) or (mean > b + 2 * std):
- warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
- "The distribution of values may be incorrect.",
- stacklevel=2)
-
- with torch.no_grad():
- # Values are generated by using a truncated uniform distribution and
- # then using the inverse CDF for the normal distribution.
- # Get upper and lower cdf values
- l = norm_cdf((a - mean) / std)
- u = norm_cdf((b - mean) / std)
-
- # Uniformly fill tensor with values from [l, u], then translate to
- # [2l-1, 2u-1].
- tensor.uniform_(2 * l - 1, 2 * u - 1)
-
- # Use inverse cdf transform for normal distribution to get truncated
- # standard normal
- tensor.erfinv_()
-
- # Transform to proper mean, std
- tensor.mul_(std * math.sqrt(2.))
- tensor.add_(mean)
-
- # Clamp to ensure it's in the proper range
- tensor.clamp_(min=a, max=b)
- return tensor
-
-def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
- # type: (Tensor, float, float, float, float) -> Tensor
- r"""Fills the input Tensor with values drawn from a truncated
- normal distribution. The values are effectively drawn from the
- normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
- with values outside :math:`[a, b]` redrawn until they are within
- the bounds. The method used for generating the random values works
- best when :math:`a \leq \text{mean} \leq b`.
- Args:
- tensor: an n-dimensional `torch.Tensor`
- mean: the mean of the normal distribution
- std: the standard deviation of the normal distribution
- a: the minimum cutoff value
- b: the maximum cutoff value
- Examples:
- >>> w = torch.empty(3, 5)
- >>> nn.init.trunc_normal_(w)
- """
- return _no_grad_trunc_normal_(tensor, mean, std, a, b)
-
-def resize(input,
- size=None,
- scale_factor=None,
- mode='nearest',
- align_corners=None,
- warning=True):
- if warning:
- if size is not None and align_corners:
- input_h, input_w = tuple(int(x) for x in input.shape[2:])
- output_h, output_w = tuple(int(x) for x in size)
-            if output_h > input_h or output_w > input_w:
- if ((output_h > 1 and output_w > 1 and input_h > 1
- and input_w > 1) and (output_h - 1) % (input_h - 1)
- and (output_w - 1) % (input_w - 1)):
- warnings.warn(
- f'When align_corners={align_corners}, '
-                        'the output would be better aligned if '
-                        f'input size {(input_h, input_w)} were `x+1` and '
-                        f'output size {(output_h, output_w)} were `nx+1`')
- if isinstance(size, torch.Size):
- size = tuple(int(x) for x in size)
- return F.interpolate(input, size, scale_factor, mode, align_corners)
-
-def _max_by_axis(the_list):
- # type: (List[List[int]]) -> List[int]
- maxes = the_list[0]
- for sublist in the_list[1:]:
- for index, item in enumerate(sublist):
- maxes[index] = max(maxes[index], item)
- return maxes
-
-
-class NestedTensor(object):
- def __init__(self, tensors, mask: Optional[Tensor]):
- self.tensors = tensors
- self.mask = mask
-
- def to(self, device):
- # type: (Device) -> NestedTensor # noqa
- cast_tensor = self.tensors.to(device)
- mask = self.mask
- if mask is not None:
- assert mask is not None
- cast_mask = mask.to(device)
- else:
- cast_mask = None
- return NestedTensor(cast_tensor, cast_mask)
-
- def decompose(self):
- return self.tensors, self.mask
-
- def __repr__(self):
- return str(self.tensors)
-
-
-def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
- # TODO make this more general
- if tensor_list[0].ndim == 3:
- if torchvision._is_tracing():
- # nested_tensor_from_tensor_list() does not export well to ONNX
- # call _onnx_nested_tensor_from_tensor_list() instead
- return _onnx_nested_tensor_from_tensor_list(tensor_list)
-
- # TODO make it support different-sized images
- max_size = _max_by_axis([list(img.shape) for img in tensor_list])
- # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
- batch_shape = [len(tensor_list)] + max_size
- b, c, h, w = batch_shape
- dtype = tensor_list[0].dtype
- device = tensor_list[0].device
- tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
- mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
- for img, pad_img, m in zip(tensor_list, tensor, mask):
- pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
- m[: img.shape[1], : img.shape[2]] = False
- else:
- raise ValueError("not supported")
- return NestedTensor(tensor, mask)
-
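-# Illustrative sketch (not from the original file): two RGB images of
-# different sizes are zero-padded to a common (2, 3, 240, 300) batch;
-# mask[i] is False on real pixels and True on padding.
-#
-#   imgs = [torch.rand(3, 200, 300), torch.rand(3, 240, 260)]
-#   nt = nested_tensor_from_tensor_list(imgs)
-#   assert nt.tensors.shape == (2, 3, 240, 300)
-#   assert nt.mask.shape == (2, 240, 300)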
-
-# _onnx_nested_tensor_from_tensor_list() is an implementation of
-# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
-@torch.jit.unused
-def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
- max_size = []
- for i in range(tensor_list[0].dim()):
- max_size_i = torch.max(
- torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
- ).to(torch.int64)
- max_size.append(max_size_i)
- max_size = tuple(max_size)
-
- # work around for
- # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
- # m[: img.shape[1], :img.shape[2]] = False
- # which is not yet supported in onnx
- padded_imgs = []
- padded_masks = []
- for img in tensor_list:
- padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
- padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
- padded_imgs.append(padded_img)
-
- m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
- padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
- padded_masks.append(padded_mask.to(torch.bool))
-
- tensor = torch.stack(padded_imgs)
- mask = torch.stack(padded_masks)
-
- return NestedTensor(tensor, mask=mask)
-
-
-def is_dist_avail_and_initialized():
- if not dist.is_available():
- return False
- if not dist.is_initialized():
- return False
- return True
diff --git a/spaces/TIMAX/Logic-Translator/README.md b/spaces/TIMAX/Logic-Translator/README.md
deleted file mode 100644
index 5995e15826514e8bfb907e404fd487b8b802c95d..0000000000000000000000000000000000000000
--- a/spaces/TIMAX/Logic-Translator/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-title: Logic Translator
-emoji: 🏢
-colorFrom: yellow
-colorTo: gray
-sdk: gradio
-sdk_version: 3.0.17
-app_file: app.py
-pinned: true
----
-
-## :hand: Intro
-
-Type English for logic symbols! This is a simple string-replacement program dedicated to typing logic symbols. Since those symbols are not on a standard keyboard, typing them is a bit cumbersome; this tool lets you type English words to get them instead. Have fun!
-
-> Permalink: https://huggingface.co/spaces/TIMAX/Logic-Translator
-
-## :information_source: Usage
-
-Input your FOL sentence in the **string** box. If you want to type a specific logic symbol at some point in your FOL sentence, just type its corresponding tag (an uppercase English word) instead, and keep everything else the same.
-
-| Logic Symbol | Tag |
-| :----------: | :---: |
-| ∧ | AND |
-| ∨ | OR |
-| ¬ | NOT |
-| ⊕ | XR |
-| → | IMPLY |
-| ↔ | EQUIV |
-| ∀ | ALL |
-| ∃ | EXIST |
-
-The translation is **real-time**. Once your input is finished, you can copy the result directly from the **output** box.
-
-Note that you can input **multi-line** sentences!
-
-You will find five usage examples below the boxes.
-
-## :scroll: Source Code
-
-Very simple. The core of it is just like:
-
-```python
-def logic(string: str):
- for word, symbol in logic_dict.items():
- string = string.replace(word, symbol)
- return string
-```
-
-where `logic_dict` stores all the translation between tags and logic symbols. The user interface is built with [Gradio](https://gradio.app/).
-
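-For illustration, `logic_dict` might map tags to symbols along these lines (its exact contents live in the app code, so treat this as a sketch):
-
-```python
-logic_dict = {
-    'IMPLY': '→', 'EQUIV': '↔', 'EXIST': '∃',
-    'AND': '∧', 'NOT': '¬', 'ALL': '∀',
-    'XR': '⊕', 'OR': '∨',
-}
-
-logic('ALL x (Man(x) IMPLY Dies(x))')  # '∀ x (Man(x) → Dies(x))'
-```
-
-Because the replacement is a plain, order-dependent `str.replace`, a tag hidden inside an ordinary word gets replaced too; that is exactly the pitfall described in the Notice below.
-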
-## :warning: Notice
-
-Please don’t include the ==exact same uppercase spelling== of any of the tags in your FOL sentence unless you mean it, or the program will replace it without thinking. For example, if your input sentence is **WALL(berlin wall)**, meaning “berlin wall is a wall”, the program output would be **W∀(berlin wall)**: the **ALL** after **W** gets replaced by **∀**, which is not what you want.
-
-## :email: Contact
-
-If you notice any problem or have any suggestion, please contact me through [E-mail](mailto:qi11@illinois.edu) or Slack. Thanks!
diff --git a/spaces/TabPFN/TabPFNEvaluation/TabPFN/priors/mlp.py b/spaces/TabPFN/TabPFNEvaluation/TabPFN/priors/mlp.py
deleted file mode 100644
index e489556e52196ca16463c7c8f0e25a69aaa3c630..0000000000000000000000000000000000000000
--- a/spaces/TabPFN/TabPFNEvaluation/TabPFN/priors/mlp.py
+++ /dev/null
@@ -1,173 +0,0 @@
-import random
-import math
-
-import torch
-from torch import nn
-import numpy as np
-
-from utils import default_device
-from .utils import get_batch_to_dataloader
-
-class GaussianNoise(nn.Module):
- def __init__(self, std, device):
- super().__init__()
- self.std = std
- self.device=device
-
- def forward(self, x):
- return x + torch.normal(torch.zeros_like(x), self.std)
-
-
-def causes_sampler_f(num_causes):
- means = np.random.normal(0, 1, (num_causes))
- std = np.abs(np.random.normal(0, 1, (num_causes)) * means)
- return means, std
-
-def get_batch(batch_size, seq_len, num_features, hyperparameters, device=default_device, num_outputs=1, sampling='normal', **kwargs):
- if ('mix_activations' in hyperparameters) and hyperparameters['mix_activations']:
- s = hyperparameters['prior_mlp_activations']()
- hyperparameters['prior_mlp_activations'] = lambda : s
-
- class MLP(torch.nn.Module):
- def __init__(self, hyperparameters):
- super(MLP, self).__init__()
-
- with torch.no_grad():
-
- for key in hyperparameters:
- setattr(self, key, hyperparameters[key])
-
- assert (self.num_layers >= 2)
-
- if 'verbose' in hyperparameters and self.verbose:
- print({k : hyperparameters[k] for k in ['is_causal', 'num_causes', 'prior_mlp_hidden_dim'
- , 'num_layers', 'noise_std', 'y_is_effect', 'pre_sample_weights', 'prior_mlp_dropout_prob'
- , 'pre_sample_causes']})
-
- if self.is_causal:
- self.prior_mlp_hidden_dim = max(self.prior_mlp_hidden_dim, num_outputs + 2 * num_features)
- else:
- self.num_causes = num_features
-
- # This means that the mean and standard deviation of each cause is determined in advance
- if self.pre_sample_causes:
- self.causes_mean, self.causes_std = causes_sampler_f(self.num_causes)
- self.causes_mean = torch.tensor(self.causes_mean, device=device).unsqueeze(0).unsqueeze(0).tile(
- (seq_len, 1, 1))
- self.causes_std = torch.tensor(self.causes_std, device=device).unsqueeze(0).unsqueeze(0).tile(
- (seq_len, 1, 1))
-
- def generate_module(layer_idx, out_dim):
- # Determine std of each noise term in initialization, so that is shared in runs
- # torch.abs(torch.normal(torch.zeros((out_dim)), self.noise_std)) - Change std for each dimension?
- noise = (GaussianNoise(torch.abs(torch.normal(torch.zeros(size=(1, out_dim), device=device), float(self.noise_std))), device=device)
- if self.pre_sample_weights else GaussianNoise(float(self.noise_std), device=device))
- return [
- nn.Sequential(*[self.prior_mlp_activations()
- , nn.Linear(self.prior_mlp_hidden_dim, out_dim)
- , noise])
- ]
-
- self.layers = [nn.Linear(self.num_causes, self.prior_mlp_hidden_dim, device=device)]
- self.layers += [module for layer_idx in range(self.num_layers-1) for module in generate_module(layer_idx, self.prior_mlp_hidden_dim)]
- if not self.is_causal:
- self.layers += generate_module(-1, num_outputs)
- self.layers = nn.Sequential(*self.layers)
-
- # Initialize Model parameters
- for i, (n, p) in enumerate(self.layers.named_parameters()):
- if self.block_wise_dropout:
- if len(p.shape) == 2: # Only apply to weight matrices and not bias
- nn.init.zeros_(p)
- # TODO: N blocks should be a setting
- n_blocks = random.randint(1, math.ceil(math.sqrt(min(p.shape[0], p.shape[1]))))
- w, h = p.shape[0] // n_blocks, p.shape[1] // n_blocks
- keep_prob = (n_blocks*w*h) / p.numel()
- for block in range(0, n_blocks):
- nn.init.normal_(p[w * block: w * (block+1), h * block: h * (block+1)], std=self.init_std / keep_prob**(1/2))
- else:
- if len(p.shape) == 2: # Only apply to weight matrices and not bias
- dropout_prob = self.prior_mlp_dropout_prob if i > 0 else 0.0 # Don't apply dropout in first layer
- dropout_prob = min(dropout_prob, 0.99)
- nn.init.normal_(p, std=self.init_std / (1. - dropout_prob)**(1/2))
- p *= torch.bernoulli(torch.zeros_like(p) + 1. - dropout_prob)
-
- def forward(self):
- def sample_normal():
- if self.pre_sample_causes:
- causes = torch.normal(self.causes_mean, self.causes_std.abs()).float()
- else:
- causes = torch.normal(0., 1., (seq_len, 1, self.num_causes), device=device).float()
- return causes
-
- if self.sampling == 'normal':
- causes = sample_normal()
- elif self.sampling == 'mixed':
- zipf_p, multi_p, normal_p = random.random() * 0.66, random.random() * 0.66, random.random() * 0.66
- def sample_cause(n):
- if random.random() > normal_p:
- if self.pre_sample_causes:
- return torch.normal(self.causes_mean[:, :, n], self.causes_std[:, :, n].abs()).float()
- else:
- return torch.normal(0., 1., (seq_len, 1), device=device).float()
- elif random.random() > multi_p:
- x = torch.multinomial(torch.rand((random.randint(2, 10))), seq_len, replacement=True).to(device).unsqueeze(-1).float()
- x = (x - torch.mean(x)) / torch.std(x)
- return x
- else:
- x = torch.minimum(torch.tensor(np.random.zipf(2.0 + random.random() * 2, size=(seq_len)),
- device=device).unsqueeze(-1).float(), torch.tensor(10.0, device=device))
- return x - torch.mean(x)
- causes = torch.cat([sample_cause(n).unsqueeze(-1) for n in range(self.num_causes)], -1)
- elif self.sampling == 'uniform':
- causes = torch.rand((seq_len, 1, self.num_causes), device=device)
- else:
-                raise ValueError(f'Sampling is set to an invalid setting: {self.sampling}.')
-
- outputs = [causes]
- for layer in self.layers:
- outputs.append(layer(outputs[-1]))
- outputs = outputs[2:]
-
- if self.is_causal:
- ## Sample nodes from graph if model is causal
- outputs_flat = torch.cat(outputs, -1)
-
- if self.in_clique:
- random_perm = random.randint(0, outputs_flat.shape[-1] - num_outputs - num_features) + torch.randperm(num_outputs + num_features, device=device)
- else:
- random_perm = torch.randperm(outputs_flat.shape[-1]-1, device=device)
-
- random_idx_y = list(range(-num_outputs, -0)) if self.y_is_effect else random_perm[0:num_outputs]
- random_idx = random_perm[num_outputs:num_outputs + num_features]
-
- if self.sort_features:
- random_idx, _ = torch.sort(random_idx)
- y = outputs_flat[:, :, random_idx_y]
-
- x = outputs_flat[:, :, random_idx]
- else:
- y = outputs[-1][:, :, :]
- x = causes
-
- if bool(torch.any(torch.isnan(x)).detach().cpu().numpy()) or bool(torch.any(torch.isnan(y)).detach().cpu().numpy()):
- x[:] = 0.0
- y[:] = 1.0
-
- return x, y
-
- model = MLP(hyperparameters).to(device)
-
-    sample = [model() for _ in range(batch_size)]
-
- x, y = zip(*sample)
- y = torch.cat(y, 1).detach().squeeze(2)
- x = torch.cat(x, 1).detach()
- x = x[..., torch.randperm(x.shape[-1])]
-
- return x, y, y
-
-
-DataLoader = get_batch_to_dataloader(get_batch)
-DataLoader.num_outputs = 1
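-
-# Illustrative sketch (hypothetical hyperparameter values; the real settings
-# come from TabPFN's prior configuration elsewhere in this repo):
-#
-#   hps = dict(num_layers=3, prior_mlp_hidden_dim=64, noise_std=0.1,
-#              y_is_effect=True, pre_sample_weights=False, is_causal=True,
-#              num_causes=10, prior_mlp_dropout_prob=0.1, pre_sample_causes=True,
-#              block_wise_dropout=False, init_std=1.0, sort_features=True,
-#              in_clique=False, sampling='normal',
-#              prior_mlp_activations=lambda: torch.nn.Tanh())
-#   x, y, target_y = get_batch(batch_size=4, seq_len=100, num_features=5,
-#                              hyperparameters=hps)
-#   # x: (seq_len, batch, num_features); y and target_y: (seq_len, batch)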
-
diff --git a/spaces/TungB/mini-photoshop/lama.py b/spaces/TungB/mini-photoshop/lama.py
deleted file mode 100644
index 889c1d288a187ecc24caa8295577a25d7ad711d7..0000000000000000000000000000000000000000
--- a/spaces/TungB/mini-photoshop/lama.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import os
-from os.path import abspath, dirname
-from typing import Optional
-
-import cv2
-import gdown
-import numpy as np
-import torch
-from skimage.measure import regionprops
-
-from utils import norm_img, pad_img_to_modulo, resize_max_size, dilate_mask
-
-LAMA_MODEL_URL = (
- "https://drive.google.com/uc?id=18boxtgk5N69v3eltQMkSVz55a85TomD0"
-)
-LAMA_MODEL_LOCAL = os.path.join(
- dirname(abspath(__file__)), "big-lama.pt"
-)
-
-
-class LaMa:
- """LaMa Model"""
-
- pad_mod = 8
- pad_to_square = False
- min_size: Optional[int] = None
-
- def __init__(self, device):
- """Init class
-
- Args:
- device (str): device
- """
- self.device = device
- self.init_model()
-
- def init_model(self):
- """Init model"""
- if not os.path.exists(LAMA_MODEL_LOCAL):
- os.makedirs(dirname(LAMA_MODEL_LOCAL), exist_ok=True)
-            # gdown expects an output *path* and opens the file itself
-            gdown.download(url=LAMA_MODEL_URL, output=LAMA_MODEL_LOCAL)
- model_path = LAMA_MODEL_LOCAL
- model = torch.jit.load(model_path, map_location="cpu")
- model = model.to(self.device)
- model.eval()
- self.model = model
-
- def forward(self, image, mask):
- """Forward model
-
- Args:
- image (np.ndarray): Image (RGB)
- mask (np.ndarray): Mask
-
- Returns:
- np.ndarray: Inpainted image (BGR)
- """
- image = norm_img(image)
- mask = norm_img(mask)
-
- mask = (mask > 0) * 1
- image = torch.from_numpy(image).unsqueeze(0).to(self.device)
- mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)
-
- inpainted_image = self.model(image, mask)
-
- cur_res = inpainted_image[0].permute(1, 2, 0).detach().cpu().numpy()
- cur_res = np.clip(cur_res * 255, 0, 255).astype("uint8")
- cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR)
- return cur_res
-
- def _pad_forward(self, image, mask):
- """Padding image and mask, then forward model
-
- Args:
- image (np.ndarray): Image
- mask (np.ndarray): Mask
-
- Returns:
- np.ndarray: Inpainted Image
- """
- origin_height, origin_width = image.shape[:2]
-
- regions = regionprops(mask)
- for prop in regions:
- y1, x1, y2, x2 = prop.bbox
- x1, y1 = max(x1 - self.pad_mod, 0), max(y1 - self.pad_mod, 0)
- x2, y2 = min(x2 + self.pad_mod, origin_width), min(
- y2 + self.pad_mod, origin_height
- )
- mask[y1:y2, x1:x2] = 255
-
- pad_image = pad_img_to_modulo(
- image,
- mod=self.pad_mod,
- square=self.pad_to_square,
- min_size=self.min_size,
- )
- pad_mask = pad_img_to_modulo(
- mask,
- mod=self.pad_mod,
- square=self.pad_to_square,
- min_size=self.min_size,
- )
-
- result = self.forward(pad_image, pad_mask)
- result = result[0:origin_height, 0:origin_width, :]
-
- original_pixel_indices = mask != 255
- result[original_pixel_indices] = image[:, :, ::-1][
- original_pixel_indices
- ]
- return result
-
- @torch.no_grad()
- def __call__(self, image, mask, resize_limit=512):
- """
- Args:
- image (np.ndarray): Image
- mask (np.ndarray): Mask
- resize_limit (int, optional): max size. Defaults to 512.
-
- Returns:
- np.ndarray: Inpainted Image
- """
-
- if resize_limit and max(image.shape) > resize_limit:
- origin_size = image.shape[:2]
- downsize_image = resize_max_size(image, size_limit=resize_limit)
- downsize_mask = resize_max_size(mask, size_limit=resize_limit)
- inpaint_result = self._pad_forward(downsize_image, downsize_mask)
- # only paste masked area result
- inpaint_result = cv2.resize(
- inpaint_result,
- (origin_size[1], origin_size[0]),
- interpolation=cv2.INTER_CUBIC,
- )
- else:
- inpaint_result = self._pad_forward(image, mask)
-
- dilation_mask = dilate_mask(mask, dilate_size=30)
- original_pixel_indices = dilation_mask != 255
- inpaint_result[original_pixel_indices] = image[:, :, ::-1][
- original_pixel_indices
- ]
-
- return inpaint_result
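-
-# Illustrative usage sketch (not part of the original file; assumes an RGB
-# uint8 image and a uint8 mask where 255 marks the region to inpaint):
-#
-#   model = LaMa(device="cuda" if torch.cuda.is_available() else "cpu")
-#   result_bgr = model(image_rgb, mask, resize_limit=512)
-#   cv2.imwrite("inpainted.png", result_bgr)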
diff --git a/spaces/UjjwalVIT/Text_analysis_and_metadata_app/app.py b/spaces/UjjwalVIT/Text_analysis_and_metadata_app/app.py
deleted file mode 100644
index eea44455568000ecebc9745c4b0b3bbfb417ede4..0000000000000000000000000000000000000000
--- a/spaces/UjjwalVIT/Text_analysis_and_metadata_app/app.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import streamlit as st
-import streamlit.components.v1 as stc  # used below as stc.html(HTML_BANNER)
-import sumy
-
-# using sumy library for summarization
-from sumy.parsers.plaintext import PlaintextParser
-from sumy.nlp.tokenizers import Tokenizer
-from sumy.summarizers.lex_rank import LexRankSummarizer
-from sumy.summarizers.text_rank import TextRankSummarizer
-import pandas as pd
-import matplotlib.pyplot as plt
-# import seaborn
-from transformers import BartForConditionalGeneration, BartTokenizer
-from transformers import T5ForConditionalGeneration, T5Tokenizer
-from rouge import Rouge
-import altair as at
-import torch
-from Text_analysis import *
-from Metadata import *
-from app_utils import *
-from PIL import Image
-
-
-HTML_BANNER = """
-
-
Summary app
-
- """
-def load_image(file):
- img = Image.open(file)
- return img
-
-
-def main():
- menu=['Summarization','Text-Analysis','Meta-Data']
- choice=st.sidebar.selectbox("Menu",menu)
-
-
- if choice=='Summarization':
- stc.html(HTML_BANNER)
- st.image(load_image('Text-Summary.png'))
-        st.subheader('Summarization')
- raw_text=st.text_area("Enter the text you want to summarize")
- if st.button("Summarize"):
- with st.expander("Original Text"):
- st.write(raw_text)
- c1, c2 = st.columns(2)
-
- with c1:
-
- with st.expander("LexRank Summary"):
- try:
- summary = sumy_summarizer(raw_text)
- document_len={"Original":len(raw_text),
- "Summary":len(summary)
- }
- st.write(document_len)
- st.write(summary)
- st.info("Rouge Score")
- score=evaluate_summary(summary,raw_text)
- st.write(score.T)
- st.subheader(" ")
- score['metrics']=score.index
- c=at.Chart(score).mark_bar().encode(
- x='metrics',y='rouge-1'
- )
- st.altair_chart(c)
- except:
- st.warning('Insufficient data')
-
-
-
- with c2:
- with st.expander("TextRank Summary"):
- try:
- text_summary=sumy_text_summarizer(raw_text)
- document_len={"Original":len(raw_text),
- "Summary":len(summary)
- }
- st.write(document_len)
- st.write(text_summary)
-
- st.info("Rouge Score")
- score=evaluate_summary(text_summary,raw_text)
- st.write(score.T)
- st.subheader(" ")
- score['metrics']=score.index
- c=at.Chart(score).mark_bar().encode(
- x='metrics',y='rouge-1'
- )
- st.altair_chart(c)
-
- except:
- st.warning('Insufficient data')
-
-
- st.subheader("Bart Sumary")
- with st.expander("Bart Summary"):
- try:
- bart_summ = bart_summary(raw_text)
- document_len={"Original":len(raw_text),
- "Summary":len(summary)
- }
- st.write(document_len)
- st.write(bart_summ)
- st.info("Rouge Score")
- score=evaluate_summary(bart_summ,raw_text)
- st.write(score.T)
- st.subheader(" ")
- score['metrics']=score.index
- c=at.Chart(score).mark_bar().encode(
- x='metrics',y='rouge-1'
- )
- st.altair_chart(c)
- except:
- st.warning('Insufficient data')
-
- st.subheader("T5 Sumarization")
- with st.expander("T5 Summary"):
- try:
- T5_sum = T5_summary(raw_text)
- document_len={"Original":len(raw_text),
- "Summary":len(summary)
- }
- st.write(document_len)
- st.write(T5_sum)
- st.info("Rouge Score")
- score=evaluate_summary(T5_sum,raw_text)
- st.write(score.T)
- st.subheader(" ")
- score['metrics']=score.index
- c=at.Chart(score).mark_bar().encode(
- x='metrics',y='rouge-1'
- )
- st.altair_chart(c)
- except:
- st.warning('Insufficient data')
-
-
-
- elif choice=='Text-Analysis':
- text_analysis()
- else:
- metadata()
-
-
-if __name__=='__main__':
- main()
diff --git a/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_hed.py b/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_hed.py
deleted file mode 100644
index b71da04640c23fb906dbae0ec97e0f8a24cbf87b..0000000000000000000000000000000000000000
--- a/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_hed.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import gradio as gr
-import torch
-from controlnet_aux import HEDdetector
-from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
-from PIL import Image
-
-from diffusion_webui.utils.model_list import (
- controlnet_hed_model_list,
- stable_model_list,
-)
-from diffusion_webui.utils.scheduler_list import (
- SCHEDULER_LIST,
- get_scheduler_list,
-)
-
-
-class StableDiffusionControlNetHEDGenerator:
- def __init__(self):
- self.pipe = None
-
- def load_model(self, stable_model_path, controlnet_model_path, scheduler):
- if self.pipe is None:
- controlnet = ControlNetModel.from_pretrained(
- controlnet_model_path, torch_dtype=torch.float16
- )
-
- self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
- pretrained_model_name_or_path=stable_model_path,
- controlnet=controlnet,
- safety_checker=None,
- torch_dtype=torch.float16,
- )
-
- self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
- self.pipe.to("cuda")
- self.pipe.enable_xformers_memory_efficient_attention()
-
- return self.pipe
-
- def controlnet_hed(self, image_path: str):
- hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
- image = Image.open(image_path)
- image = hed(image)
-
- return image
-
- def generate_image(
- self,
- image_path: str,
- stable_model_path: str,
- controlnet_hed_model_path: str,
- prompt: str,
- negative_prompt: str,
- num_images_per_prompt: int,
- guidance_scale: int,
- num_inference_step: int,
-        scheduler: str,
- seed_generator: int,
- ):
-
- image = self.controlnet_hed(image_path=image_path)
-
- pipe = self.load_model(
- stable_model_path=stable_model_path,
- controlnet_model_path=controlnet_hed_model_path,
-            scheduler=scheduler,
- )
-
- if seed_generator == 0:
- random_seed = torch.randint(0, 1000000, (1,))
- generator = torch.manual_seed(random_seed)
- else:
- generator = torch.manual_seed(seed_generator)
-
- output = pipe(
- prompt=prompt,
- image=image,
- negative_prompt=negative_prompt,
- num_images_per_prompt=num_images_per_prompt,
- num_inference_steps=num_inference_step,
- guidance_scale=guidance_scale,
- generator=generator,
- ).images
-
- return output
-
- def app():
- with gr.Blocks():
- with gr.Row():
- with gr.Column():
- controlnet_hed_image_file = gr.Image(
- type="filepath", label="Image"
- )
- controlnet_hed_prompt = gr.Textbox(
- lines=1,
- show_label=False,
- placeholder="Prompt",
- )
-
- controlnet_hed_negative_prompt = gr.Textbox(
- lines=1,
- show_label=False,
- placeholder="Negative Prompt",
- )
-
- with gr.Row():
- with gr.Column():
- controlnet_hed_stable_model_id = gr.Dropdown(
- choices=stable_model_list,
- value=stable_model_list[0],
- label="Stable Model Id",
- )
- controlnet_hed_guidance_scale = gr.Slider(
- minimum=0.1,
- maximum=15,
- step=0.1,
- value=7.5,
- label="Guidance Scale",
- )
- controlnet_hed_num_inference_step = gr.Slider(
- minimum=1,
- maximum=100,
- step=1,
- value=50,
- label="Num Inference Step",
- )
-
- controlnet_hed_num_images_per_prompt = gr.Slider(
- minimum=1,
- maximum=10,
- step=1,
- value=1,
- label="Number Of Images",
- )
-
- with gr.Row():
- with gr.Column():
- controlnet_hed_model_id = gr.Dropdown(
- choices=controlnet_hed_model_list,
- value=controlnet_hed_model_list[0],
- label="ControlNet Model Id",
- )
- controlnet_hed_scheduler = gr.Dropdown(
- choices=SCHEDULER_LIST,
- value=SCHEDULER_LIST[0],
- label="Scheduler",
- )
-
- controlnet_hed_seed_generator = gr.Number(
- minimum=0,
- maximum=1000000,
- step=1,
- value=0,
- label="Seed Generator",
- )
-
-                        controlnet_hed_predict = gr.Button(value="Generate")
-
- with gr.Column():
- output_image = gr.Gallery(
- label="Generated images",
- show_label=False,
- elem_id="gallery",
- ).style(grid=(1, 2))
-
- controlnet_hed_predict.click(
- fn=StableDiffusionControlNetHEDGenerator().generate_image,
- inputs=[
- controlnet_hed_image_file,
- controlnet_hed_stable_model_id,
- controlnet_hed_model_id,
- controlnet_hed_prompt,
- controlnet_hed_negative_prompt,
- controlnet_hed_num_images_per_prompt,
- controlnet_hed_guidance_scale,
- controlnet_hed_num_inference_step,
- controlnet_hed_scheduler,
- controlnet_hed_seed_generator,
- ],
- outputs=[output_image],
- )
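-
-# Illustrative usage sketch (argument values are examples only):
-#
-#   generator = StableDiffusionControlNetHEDGenerator()
-#   images = generator.generate_image(
-#       image_path="input.png",
-#       stable_model_path=stable_model_list[0],
-#       controlnet_hed_model_path=controlnet_hed_model_list[0],
-#       prompt="a photo of a cat",
-#       negative_prompt="blurry, low quality",
-#       num_images_per_prompt=1,
-#       guidance_scale=7.5,
-#       num_inference_step=30,
-#       scheduler=SCHEDULER_LIST[0],
-#       seed_generator=0,
-#   )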
diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/gen_doc/__init__.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/gen_doc/__init__.py
deleted file mode 100644
index 31e6aeacd89726b7e4428d47fb36b465524ed723..0000000000000000000000000000000000000000
--- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/gen_doc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from . import gen_notebooks, nbdoc, core, doctest, nbtest
diff --git a/spaces/Xenova/sponsorblock-ml/src/model.py b/spaces/Xenova/sponsorblock-ml/src/model.py
deleted file mode 100644
index 95c0a4f13afb36097a74ce3033a88de9f1bb9809..0000000000000000000000000000000000000000
--- a/spaces/Xenova/sponsorblock-ml/src/model.py
+++ /dev/null
@@ -1,235 +0,0 @@
-from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, AutoConfig, AutoModelForSequenceClassification, TrainingArguments
-from shared import CustomTokens, GeneralArguments
-from dataclasses import dataclass, field
-from typing import Optional, Union
-import torch
-import classify
-import base64
-import re
-import requests
-import json
-import logging
-
-logging.basicConfig()
-logger = logging.getLogger(__name__)
-
-# Public innertube key (b64 encoded so that it is not incorrectly flagged)
-INNERTUBE_KEY = base64.b64decode(
- b'QUl6YVN5QU9fRkoyU2xxVThRNFNURUhMR0NpbHdfWTlfMTFxY1c4').decode()
-
-YT_CONTEXT = {
- 'client': {
- 'userAgent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36,gzip(gfe)',
- 'clientName': 'WEB',
- 'clientVersion': '2.20211221.00.00',
- }
-}
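-
-# Illustrative sketch (not from this file): the key and context above are the
-# standard web-client parameters for YouTube's public innertube endpoints,
-# typically used along the lines of
-#
-#   requests.post(
-#       'https://www.youtube.com/youtubei/v1/next?key=' + INNERTUBE_KEY,
-#       json={'context': YT_CONTEXT, 'videoId': video_id},
-#   )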
-_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;\s*(?:var\s+meta|y in tt?fn(tt,y,{enumerable:!0,configurable:!0,writable:!0,value:n}):tt[y]=n;var jt=(tt,y,n)=>(gn(tt,typeof y!="symbol"?y+"":y,n),n);(function(){var tt;"use strict";function _mergeNamespaces(y,n){return n.forEach(function(u){u&&typeof u!="string"&&!Array.isArray(u)&&Object.keys(u).forEach(function(d){if(d!=="default"&&!(d in y)){var l=Object.getOwnPropertyDescriptor(u,d);Object.defineProperty(y,d,l.get?l:{enumerable:!0,get:function(){return u[d]}})}})}),Object.freeze(y)}function dispatchCallback(y,n){y!==null&&y(n)}function reverseDictionary(y){return Object.fromEntries(Object.entries(y).map(([n,u])=>[u,n]))}function escapeRegExp(y){return y.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}const Callable=class{constructor(){let y=function(...n){return y._call(...n)};return Object.setPrototypeOf(y,new.target.prototype)}_call(...y){throw Error("Must implement _call method in subclass")}};function isTypedArray(y){var n,u,d;return((d=(u=(n=y==null?void 0:y.prototype)==null?void 0:n.__proto__)==null?void 0:u.constructor)==null?void 0:d.name)==="TypedArray"}function isIntegralNumber(y){return Number.isInteger(y)||typeof y=="bigint"}function exists(y){return y!=null}function mergeArrays(...y){return Array.prototype.concat.apply([],y)}var sharp={},ONNX_NODE=Object.freeze({__proto__:null,default:sharp});function getDefaultExportFromCjs(y){return y&&y.__esModule&&Object.prototype.hasOwnProperty.call(y,"default")?y.default:y}function getAugmentedNamespace(y){if(y.__esModule)return y;var n=y.default;if(typeof n=="function"){var u=function d(){return this instanceof d?Reflect.construct(n,arguments,this.constructor):n.apply(this,arguments)};u.prototype=n.prototype}else u={};return Object.defineProperty(u,"__esModule",{value:!0}),Object.keys(y).forEach(function(d){var l=Object.getOwnPropertyDescriptor(y,d);Object.defineProperty(u,d,l.get?l:{enumerable:!0,get:function(){return y[d]}})}),u}var ortWeb_min$1={exports:{}};const backends={},backendsSortedByPriority=[],registerBackend=(y,n,u)=>{if(n&&typeof n.init=="function"&&typeof n.createSessionHandler=="function"){const d=backends[y];if(d===void 0)backends[y]={backend:n,priority:u};else{if(d.priority>u)return;if(d.priority===u&&d.backend!==n)throw new Error(`cannot register backend "${y}" using priority ${u}`)}if(u>=0){const l=backendsSortedByPriority.indexOf(y);l!==-1&&backendsSortedByPriority.splice(l,1);for(let p=0;p{const n=y.length===0?backendsSortedByPriority:y,u=[];for(const d of n){const l=backends[d];if(l){if(l.initialized)return l.backend;if(l.aborted)continue;const p=!!l.initPromise;try{return p||(l.initPromise=l.backend.init()),await l.initPromise,l.initialized=!0,l.backend}catch(s){p||u.push({name:d,err:s}),l.aborted=!0}finally{delete l.initPromise}}}throw new Error(`no available backend found. 
ERR: ${u.map(d=>`[${d.name}] ${d.err}`).join(", ")}`)};class EnvImpl{constructor(){this.wasm={},this.webgl={},this.logLevelInternal="warning"}set logLevel(n){if(n!==void 0){if(typeof n!="string"||["verbose","info","warning","error","fatal"].indexOf(n)===-1)throw new Error(`Unsupported logging level: ${n}`);this.logLevelInternal=n}}get logLevel(){return this.logLevelInternal}}const env$1=new EnvImpl,isBigInt64ArrayAvailable=typeof BigInt64Array<"u"&&typeof BigInt64Array.from=="function",isBigUint64ArrayAvailable=typeof BigUint64Array<"u"&&typeof BigUint64Array.from=="function",NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP=new Map([["float32",Float32Array],["uint8",Uint8Array],["int8",Int8Array],["uint16",Uint16Array],["int16",Int16Array],["int32",Int32Array],["bool",Uint8Array],["float64",Float64Array],["uint32",Uint32Array]]),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP=new Map([[Float32Array,"float32"],[Uint8Array,"uint8"],[Int8Array,"int8"],[Uint16Array,"uint16"],[Int16Array,"int16"],[Int32Array,"int32"],[Float64Array,"float64"],[Uint32Array,"uint32"]]);isBigInt64ArrayAvailable&&(NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set("int64",BigInt64Array),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigInt64Array,"int64")),isBigUint64ArrayAvailable&&(NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set("uint64",BigUint64Array),NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigUint64Array,"uint64"));const calculateSize=y=>{let n=1;for(let u=0;u{const t=document.createElement("canvas"),e=t.getContext("2d");if(!n||!e)return o();const r=new Image;r.crossOrigin="Anonymous",r.src=n,r.onload=()=>{t.width=r.width,t.height=r.height,e.drawImage(r,0,0,t.width,t.height);const i=e.getImageData(0,0,t.width,t.height);if(u!==void 0){if(u.height!==void 0&&u.height!==t.height)throw new Error("Image input config height doesn't match ImageBitmap height");if(f.height=t.height,u.width!==void 0&&u.width!==t.width)throw new Error("Image input config width doesn't match ImageBitmap width");f.width=t.width}else f.height=t.height,f.width=t.width;a(at.bufferToTensor(i.data,f))}});throw new Error("Input data provided is not supported - aborted tensor creation")}if(h!==void 0)return at.bufferToTensor(h,f);throw new Error("Input data provided is not supported - aborted tensor creation")}toImageData(n){var u,d;const l=document.createElement("canvas").getContext("2d");let p;if(l!=null){const s=this.dims[3],h=this.dims[2],f=this.dims[1],a=n!==void 0&&n.format!==void 0?n.format:"RGB",o=n!==void 0&&((u=n.norm)===null||u===void 0?void 0:u.mean)!==void 0?n.norm.mean:255,t=n!==void 0&&((d=n.norm)===null||d===void 0?void 0:d.bias)!==void 0?n.norm.bias:0,e=h*s;if(n!==void 0){if(n.height!==void 0&&n.height!==h)throw new Error("Image output config height doesn't match tensor height");if(n.width!==void 0&&n.width!==s)throw new Error("Image output config width doesn't match tensor width");if(n.format!==void 0&&f===4&&n.format!=="RGBA"||f===3&&n.format!=="RGB"&&n.format!=="BGR")throw new Error("Tensor format doesn't match input tensor dims")}const r=4;let i=0,c=1,g=2,m=3,b=0,_=e,w=e*2,v=-1;a==="RGBA"?(b=0,_=e,w=e*2,v=e*3):a==="RGB"?(b=0,_=e,w=e*2):a==="RBG"&&(b=0,w=e,_=e*2),p=l.createImageData(s,h);for(let S=0;S"u")throw new Error(`input '${a}' is missing in 'feeds'.`);if(s)for(const a of this.outputNames)l[a]=null;const h=await this.handler.run(n,l,p),f={};for(const a in h)Object.hasOwnProperty.call(h,a)&&(f[a]=new Tensor$1(h[a].type,h[a].data,h[a].dims));return f}static async create(n,u,d,l){let p,s={};if(typeof n=="string"){if(p=n,typeof u=="object"&&u!==null)s=u;else 
if(typeof u<"u")throw new TypeError("'options' must be an object.")}else if(n instanceof Uint8Array){if(p=n,typeof u=="object"&&u!==null)s=u;else if(typeof u<"u")throw new TypeError("'options' must be an object.")}else if(n instanceof ArrayBuffer||typeof SharedArrayBuffer<"u"&&n instanceof SharedArrayBuffer){const t=n;let e=0,r=n.byteLength;if(typeof u=="object"&&u!==null)s=u;else if(typeof u=="number"){if(e=u,!Number.isSafeInteger(e))throw new RangeError("'byteOffset' must be an integer.");if(e<0||e>=t.byteLength)throw new RangeError(`'byteOffset' is out of range [0, ${t.byteLength}).`);if(r=n.byteLength-e,typeof d=="number"){if(r=d,!Number.isSafeInteger(r))throw new RangeError("'byteLength' must be an integer.");if(r<=0||e+r>t.byteLength)throw new RangeError(`'byteLength' is out of range (0, ${t.byteLength-e}].`);if(typeof l=="object"&&l!==null)s=l;else if(typeof l<"u")throw new TypeError("'options' must be an object.")}else if(typeof d<"u")throw new TypeError("'byteLength' must be a number.")}else if(typeof u<"u")throw new TypeError("'options' must be an object.");p=new Uint8Array(t,e,r)}else throw new TypeError("Unexpected argument[0]: must be 'path' or 'buffer'.");const f=(s.executionProviders||[]).map(t=>typeof t=="string"?t:t.name),o=await(await resolveBackend(f)).createSessionHandler(p,s);return new dn(o)}startProfiling(){this.handler.startProfiling()}endProfiling(){this.handler.endProfiling()}get inputNames(){return this.handler.inputNames}get outputNames(){return this.handler.outputNames}};const InferenceSession$1=InferenceSession$2;var lib=Object.freeze({__proto__:null,InferenceSession:InferenceSession$1,Tensor:Tensor$1,env:env$1,registerBackend}),require$$0=getAugmentedNamespace(lib);/*!
-* ONNX Runtime Web v1.14.0
-* Copyright (c) Microsoft Corporation. All rights reserved.
-* Licensed under the MIT License.
-*/(function(module,exports){(function(y,n){module.exports=n(require$$0)})(self,__WEBPACK_EXTERNAL_MODULE__1670__=>(()=>{var __webpack_modules__={3474:(y,n,u)=>{var d,l=(d=(d=typeof document<"u"&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(p){function s(){return X.buffer!=ne&&Ee(X.buffer),me}function h(){return X.buffer!=ne&&Ee(X.buffer),Ie}function f(){return X.buffer!=ne&&Ee(X.buffer),Oe}function a(){return X.buffer!=ne&&Ee(X.buffer),ce}function o(){return X.buffer!=ne&&Ee(X.buffer),Te}var t,e,r;p=p||{},t||(t=p!==void 0?p:{}),t.ready=new Promise(function(x,A){e=x,r=A});var i,c,g,m,b,_,w=Object.assign({},t),v="./this.program",S=(x,A)=>{throw A},O=typeof window=="object",E=typeof importScripts=="function",T=typeof process=="object"&&typeof process.versions=="object"&&typeof process.versions.node=="string",I=t.ENVIRONMENT_IS_PTHREAD||!1,C="";function B(x){return t.locateFile?t.locateFile(x,C):C+x}if(T){let x;C=E?u(908).dirname(C)+"/":"//",_=()=>{b||(m=u(1384),b=u(908))},i=function(A,k){return _(),A=b.normalize(A),m.readFileSync(A,k?void 0:"utf8")},g=A=>((A=i(A,!0)).buffer||(A=new Uint8Array(A)),A),c=(A,k,M)=>{_(),A=b.normalize(A),m.readFile(A,function(j,V){j?M(j):k(V.buffer)})},1{if(Ve())throw process.exitCode=A,k;k instanceof Ze||z("exiting due to exception: "+k),process.exit(A)},t.inspect=function(){return"[Emscripten Module object]"};try{x=u(9925)}catch(A){throw console.error('The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?'),A}u.g.Worker=x.Worker}else(O||E)&&(E?C=self.location.href:typeof document<"u"&&document.currentScript&&(C=document.currentScript.src),d&&(C=d),C=C.indexOf("blob:")!==0?C.substr(0,C.replace(/[?#].*/,"").lastIndexOf("/")+1):"",T||(i=x=>{var A=new XMLHttpRequest;return A.open("GET",x,!1),A.send(null),A.responseText},E&&(g=x=>{var A=new XMLHttpRequest;return A.open("GET",x,!1),A.responseType="arraybuffer",A.send(null),new Uint8Array(A.response)}),c=(x,A,k)=>{var M=new XMLHttpRequest;M.open("GET",x,!0),M.responseType="arraybuffer",M.onload=()=>{M.status==200||M.status==0&&M.response?A(M.response):k()},M.onerror=k,M.send(null)}));T&&typeof performance>"u"&&(u.g.performance=u(6953).performance);var F=console.log.bind(console),N=console.warn.bind(console);T&&(_(),F=x=>m.writeSync(1,x+`
-`),N=x=>m.writeSync(2,x+`
-`));var H,$=t.print||F,z=t.printErr||N;Object.assign(t,w),w=null,t.thisProgram&&(v=t.thisProgram),t.quit&&(S=t.quit),t.wasmBinary&&(H=t.wasmBinary);var J=t.noExitRuntime||!1;typeof WebAssembly!="object"&&pe("no native wasm support detected");var X,te,ne,me,Ie,Oe,ce,Te,_e=!1,Le=typeof TextDecoder<"u"?new TextDecoder("utf8"):void 0;function We(x,A,k){var M=(A>>>=0)+k;for(k=A;x[k]&&!(k>=M);)++k;if(16(j=(240&j)==224?(15&j)<<12|V<<6|K:(7&j)<<18|V<<12|K<<6|63&x[A++])?M+=String.fromCharCode(j):(j-=65536,M+=String.fromCharCode(55296|j>>10,56320|1023&j))}}else M+=String.fromCharCode(j)}return M}function Ae(x,A){return(x>>>=0)?We(h(),x,A):""}function Ce(x,A,k,M){if(!(0>>=0;M=k+M-1;for(var V=0;V=K&&(K=65536+((1023&K)<<10)|1023&x.charCodeAt(++V)),127>=K){if(k>=M)break;A[k++>>>0]=K}else{if(2047>=K){if(k+1>=M)break;A[k++>>>0]=192|K>>6}else{if(65535>=K){if(k+2>=M)break;A[k++>>>0]=224|K>>12}else{if(k+3>=M)break;A[k++>>>0]=240|K>>18,A[k++>>>0]=128|K>>12&63}A[k++>>>0]=128|K>>6&63}A[k++>>>0]=128|63&K}}return A[k>>>0]=0,k-j}function Me(x){for(var A=0,k=0;k=M?A++:2047>=M?A+=2:55296<=M&&57343>=M?(A+=4,++k):A+=3}return A}function Ee(x){ne=x,t.HEAP8=me=new Int8Array(x),t.HEAP16=new Int16Array(x),t.HEAP32=Oe=new Int32Array(x),t.HEAPU8=Ie=new Uint8Array(x),t.HEAPU16=new Uint16Array(x),t.HEAPU32=ce=new Uint32Array(x),t.HEAPF32=new Float32Array(x),t.HEAPF64=Te=new Float64Array(x)}I&&(ne=t.buffer);var ve=t.INITIAL_MEMORY||16777216;if(I)X=t.wasmMemory,ne=t.buffer;else if(t.wasmMemory)X=t.wasmMemory;else if(!((X=new WebAssembly.Memory({initial:ve/65536,maximum:65536,shared:!0})).buffer instanceof SharedArrayBuffer))throw z("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"),T&&console.log("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)"),Error("bad memory");X&&(ne=X.buffer),ve=ne.byteLength,Ee(ne);var je,ze=[],Ue=[],He=[],Ke=[];function Ve(){return J||!1}function Be(){var x=t.preRun.shift();ze.unshift(x)}var Se,Fe=0,Xe=null;function pe(x){throw I?postMessage({cmd:"onAbort",arg:x}):t.onAbort&&t.onAbort(x),z(x="Aborted("+x+")"),_e=!0,x=new WebAssembly.RuntimeError(x+". 
Build with -sASSERTIONS for more info."),r(x),x}function ht(){return Se.startsWith("data:application/octet-stream;base64,")}function ut(){var x=Se;try{if(x==Se&&H)return new Uint8Array(H);if(g)return g(x);throw"both async and sync fetching of the wasm failed"}catch(A){pe(A)}}Se="ort-wasm-threaded.wasm",ht()||(Se=B(Se));var Et={};function Ze(x){this.name="ExitStatus",this.message="Program terminated with exit("+x+")",this.status=x}function lt(x){(x=re.Vb[x])||pe(),re.mc(x)}function ct(x){var A=re.Cc();if(!A)return 6;re.ac.push(A),re.Vb[x.Ub]=A,A.Ub=x.Ub;var k={cmd:"run",start_routine:x.Ic,arg:x.zc,pthread_ptr:x.Ub};return A.$b=()=>{k.time=performance.now(),A.postMessage(k,x.Nc)},A.loaded&&(A.$b(),delete A.$b),0}function Ne(x){if(I)return Z(1,1,x);Ve()||(re.oc(),t.onExit&&t.onExit(x),_e=!0),S(x,new Ze(x))}function rt(x,A){if(!A&&I)throw It(x),"unwind";Ve()||I||(Wt(),nt(He),qt(0),Ct[1].length&&Nt(1,10),Ct[2].length&&Nt(2,10),re.oc()),Ne(x)}var re={Yb:[],ac:[],qc:[],Vb:{},fc:function(){I&&re.Ec()},Pc:function(){},Ec:function(){re.receiveObjectTransfer=re.Gc,re.threadInitTLS=re.pc,re.setExitStatus=re.nc,J=!1},nc:function(){},oc:function(){for(var x of Object.values(re.Vb))re.mc(x);for(x of re.Yb)x.terminate();re.Yb=[]},mc:function(x){var A=x.Ub;delete re.Vb[A],re.Yb.push(x),re.ac.splice(re.ac.indexOf(x),1),x.Ub=0,Ft(A)},Gc:function(){},pc:function(){re.qc.forEach(x=>x())},Fc:function(x,A){x.onmessage=k=>{var M=(k=k.data).cmd;if(x.Ub&&(re.Bc=x.Ub),k.targetThread&&k.targetThread!=Dt()){var j=re.Vb[k.Qc];j?j.postMessage(k,k.transferList):z('Internal error! Worker sent a message "'+M+'" to target pthread '+k.targetThread+", but that thread no longer exists!")}else M==="processProxyingQueue"?L(k.queue):M==="spawnThread"?ct(k):M==="cleanupThread"?lt(k.thread):M==="killThread"?(k=k.thread,M=re.Vb[k],delete re.Vb[k],M.terminate(),Ft(k),re.ac.splice(re.ac.indexOf(M),1),M.Ub=0):M==="cancelThread"?re.Vb[k.thread].postMessage({cmd:"cancel"}):M==="loaded"?(x.loaded=!0,A&&A(x),x.$b&&(x.$b(),delete x.$b)):M==="print"?$("Thread "+k.threadId+": "+k.text):M==="printErr"?z("Thread "+k.threadId+": "+k.text):M==="alert"?alert("Thread "+k.threadId+": "+k.text):k.target==="setimmediate"?x.postMessage(k):M==="onAbort"?t.onAbort&&t.onAbort(k.arg):M&&z("worker sent an unknown command "+M);re.Bc=void 0},x.onerror=k=>{throw z("worker sent an error! 
"+k.filename+":"+k.lineno+": "+k.message),k},T&&(x.on("message",function(k){x.onmessage({data:k})}),x.on("error",function(k){x.onerror(k)}),x.on("detachedExit",function(){})),x.postMessage({cmd:"load",urlOrBlob:t.mainScriptUrlOrBlob||d,wasmMemory:X,wasmModule:te})},yc:function(){var x=B("ort-wasm-threaded.worker.js");re.Yb.push(new Worker(x))},Cc:function(){return re.Yb.length==0&&(re.yc(),re.Fc(re.Yb[0])),re.Yb.pop()}};function nt(x){for(;0>2>>>0];x=f()[x+48>>2>>>0],Zt(A,A-x),ue(A)};var Je=[];function ye(x){var A=Je[x];return A||(x>=Je.length&&(Je.length=x+1),Je[x]=A=je.get(x)),A}t.invokeEntryPoint=function(x,A){x=ye(x)(A),Ve()?re.nc(x):Kt(x)};var it,pt,ot=[],se=0,ie=0;function oe(x){this.Zb=x,this.Sb=x-24,this.xc=function(A){a()[this.Sb+4>>2>>>0]=A},this.bc=function(){return a()[this.Sb+4>>2>>>0]},this.wc=function(A){a()[this.Sb+8>>2>>>0]=A},this.Dc=function(){return a()[this.Sb+8>>2>>>0]},this.rc=function(){f()[this.Sb>>2>>>0]=0},this.hc=function(A){A=A?1:0,s()[this.Sb+12>>0>>>0]=A},this.uc=function(){return s()[this.Sb+12>>0>>>0]!=0},this.ic=function(A){A=A?1:0,s()[this.Sb+13>>0>>>0]=A},this.kc=function(){return s()[this.Sb+13>>0>>>0]!=0},this.fc=function(A,k){this.cc(0),this.xc(A),this.wc(k),this.rc(),this.hc(!1),this.ic(!1)},this.sc=function(){Atomics.add(f(),this.Sb>>2,1)},this.Hc=function(){return Atomics.sub(f(),this.Sb>>2,1)===1},this.cc=function(A){a()[this.Sb+16>>2>>>0]=A},this.tc=function(){return a()[this.Sb+16>>2>>>0]},this.vc=function(){if(Jt(this.bc()))return a()[this.Zb>>2>>>0];var A=this.tc();return A!==0?A:this.Zb}}function ft(x){return Gt(new oe(x).Sb)}function st(x,A,k,M){return I?Z(3,1,x,A,k,M):gt(x,A,k,M)}function gt(x,A,k,M){if(typeof SharedArrayBuffer>"u")return z("Current environment does not support SharedArrayBuffer, pthreads are not available!"),6;var j=[];return I&&j.length===0?st(x,A,k,M):(x={Ic:k,Ub:x,zc:M,Nc:j},I?(x.Oc="spawnThread",postMessage(x,j),0):ct(x))}function mt(x,A,k){return I?Z(4,1,x,A,k):0}function bt(x,A){if(I)return Z(5,1,x,A)}function yt(x,A){if(I)return Z(6,1,x,A)}function _t(x,A,k){if(I)return Z(7,1,x,A,k)}function wt(x,A,k){return I?Z(8,1,x,A,k):0}function vt(x,A){if(I)return Z(9,1,x,A)}function Tt(x,A,k){if(I)return Z(10,1,x,A,k)}function xt(x,A,k,M){if(I)return Z(11,1,x,A,k,M)}function St(x,A,k,M){if(I)return Z(12,1,x,A,k,M)}function Ot(x,A,k,M){if(I)return Z(13,1,x,A,k,M)}function At(x){if(I)return Z(14,1,x)}function P(x,A){if(I)return Z(15,1,x,A)}function D(x,A,k){if(I)return Z(16,1,x,A,k)}function L(x){Atomics.store(f(),x>>2,1),Dt()&&Yt(x),Atomics.compareExchange(f(),x>>2,1,0)}function R(x){return a()[x>>>2]+4294967296*f()[x+4>>>2]}function U(x,A,k,M,j,V){return I?Z(17,1,x,A,k,M,j,V):-52}function W(x,A,k,M,j,V){if(I)return Z(18,1,x,A,k,M,j,V)}function Y(x){var A=Me(x)+1,k=Lt(A);return k&&Ce(x,s(),k,A),k}function Q(x,A,k){function M(fe){return(fe=fe.toTimeString().match(/\(([A-Za-z ]+)\)$/))?fe[1]:"GMT"}if(I)return Z(19,1,x,A,k);var j=new Date().getFullYear(),V=new Date(j,0,1),K=new Date(j,6,1);j=V.getTimezoneOffset();var ee=K.getTimezoneOffset(),he=Math.max(j,ee);f()[x>>2>>>0]=60*he,f()[A>>2>>>0]=+(j!=ee),x=M(V),A=M(K),x=Y(x),A=Y(A),ee>2>>>0]=x,a()[k+4>>2>>>0]=A):(a()[k>>2>>>0]=A,a()[k+4>>2>>>0]=x)}function Z(x,A){var k=arguments.length-2,M=arguments;return Pt(()=>{for(var j=Rt(8*k),V=j>>3,K=0;K>>0]=ee}return Xt(x,k,j,A)})}t.executeNotifiedProxyingQueue=L,pt=T?()=>{var x=process.hrtime();return 1e3*x[0]+x[1]/1e6}:I?()=>performance.now()-t.__performance_now_clock_drift:()=>performance.now();var ae,we=[],$e={};function De(){if(!ae){var 
x,A={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:(typeof navigator=="object"&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:v||"./this.program"};for(x in $e)$e[x]===void 0?delete A[x]:A[x]=$e[x];var k=[];for(x in A)k.push(x+"="+A[x]);ae=k}return ae}function G(x,A){if(I)return Z(20,1,x,A);var k=0;return De().forEach(function(M,j){var V=A+k;for(j=a()[x+4*j>>2>>>0]=V,V=0;V>0>>>0]=M.charCodeAt(V);s()[j>>0>>>0]=0,k+=M.length+1}),0}function ge(x,A){if(I)return Z(21,1,x,A);var k=De();a()[x>>2>>>0]=k.length;var M=0;return k.forEach(function(j){M+=j.length+1}),a()[A>>2>>>0]=M,0}function xe(x){return I?Z(22,1,x):52}function Ge(x,A,k,M){return I?Z(23,1,x,A,k,M):52}function Qe(x,A,k,M,j){return I?Z(24,1,x,A,k,M,j):70}var Ct=[null,[],[]];function Nt(x,A){var k=Ct[x];A===0||A===10?((x===1?$:z)(We(k,0)),k.length=0):k.push(A)}function Bt(x,A,k,M){if(I)return Z(25,1,x,A,k,M);for(var j=0,V=0;V>2>>>0],ee=a()[A+4>>2>>>0];A+=8;for(var he=0;he>>0]);j+=ee}return a()[M>>2>>>0]=j,0}var Re=0;function kt(x){return x%4==0&&(x%100!=0||x%400==0)}var zt=[31,29,31,30,31,30,31,31,30,31,30,31],Ut=[31,28,31,30,31,30,31,31,30,31,30,31];function Vt(x,A,k,M){function j(q,be,Pe){for(q=typeof q=="number"?q.toString():q||"";q.lengthdt?-1:0