diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/__init__.py deleted file mode 100644 index 5d9e80c19e255137edeedbcc5a7a361ae600e1f9..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/__init__.py +++ /dev/null @@ -1,478 +0,0 @@ -import json -from datetime import datetime -from hashlib import md5 -from json import dumps -from pathlib import Path -from random import choice, choices, randint -from re import search, findall -from string import ascii_letters, digits -from typing import Optional, Union, List, Any, Generator -from urllib.parse import unquote - -import selenium.webdriver.support.expected_conditions as EC -from fake_useragent import UserAgent -from pydantic import BaseModel -from pypasser import reCaptchaV3 -from requests import Session -from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions -from selenium.webdriver.common.by import By -from selenium.webdriver.support.wait import WebDriverWait -from tls_client import Session as TLS - -from .api import Client as PoeClient -from .mail import Emailnator - -SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not -being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location -to your system\'s PATH.\n\nHere are the steps to resolve the issue:\n\n1. Download the geckodriver for your platform -(Windows, macOS, or Linux) from the following link: https://github.com/mozilla/geckodriver/releases\n\n2. Extract the -downloaded archive and locate the geckodriver executable.\n\n3. Add the geckodriver executable to your system\'s -PATH.\n\nFor macOS and Linux:\n\n- Open a terminal window.\n- Move the geckodriver executable to a directory that is -already in your PATH, or create a new directory and add it to your PATH:\n\n```bash\n# Example: Move geckodriver to -/usr/local/bin\nmv /path/to/your/geckodriver /usr/local/bin\n```\n\n- If you created a new directory, add it to your -PATH:\n\n```bash\n# Example: Add a new directory to PATH\nexport PATH=$PATH:/path/to/your/directory\n```\n\nFor -Windows:\n\n- Right-click on "My Computer" or "This PC" and select "Properties".\n- Click on "Advanced system -settings".\n- Click on the "Environment Variables" button.\n- In the "System variables" section, find the "Path" -variable, select it, and click "Edit".\n- Click "New" and add the path to the directory containing the geckodriver -executable.\n\nAfter adding the geckodriver to your PATH, restart your terminal or command prompt and try running -your script again. 
The error should be resolved.''' - -# from twocaptcha import TwoCaptcha -# solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358') - -MODELS = { - 'Sage': 'capybara', - 'GPT-4': 'beaver', - 'Claude+': 'a2_2', - 'Claude-instant': 'a2', - 'ChatGPT': 'chinchilla', - 'Dragonfly': 'nutria', - 'NeevaAI': 'hutia', -} - - -def extract_formkey(html): - script_regex = r'' - script_text = search(script_regex, html).group(1) - key_regex = r'var .="([0-9a-f]+)",' - key_text = search(key_regex, script_text).group(1) - cipher_regex = r'.\[(\d+)\]=.\[(\d+)\]' - cipher_pairs = findall(cipher_regex, script_text) - - formkey_list = [''] * len(cipher_pairs) - for pair in cipher_pairs: - formkey_index, key_index = map(int, pair) - formkey_list[formkey_index] = key_text[key_index] - formkey = ''.join(formkey_list) - - return formkey - - -class Choice(BaseModel): - text: str - index: int - logprobs: Any - finish_reason: str - - -class Usage(BaseModel): - prompt_tokens: int - completion_tokens: int - total_tokens: int - - -class PoeResponse(BaseModel): - id: int - object: str - created: int - model: str - choices: List[Choice] - usage: Usage - text: str - - -class ModelResponse: - def __init__(self, json_response: dict) -> None: - self.id = json_response['data']['poeBotCreate']['bot']['id'] - self.name = json_response['data']['poeBotCreate']['bot']['displayName'] - self.limit = json_response['data']['poeBotCreate']['bot']['messageLimit']['dailyLimit'] - self.deleted = json_response['data']['poeBotCreate']['bot']['deletionState'] - - -class Model: - @staticmethod - def create( - token: str, - model: str = 'gpt-3.5-turbo', # claude-instant - system_prompt: str = 'You are ChatGPT a large language model. Answer as consisely as possible', - description: str = 'gpt-3.5 language model', - handle: str = None, - ) -> ModelResponse: - if not handle: - handle = f'gptx{randint(1111111, 9999999)}' - - client = Session() - client.cookies['p-b'] = token - - formkey = extract_formkey(client.get('https://poe.com').text) - settings = client.get('https://poe.com/api/settings').json() - - client.headers = { - 'host': 'poe.com', - 'origin': 'https://poe.com', - 'referer': 'https://poe.com/', - 'poe-formkey': formkey, - 'poe-tchannel': settings['tchannelData']['channel'], - 'user-agent': UserAgent().random, - 'connection': 'keep-alive', - 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'content-type': 'application/json', - 'sec-fetch-site': 'same-origin', - 'sec-fetch-mode': 'cors', - 'sec-fetch-dest': 'empty', - 'accept': '*/*', - 'accept-encoding': 'gzip, deflate, br', - 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8', - } - - payload = dumps( - separators=(',', ':'), - obj={ - 'queryName': 'CreateBotMain_poeBotCreate_Mutation', - 'variables': { - 'model': MODELS[model], - 'handle': handle, - 'prompt': system_prompt, - 'isPromptPublic': True, - 'introduction': '', - 'description': description, - 'profilePictureUrl': 'https://qph.fs.quoracdn.net/main-qimg-24e0b480dcd946e1cc6728802c5128b6', - 'apiUrl': None, - 'apiKey': ''.join(choices(ascii_letters + digits, k=32)), - 'isApiBot': False, - 'hasLinkification': False, - 'hasMarkdownRendering': False, - 'hasSuggestedReplies': False, - 'isPrivateBot': False, - }, - 'query': 'mutation CreateBotMain_poeBotCreate_Mutation(\n $model: String!\n $handle: String!\n $prompt: String!\n $isPromptPublic: Boolean!\n $introduction: String!\n $description: String!\n $profilePictureUrl: String\n $apiUrl: 
String\n $apiKey: String\n $isApiBot: Boolean\n $hasLinkification: Boolean\n $hasMarkdownRendering: Boolean\n $hasSuggestedReplies: Boolean\n $isPrivateBot: Boolean\n) {\n poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {\n status\n bot {\n id\n ...BotHeader_bot\n }\n }\n}\n\nfragment BotHeader_bot on Bot {\n displayName\n messageLimit {\n dailyLimit\n }\n ...BotImage_bot\n ...BotLink_bot\n ...IdAnnotation_node\n ...botHelpers_useViewerCanAccessPrivateBot\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotImage_bot on Bot {\n displayName\n ...botHelpers_useDeletion_bot\n ...BotImage_useProfileImage_bot\n}\n\nfragment BotImage_useProfileImage_bot on Bot {\n image {\n __typename\n ... on LocalBotImage {\n localName\n }\n ... on UrlBotImage {\n url\n }\n }\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotLink_bot on Bot {\n displayName\n}\n\nfragment IdAnnotation_node on Node {\n __isNode: __typename\n id\n}\n\nfragment botHelpers_useDeletion_bot on Bot {\n deletionState\n}\n\nfragment botHelpers_useViewerCanAccessPrivateBot on Bot {\n isPrivateBot\n viewerIsCreator\n}\n', - }, - ) - - base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k' - client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest() - - response = client.post('https://poe.com/api/gql_POST', data=payload) - - if 'success' not in response.text: - raise Exception( - ''' - Bot creation Failed - !! Important !! 
- Bot creation was not enabled on this account - please use: quora.Account.create with enable_bot_creation set to True - ''' - ) - - return ModelResponse(response.json()) - - -class Account: - @staticmethod - def create( - proxy: Optional[str] = None, - logging: bool = False, - enable_bot_creation: bool = False, - ): - client = TLS(client_identifier='chrome110') - client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else {} - - mail_client = Emailnator() - mail_address = mail_client.get_mail() - - if logging: - print('email', mail_address) - - client.headers = { - 'authority': 'poe.com', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'content-type': 'application/json', - 'origin': 'https://poe.com', - 'poe-tag-id': 'null', - 'referer': 'https://poe.com/login', - 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36', - 'poe-formkey': extract_formkey(client.get('https://poe.com/login').text), - 'poe-tchannel': client.get('https://poe.com/api/settings').json()['tchannelData']['channel'], - } - - token = reCaptchaV3( - 'https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal' - ) - # token = solver.recaptcha(sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG', - # url = 'https://poe.com/login?redirect_url=%2F', - # version = 'v3', - # enterprise = 1, - # invisible = 1, - # action = 'login',)['code'] - - payload = dumps( - separators=(',', ':'), - obj={ - 'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation', - 'variables': { - 'emailAddress': mail_address, - 'phoneNumber': None, - 'recaptchaToken': token, - }, - 'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n', - }, - ) - - base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k' - client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest() - - print(dumps(client.headers, indent=4)) - - response = client.post('https://poe.com/api/gql_POST', data=payload) - - if 'automated_request_detected' in response.text: - print('please try using a proxy / wait for fix') - - if 'Bad Request' in response.text: - if logging: - print('bad request, retrying...', response.json()) - quit() - - if logging: - print('send_code', response.json()) - - mail_content = mail_client.get_message() - mail_token = findall(r';">(\d{6,7})', mail_content)[0] - - if logging: - print('code', mail_token) - - payload = dumps( - separators=(',', ':'), - obj={ - 'queryName': 'SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation', - 'variables': { - 'verificationCode': str(mail_token), - 'emailAddress': mail_address, - 'phoneNumber': None, - }, - 'query': 'mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n $verificationCode: String!\n $emailAddress: 
String\n $phoneNumber: String\n) {\n signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n status\n errorMessage\n }\n}\n', - }, - ) - - base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k' - client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest() - - response = client.post('https://poe.com/api/gql_POST', data=payload) - if logging: - print('verify_code', response.json()) - - def get(self): - cookies = open(Path(__file__).resolve().parent / 'cookies.txt', 'r').read().splitlines() - return choice(cookies) - - @staticmethod - def delete(token: str, proxy: Optional[str] = None): - client = PoeClient(token, proxy=proxy) - client.delete_account() - - -class StreamingCompletion: - @staticmethod - def create( - model: str = 'gpt-4', - custom_model: bool = None, - prompt: str = 'hello world', - token: str = '', - proxy: Optional[str] = None, - ) -> Generator[PoeResponse, None, None]: - _model = MODELS[model] if not custom_model else custom_model - - proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False - client = PoeClient(token) - client.proxy = proxies - - for chunk in client.send_message(_model, prompt): - yield PoeResponse( - **{ - 'id': chunk['messageId'], - 'object': 'text_completion', - 'created': chunk['creationTime'], - 'model': _model, - 'text': chunk['text_new'], - 'choices': [ - { - 'text': chunk['text_new'], - 'index': 0, - 'logprobs': None, - 'finish_reason': 'stop', - } - ], - 'usage': { - 'prompt_tokens': len(prompt), - 'completion_tokens': len(chunk['text_new']), - 'total_tokens': len(prompt) + len(chunk['text_new']), - }, - } - ) - - -class Completion: - @staticmethod - def create( - model: str = 'gpt-4', - custom_model: str = None, - prompt: str = 'hello world', - token: str = '', - proxy: Optional[str] = None, - ) -> PoeResponse: - _model = MODELS[model] if not custom_model else custom_model - - proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False - client = PoeClient(token) - client.proxy = proxies - - chunk = None - for response in client.send_message(_model, prompt): - chunk = response - - return PoeResponse( - **{ - 'id': chunk['messageId'], - 'object': 'text_completion', - 'created': chunk['creationTime'], - 'model': _model, - 'text': chunk['text'], - 'choices': [ - { - 'text': chunk['text'], - 'index': 0, - 'logprobs': None, - 'finish_reason': 'stop', - } - ], - 'usage': { - 'prompt_tokens': len(prompt), - 'completion_tokens': len(chunk['text']), - 'total_tokens': len(prompt) + len(chunk['text']), - }, - } - ) - - -class Poe: - def __init__( - self, - model: str = 'ChatGPT', - driver: str = 'firefox', - download_driver: bool = False, - driver_path: Optional[str] = None, - cookie_path: str = './quora/cookie.json', - ): - # validating the model - if model and model not in MODELS: - raise RuntimeError('Sorry, the model you provided does not exist. 
Please check and try again.') - self.model = MODELS[model] - self.cookie_path = cookie_path - self.cookie = self.__load_cookie(driver, driver_path=driver_path) - self.client = PoeClient(self.cookie) - - def __load_cookie(self, driver: str, driver_path: Optional[str] = None) -> str: - if (cookie_file := Path(self.cookie_path)).exists(): - with cookie_file.open() as fp: - cookie = json.load(fp) - if datetime.fromtimestamp(cookie['expiry']) < datetime.now(): - cookie = self.__register_and_get_cookie(driver, driver_path=driver_path) - else: - print('Loading the cookie from file') - else: - cookie = self.__register_and_get_cookie(driver, driver_path=driver_path) - - return unquote(cookie['value']) - - def __register_and_get_cookie(self, driver: str, driver_path: Optional[str] = None) -> dict: - mail_client = Emailnator() - mail_address = mail_client.get_mail() - - driver = self.__resolve_driver(driver, driver_path=driver_path) - driver.get("https://www.poe.com") - - # clicking use email button - driver.find_element(By.XPATH, '//button[contains(text(), "Use email")]').click() - - email = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, '//input[@type="email"]'))) - email.send_keys(mail_address) - driver.find_element(By.XPATH, '//button[text()="Go"]').click() - - code = findall(r';">(\d{6,7})', mail_client.get_message())[0] - print(code) - - verification_code = WebDriverWait(driver, 30).until( - EC.presence_of_element_located((By.XPATH, '//input[@placeholder="Code"]')) - ) - verification_code.send_keys(code) - verify_button = EC.presence_of_element_located((By.XPATH, '//button[text()="Verify"]')) - login_button = EC.presence_of_element_located((By.XPATH, '//button[text()="Log In"]')) - - WebDriverWait(driver, 30).until(EC.any_of(verify_button, login_button)).click() - - cookie = driver.get_cookie('p-b') - - with open(self.cookie_path, 'w') as fw: - json.dump(cookie, fw) - - driver.close() - return cookie - - @staticmethod - def __resolve_driver(driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]: - options = FirefoxOptions() if driver == 'firefox' else ChromeOptions() - options.add_argument('-headless') - - if driver_path: - options.binary_location = driver_path - try: - return Firefox(options=options) if driver == 'firefox' else Chrome(options=options) - except Exception: - raise Exception(SELENIUM_WEB_DRIVER_ERROR_MSG) - - def chat(self, message: str, model: Optional[str] = None) -> str: - if model and model not in MODELS: - raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.') - model = MODELS[model] if model else self.model - response = None - for chunk in self.client.send_message(model, message): - response = chunk['text'] - return response - - def create_bot(self, name: str, /, prompt: str = '', base_model: str = 'ChatGPT', description: str = '') -> None: - if base_model not in MODELS: - raise RuntimeError('Sorry, the base_model you provided does not exist. 
Please check and try again.') - - response = self.client.create_bot( - handle=name, - prompt=prompt, - base_model=MODELS[base_model], - description=description, - ) - print(f'Successfully created bot with name: {response["bot"]["displayName"]}') - - def list_bots(self) -> list: - return list(self.client.bot_names.values()) - - def delete_account(self) -> None: - self.client.delete_account() diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/text/cleaners.py b/spaces/123Kumar/vits-uma-genshin-honkai123/text/cleaners.py deleted file mode 100644 index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000 --- a/spaces/123Kumar/vits-uma-genshin-honkai123/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if 
re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i -

Watch Aaja Nachle (Eng Sub) Hindi movie online for free at hindiganadownload.com, one of the best online websites for downloading movies in HD quality. You can stream the latest Bollywood movies there on your mobile, PC, or tablet.

-

Aaja Nachle Eng Sub Free Download


Download File: https://imgfil.com/2uxZu0



-

Download Aaja Nachle (2007) full movie in 480p, 720p, or 1080p quality. The short story of this movie: Dia's dance teacher is dying. She returns to the town where she learnt to live and dance, and most importantly to save the endangered Ajanta theatre. The movie is based on drama, family, and music, and is available in Hindi.

-

aaja nachle free download full hd, 720p 720, 1080p 1080, mp3 audio songs, mp3 songs, divx songs, top star mp3 songs, mayank mp3 songs, kareena kapoor mp3 songs, hrithik mp3 songs, akshay kumar mp3 songs, katrina kaif mp3 songs, katrina kaif, twinkle twinkle song,aravind saagar

-

watch aaja nachle (2007) full movie. aaja nachle mp3 song. aajnachle free download full movie 1080p & 720p, aaja nachle free full movie 1080p, 720p 720, music player free, music songs, playlist track, playlist, music albums, facebook,download, tagalog hindi aajnachle - aaja nachle (2007) full movie. mp3 song. aaja nachle free full movie 1080p & 720p, aaja nachle free full movie 1080p, 720p 720, music player free, music songs, playlist track, playlist, music albums, facebook,download, tagalog hindi aajnachle - aaja nachle (2007) full movie.

-

aaja nachle full movie download 720p, 1080p, download aaja nachle full movie 1080p, aaja nachle full movie 1080p, download aaja nachle full movie 720p, download aaja nachle full movie 1080p 720p, download aaja nachle full movie 720p, download aaja nachle full movie 1080p, download aaja nachle full movie 720p free download, download aaja nachle full movie 720p, download aaja nachle full movie 1080p 720p free download,download aaja nachle full movie free download 720p, download aaja nachle full movie free 720p 1080p, download aaja nachle full movie free 720p 1080p, download aaja nachle full movie free 720p 1080p

-

899543212b
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Amada Ap100 Software Crack 28.md b/spaces/1gistliPinn/ChatGPT4/Examples/Amada Ap100 Software Crack 28.md deleted file mode 100644 index c83c95f8971f6a87d79b0ca869dd095acc640f33..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Amada Ap100 Software Crack 28.md +++ /dev/null @@ -1,7 +0,0 @@ -

amada ap100 software crack 28


Download Zip: https://imgfil.com/2uxYkR



- -Amada Ap100 Software Crack 28 amada software, amada software support, amada software training, amada software download, amada . Amada Ap100 - free download program for recording. -Amada Ap100 Software 8a78ff9644
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Baka Loader 1.4.md b/spaces/1gistliPinn/ChatGPT4/Examples/Baka Loader 1.4.md deleted file mode 100644 index 06a2fa2ad76064274584edb6d5e27f90f6a596ee..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Baka Loader 1.4.md +++ /dev/null @@ -1,74 +0,0 @@ - -

Baka Loader 1.4: A Review

-

If you are looking for a tool that can help you convert DIB files, enhance your graphics, and use shaders and effects, you might want to check out Baka Loader 1.4. This is a software application that is developed by Windows Software Developer and is part of the Convertdib program. In this article, we will review what Baka Loader 1.4 is, how it works, and what are its advantages and disadvantages.

-

What is Baka Loader 1.4?

-

Baka Loader 1.4 is an executable file that runs on your PC and allows you to convert DIB files to other formats, such as BMP, JPG, PNG, etc. DIB files are device-independent bitmap files that are used to store graphics data. They are often used by Windows applications and games, but they are not compatible with some other programs or devices. Baka Loader 1.4 can help you convert DIB files to more common formats that can be opened by other software or hardware.
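To make the conversion idea concrete, here is a minimal Python sketch of the same DIB-to-PNG step using the Pillow imaging library. This is only an illustration of what a bitmap conversion involves, not part of Baka Loader itself, and the file names are placeholders.

```python
# Illustrative sketch: converting a device-independent bitmap (DIB/BMP) to PNG with Pillow.
# The file names below are examples; Pillow must be installed (pip install pillow).
from PIL import Image

def convert_dib(src_path: str, dst_path: str) -> None:
    """Open a DIB/BMP file and re-save it; the output format is taken from the extension."""
    with Image.open(src_path) as img:
        img.convert("RGB").save(dst_path)

convert_dib("picture.dib", "picture.png")
```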

-

Baka Loader 1.4


Download File ››››› https://imgfil.com/2uy024



-

Besides converting DIB files, Baka Loader 1.4 also lets you enhance your graphics by using shaders and effects. Shaders are programs that run on your graphics card and modify the appearance of your images or animations. Effects are visual features that add realism or style to your graphics, such as lighting, shadows, reflections, etc. Baka Loader 1.4 has plenty of shaders and effects built-in and online for free. You can download them from the internet and apply them to your DIB files or other graphics files.
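As a rough illustration of what a per-pixel effect does, the sketch below applies a simple blue tint to an image array in Python with NumPy. Real shaders run on the graphics card in languages such as GLSL, and nothing here comes from Baka Loader; it only shows the kind of transformation an effect performs on every pixel.

```python
# Illustration only: a "shader-like" per-pixel effect written with NumPy (not GPU code).
import numpy as np

def blue_tint(pixels: np.ndarray, strength: float = 0.3) -> np.ndarray:
    """pixels is an HxWx3 uint8 RGB array; returns a copy with the blue channel boosted."""
    out = pixels.astype(np.float32)
    out[..., 2] = np.clip(out[..., 2] * (1.0 + strength), 0, 255)  # scale the blue channel
    return out.astype(np.uint8)

demo = np.full((2, 2, 3), 128, dtype=np.uint8)  # a tiny 2x2 gray image
print(blue_tint(demo)[0, 0])  # -> [128 128 166]
```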

-

How does Baka Loader 1.4 work?

-

To use Baka Loader 1.4, you need to download it from the internet and install it on your PC. The installation process is simple and fast, and it does not require any special skills or knowledge. Once you have installed Baka Loader 1.4, you can run it by double-clicking on the baka.loader.exe file in your program folder.

-

When you run Baka Loader 1.4, you will see a pop-up screen that shows its package name, version, the Chinese vendor name and the symbol of the app. You will also see a menu bar with several options, such as File, Edit, View, Tools, Help, etc. You can use these options to open, save, edit, view, convert, apply shaders and effects, and get help for your DIB files or other graphics files.

-

To convert a DIB file to another format, you need to open it with Baka Loader 1.4 by clicking on File -> Open or by dragging and dropping it into the app window. Then you need to choose the output format from the drop-down list at the bottom of the app window. You can also adjust some settings for the output file, such as quality, size, compression, etc. Then you need to click on File -> Save As or press Ctrl+S to save the converted file in your desired location.

-

To apply shaders and effects to a DIB file or another graphics file, you need to open it with Baka Loader 1.4 as well. Then you need to click on Tools -> Shader Library or press Ctrl+L to open the shader library window. Here you can see a list of available shaders and effects that you can download from the internet or use from your local folder. You can preview each shader or effect by clicking on it and see how it changes the appearance of your file in the app window. You can also adjust some parameters for each shader or effect by using the sliders or checkboxes below the preview window.

-

Once you have chosen the shader or effect that you want to apply to your file, you need to click on Apply or press Enter to confirm your choice. You will see a progress bar showing how long it takes to apply the shader or effect to your file. When it is done, you can save the modified file by clicking on File -> Save As or pressing Ctrl+S.

-

What are the advantages and disadvantages of Baka Loader 1.4?

-

Baka Loader 1.4 has some advantages and disadvantages that you should consider before using it.

-

-

Some of the advantages of Baka Loader 1.4 are:

- -

Some of the disadvantages of Baka Loader 1.4 are:

- -

Conclusion

-

Baka Loader 1.4 is a software application that can help you convert DIB files to other formats and enhance your graphics by using shaders and effects. It is free and easy to use, but it may also have some drawbacks that you should be aware of before using it.

-

If you want to try Baka Loader 1.4 for yourself, you can download it from this link: http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4

-

How to download and install Baka Loader 1.4?

-

If you want to download and install Baka Loader 1.4 on your PC, you need to follow these steps:

-
    -
  1. Go to this link: http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4 and click on the download button.
  2. -
  3. Wait for the download to finish and then open the downloaded file.
  4. -
  5. Follow the instructions on the screen to install Baka Loader 1.4 on your PC.
  6. -
  7. Choose the destination folder where you want to install Baka Loader 1.4 and click on Next.
  8. -
  9. Wait for the installation to complete and then click on Finish.
  10. -
  11. You can now run Baka Loader 1.4 by double-clicking on the baka.loader.exe file in your program folder.
  12. -
-

What are some alternatives to Baka Loader 1.4?

-

Baka Loader 1.4 is not the only tool that can help you convert DIB files and use shaders and effects. There are some other alternatives that you can try if you are not satisfied with Baka Loader 1.4 or if you want to compare different options. Here are some of them:

- -

Conclusion

-

Baka Loader 1.4 is a software application that can help you convert DIB files to other formats and enhance your graphics by using shaders and effects. It is free and easy to use, but it may also have some drawbacks that you should be aware of before using it.

-

If you want to try Baka Loader 1.4 for yourself, you can download it from this link: http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4

-

If you want to learn more about DIB files, shaders, effects, or other graphics topics, you can check out these links:

- -



3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Contoh Surat Undangan Peneguhan Sidi.md b/spaces/1gistliPinn/ChatGPT4/Examples/Contoh Surat Undangan Peneguhan Sidi.md deleted file mode 100644 index fd81a5f0ff058be2669d13beb2fabdc4a472ce38..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Contoh Surat Undangan Peneguhan Sidi.md +++ /dev/null @@ -1,62 +0,0 @@ -

Contoh Surat Undangan Peneguhan Sidi


Download ✦✦✦ https://imgfil.com/2uy0Sx



-
-10.1 - -05 Counter Objectives - -II. The message gets through - -07.1. A valuable message - -09 Browsers - -07.2. A search engine for computers - -10.1. Types - -11.1. Web - -11.2. Traffic - -11.3. What is currently available to buy - -12.1. Web - -12.2. Web - -12.3. Password - -12.4. Client - -13.1. Other clients - -14.1. Gaga - -15.1. Bluetooth - -15.2. Cryptos - -15.3. System computer - -15.4. World traffic - -16.1. Software system computer - -16.2. Technology computer - -16.3. Data computer - -16.4. System computer - -16.5. Data computer - -16.6. Generated data - -17.1. Software - -17.2. Software - -17.3 4fefd39f24
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Game Need For Speed Undercover FREE.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Game Need For Speed Undercover FREE.md deleted file mode 100644 index 09d94546da73c2a55c99da62e89cf9ab479455e8..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Game Need For Speed Undercover FREE.md +++ /dev/null @@ -1,9 +0,0 @@ -

Download Game Need For Speed Undercover


Download Zip →→→ https://imgfil.com/2uy0YT



- -Need for Speed ​​Undercover Walkthrough Part 1 - NFS Undercover Part 1 gameplay featuring Maggie Q. Speed: Undercover is a 2008 racing video game, the twelfth installment in the Need for Speed ​​series. Developed by EA Black Box and published. Need for Speed ​​Undercover # Walkthrough Need for Speed ​​Undercover. -The Need for Speed: Underground 2, NFS: Underground, NFS: Underground 2, Need for Speed ​​No Limits, NFS Undercover, NFS Hot Pursuit, NFS Underground 2, NFS Underground 2, Need for Speed ​​Underground 2, NFS Underground 2, NFS Carbon , NFS Most Wanted, NFS Heat, NFS Carbon. -Walkthrough Need for Speed: Underground 2. Need for Speed: Underground 2 (from English. -Need for Speed ​​No Limits - Arcade Racing Video 8a78ff9644
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Fisika Universitas Jilid 1 Sears Zemansky Pdf 14l ((BETTER)).md b/spaces/1gistliPinn/ChatGPT4/Examples/Fisika Universitas Jilid 1 Sears Zemansky Pdf 14l ((BETTER)).md deleted file mode 100644 index 9d266b6ab9c4522cd1941dc77fd7ec517e2f49c9..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Fisika Universitas Jilid 1 Sears Zemansky Pdf 14l ((BETTER)).md +++ /dev/null @@ -1,12 +0,0 @@ -
-

1.714k likes. Sears dan Zemansky.2000. Rahasia Fisika Universitas Jilid (1) (3). Fisika Universitas Jilid 1, terjemahan Endang Juliastuti.http://www.khybersales.com/2031/07/18/fisika-universitas-jilid-1-sears-zemansky-pdf-download-top/.

-

2018 Universitas Negeri Semarang p-ISSN 2252-6617 e-ISSN 252-6232. Sears dan Zemansky Fisika Universitas Jilid 1, terjemahan Endang Juliastuti.http://www.khybersales.com/2029/07/18/fisika-universitas-jilid-1-sears-zemansky-pdf-download-top/

-

Fisika Universitas Jilid 1 Sears Zemansky Pdf 14l


Download File: https://imgfil.com/2uy0Xc



-

2018 Universitas Negeri Semarang p-ISSN 2252-6617 e-ISSN 252-6232. Sears dan Zemansky Fisika Universitas Jilid 1, terjemahan Endang Juliastuti.http://www.khybersales.com/2028/07/18/fisika-universitas-jilid-1-sears-zemansky-pdf-download-top/

-

2018 Universitas Negeri Semarang p-ISSN 2252-6617 e-ISSN 252-6232. Sears dan Zemansky Fisika Universitas Jilid 1, terjemahan Endang Juliastuti.http://www.khybersales.com/2027/07/18/fisika-universitas-jilid-1-sears-zemansky-pdf-download-top/

-

Fisika Dasar Edisi 7 Jilid 2 Jakarta: Erlangga. Herminegari. 2013.. Sears, Francis W. & Zemansky, Mark W. 1962. Fisika untuk Universitas 2. . Sears, Francis Weston Fisika untuk. panas dan bunyi / Francis Weston Sears, Mark W. Zemansky. Amir Achmad Judul : Fisika untuk universitas jilid.

-

dalam buku Fisika Universitas dan diklarifikasi dengan wawancara para ahli.. Law (Sears & Zemansky, 1991). Universitas Jilid 1 Edisi Kesepuluh. Trans.) Jakarta: Salemba Teknika. [7] Sears, F. W., & Zemansky, M. (1991). Fisika untuk Universitas 1, Mekanika, Panas, dan Bunyi.

-

-

2018 Universitas Negeri Semarang p-ISSN 2252-6617 e-ISSN 252-6232. Sears dan Zemansky Fisika Universitas Jilid 1, terjemahan Endang Juliastuti. Jakarta:. https://allindiaherb.com/fisika-universitas-jilid-1-sears-zemansky-pdf-14l-hot/.

899543212b
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Lo-Que-Varguitas-No-Dijo-Libro-Pdf-11-Fixed.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/Lo-Que-Varguitas-No-Dijo-Libro-Pdf-11-Fixed.md deleted file mode 100644 index d016edb35af20ee7882f04a8f014b3f3e6580164..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Lo-Que-Varguitas-No-Dijo-Libro-Pdf-11-Fixed.md +++ /dev/null @@ -1,58 +0,0 @@ -## Lo Que Varguitas No Dijo Libro Pdf 11 - - - - - - ![Lo Que Varguitas No Dijo Libro Pdf 11 Fixed](https://3.bp.blogspot.com/-vqEreKNgg-k/TW8ErNQBy9I/AAAAAAAAA_4/kTPfrtr6c58/s1600/03.jpg) - - - - - -**LINK ===> [https://lodystiri.blogspot.com/?file=2txPB4](https://lodystiri.blogspot.com/?file=2txPB4)** - - - - - - - - - - - - Here is a possible title and article with html formatting for the keyword "Lo Que Varguitas No Dijo Libro Pdf 11": - -# Lo que Varguitas no dijo: el libro que revela la verdadera historia de amor entre Julia Urquidi y Mario Vargas Llosa - - - -Lo que Varguitas no dijo es una obra autobiográfica de Julia Urquidi Illanes publicada en 1983, que se enfoca en el tiempo que vivió una relación con el escritor Mario Vargas Llosa. Se casaron en mayo de 1955, cuando Vargas Llosa tenía 19 años y ella 29, después de enfrentar diferentes problemas por el hecho de que Julia era hermana de la tía política de Vargas Llosa y la diferencia de edades que existía. El libro tiene relevancia porque narra los años que Urquidi vivió ayudando y apoyando a Vargas Llosa a que se convirtiera en escritor exitoso, según la autora. El matrimonio sobrevivió diferentes crisis, como los celos de Julia y la infidelidad de Mario, hasta que en 1964, por medio de una carta, Vargas Llosa le confiesa a ella su amor por su prima Patricia Llosa Urquidi (y sobrina de Julia) y sus intenciones de casarse con ella. Urquidi decide escribir este libro en respuesta a La tía Julia y el escribidor escrito por Vargas Llosa. - - - -En este artículo, te contamos más detalles sobre este libro que muestra la otra cara de la historia de amor entre Julia Urquidi y Mario Vargas Llosa, que inspiró una de las novelas más famosas del Premio Nobel de Literatura. - - - -## ¿Quién fue Julia Urquidi? - - - -Julia Urquidi Illanes nació en Cochabamba, Bolivia, el 30 de marzo de 1926. Era hija de un diplomático boliviano y una dama peruana. Estudió en el Colegio Americano de La Paz y luego se trasladó a Lima, donde trabajó como secretaria en la embajada boliviana. Allí conoció a Mario Vargas Llosa, quien era sobrino político de su hermana Olga. Se enamoraron y se casaron en 1955, pese a la oposición familiar y social. Julia apoyó a Mario en sus estudios universitarios y en sus primeros pasos como escritor. Lo acompañó a París, donde vivieron entre 1959 y 1963. Sin embargo, su relación se deterioró por las infidelidades de Mario y la diferencia de caracteres. En 1964, se separaron y luego se divorciaron. Julia regresó a Lima y trabajó como productora de televisión. En 1983, publicó Lo que Varguitas no dijo, donde cuenta su versión de los hechos. Murió en Lima el 10 de marzo de 2010. - - - -## ¿Qué dice el libro Lo que Varguitas no dijo? - - - -El libro Lo que Varguitas no dijo es un testimonio personal de Julia Urquidi sobre su matrimonio con Mario Vargas Llosa. En él, relata cómo se conocieron, cómo se enamoraron, cómo se casaron, cómo vivieron en París y cómo se separaron. 
También describe los momentos felices y difíciles que compartieron, así como las personalidades y los sueños de ambos. El libro tiene un tono íntimo y emotivo, pero también crítico y reivindicativo. Julia busca mostrar su papel como esposa, compañera y colaboradora de Mario, así como defender su dignidad frente a las mentiras y las injurias que sufrió por parte de él y de su familia. El libro también es una respuesta a La tía Julia y el escribidor, la novela que Mario Vargas Llosa escribió en 1977, donde narra su historia de amor con Julia bajo el nombre ficticio de Marito y la tía Julia. En esta novela, Mario presenta a Julia como una mujer mayor, frívola y manipuladora, que sed - - dfd1c89656 - - - - - diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Blue Hai Pani - Pani MP3 Download Listen to Yo Yo Honey Singh Sandeep Kapoor and Soniya Sharma.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Blue Hai Pani - Pani MP3 Download Listen to Yo Yo Honey Singh Sandeep Kapoor and Soniya Sharma.md deleted file mode 100644 index 8d402677755f0e37d13667e7c9e7bf2bab651759..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Blue Hai Pani - Pani MP3 Download Listen to Yo Yo Honey Singh Sandeep Kapoor and Soniya Sharma.md +++ /dev/null @@ -1,131 +0,0 @@ -
-

Download Blue Hai Pani Pani MP3: A Guide for Music Lovers

-

Do you love listening to upbeat and catchy songs? Do you want to add some fun and color to your playlist? If yes, then you should definitely check out blue hai pani pani mp3.

-

download blue hai pani pani mp3


Download File: https://urlin.us/2uSVHp



-

Blue hai pani pani is a popular Hindi song from the movie Yaariyan, sung by Yo Yo Honey Singh and Neha Kakkar. It is a party anthem that will make you want to dance and sing along.

-

In this article, we will guide you on how to download blue hai pani pani mp3 legally and safely. We will also tell you more about this amazing song and why it is so popular among music lovers.

-

What is Blue Hai Pani Pani?

-

Blue hai pani pani is a song from the 2014 Bollywood movie Yaariyan, directed by Divya Khosla Kumar and starring Himansh Kohli, Rakul Preet Singh, and Nicole Faria. The movie is a coming-of-age story of five friends who embark on a college adventure and face various challenges in life, love, and friendship.

-

The song is sung by Yo Yo Honey Singh and Neha Kakkar, two of the most popular and versatile singers in the Indian music industry. Yo Yo Honey Singh is known for his rap songs that blend Hindi and Punjabi lyrics with catchy beats and tunes. Neha Kakkar is known for her melodious voice and energetic style that can suit any genre of music.

-

Blue hai pani pani is a fusion of rap and pop music, with a mix of Hindi and English lyrics. The song is about having fun and enjoying life with your friends. The title of the song literally means "blue water water", which is a reference to the color of the sky and the sea. The song also uses some metaphors and similes to describe the feelings of the singers, such as "tujhe lagta hai tu chaand hai sitaara" (you think you are the moon and the star) and "teri aankhon ka ye paani sunny sunny sunny" (the water in your eyes is sunny sunny sunny).

-

Why is Blue Hai Pani Pani So Popular?

-

Blue hai pani pani is one of the most popular songs of 2014, and it still remains a favorite among music lovers. There are many reasons why this song is so popular, such as:

-

download blue hai pani pani mp3 song from Bollywood Holi
-download blue hai pani pani mp3 free online on Wynk Music
-download sunny sunny song by Yo Yo Honey Singh and Neha Kakkar from Yaariyan
-download blue hai pani pani mp3 ringtone for mobile
-download blue hai pani pani mp3 320kbps high quality
-download blue hai pani pani mp3 lyrics and video
-download blue hai pani pani mp3 remix version by DJ Chetas
-download blue hai pani pani mp3 instrumental karaoke
-download blue hai pani pani mp3 full song with album art
-download blue hai pani pani mp3 pagalworld.com
-download blue hai pani pani mp3 mr jatt.com
-download blue hai pani pani mp3 djpunjab.com
-download blue hai pani pani mp3 gaana.com
-download blue hai pani pani mp3 hungama.com
-download blue hai pani pani mp3 saavn.com
-download blue hai pani pani mp3 spotify.com
-download blue hai pani pani mp3 apple music
-download blue hai pani pani mp3 amazon music
-download blue hai pani pani mp3 youtube music
-download blue hai pani pani mp3 soundcloud.com
-download blue hai pani pani mp3 song.pk
-download blue hai pani pani mp3 wapking.cc
-download blue hai pani pani mp3 webmusic.in
-download blue hai pani pani mp3 raag.fm
-download blue hai pani pani mp3 masstamilan.com
-download blue hai pani pani mp3 naa songs
-download blue hai pani pani mp3 starmusiq.com
-download blue hai pani pani mp3 tamilwire.com
-download blue hai pani pani mp3 isaimini.com
-download blue hai pani pani mp3 kuttyweb.com
-download blue hai pani pani mp3 malayalamwap.net
-download blue hai pani pani mp3 teluguwap.net
-download blue hai pani pani mp3 kannadamasti.net
-download blue hai pani pani mp3 sensongsmp3.co.in
-download blue hai pani pani mp3 djmaza.info
-download blue hai pani

- -

With so many reasons to love this song, it is no wonder that blue hai pani pani mp3 is one of the most downloaded songs in India.

-

How to Download Blue Hai Pani Pani MP3?

-

Legal and Safe Options

-

If you want to download blue hai pani pani mp3, you should always opt for legal and safe options. This means that you should avoid using any pirated or illegal websites or apps that offer free downloads of songs without permission from the artists or the producers. Downloading songs from such sources can have many negative consequences, such as:

- -

Therefore, you should always use legal and safe options to download blue hai pani pani mp3. There are many platforms where you can download blue hai pani pani mp3 legally and safely, such as JioSaavn, YouTube Music, Spotify, Amazon Music, etc. These platforms offer high-quality downloads of songs at reasonable prices or subscriptions. They also respect the intellectual property rights of the artists or the producers and support them financially or otherwise.

-

Comparison of Different Platforms

-

To help you choose the best platform to download blue hai pani pani mp3 legally and safely, we have prepared a table that compares some of the most popular platforms where blue hai pani pani mp3 can be downloaded legally and safely: JioSaavn, YouTube Music, Spotify, and Amazon Music. The table includes information such as price, quality, availability, features, etc.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Platform | Price | Quality | Availability | Features |
| --- | --- | --- | --- | --- |
| JioSaavn | Free with ads or Rs. 99 per month for premium | Up to 320 kbps | India only | Unlimited downloads, offline listening, ad-free music, exclusive content, podcasts, radio, lyrics, etc. |
| YouTube Music | Free with ads or Rs. 99 per month for premium | Up to 256 kbps | Worldwide | Unlimited downloads, offline listening, ad-free music, background play, video mode, personalized recommendations, playlists, etc. |
| Spotify | Free with ads or Rs. 119 per month for premium | Up to 320 kbps | Worldwide | Unlimited downloads, offline listening, ad-free music, podcasts, radio, lyrics, personalized recommendations, playlists, etc. |
| Amazon Music | Free for Prime members or Rs. 129 per month for non-Prime members | Up to 320 kbps | Worldwide | Unlimited downloads, offline listening, ad-free music, podcasts, radio, lyrics, personalized recommendations, playlists, etc. |
-

As you can see from the table, each platform has its own advantages and disadvantages. You can choose the one that suits your preferences and budget. However, if you ask us for our recommendation, we would suggest you to use JioSaavn to download blue hai pani pani mp3 legally and safely. Here's why:

-

Step-by-Step Guide for JioSaavn

-

JioSaavn is one of the best platforms to download blue hai pani pani mp3 legally and safely. It offers high-quality downloads of songs at a reasonable price or subscription. It also respects the intellectual property rights of the artists or the producers and supports them financially or otherwise. Moreover, it has some exclusive features that make it stand out from the rest of the platforms.

-

To download blue hai pani pani mp3 from JioSaavn legally and safely, you need to follow these simple steps:

-
    -
  1. Create an account on JioSaavn using your email address or phone number. You can also sign in with your Facebook or Google account.
  2. -
  3. Search for blue hai pani pani on the search bar or browse through the categories or playlists.
  4. -
  5. Select the song from the results and click on the download icon on the bottom right corner of the screen.
  6. -
  7. Choose the quality and location of the download. You can choose between low (64 kbps), medium (160 kbps), or high (320 kbps) quality. You can also choose the folder where you want to save the song on your device.
  8. -
  9. Enjoy listening to blue hai pani pani mp3 on your device anytime and anywhere.
  10. -
-

That's it! You have successfully downloaded blue hai pani pani mp3 from JioSaavn legally and safely. Now you can enjoy listening to this amazing song and have fun with your friends.

-

Conclusion

-

In this article, we have guided you on how to download blue hai pani pani mp3 legally and safely. We have also told you more about this amazing song and why it is so popular among music lovers.

-

We hope you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.

-

If you liked this article, please share it with your friends and family who might also be interested in downloading blue hai pani pani mp3 legally and safely. And don't forget to subscribe to our newsletter for more tips and tricks on how to enjoy music online.

-

Thank you for reading this article and happy listening!

-

FAQs

-

Here are some frequently asked questions about blue hai pani pani mp3:

-
    -
  1. When was blue hai pani pani released?
    The song was released on December 9, 2014, as part of the movie soundtrack. The song was composed by Yo Yo Honey Singh and written by him and Lil Golu.
  2. -
  3. Who wrote blue hai pani pani?
    The song was written by Yo Yo Honey Singh and Lil Golu. Yo Yo Honey Singh is a rapper, singer, composer, and producer who has created many hit songs in Bollywood and Punjabi music. Lil Golu is a rapper and lyricist who has collaborated with Yo Yo Honey Singh on several songs.
  4. -
  5. What does blue hai pani pani mean?
    The title of the song literally means "blue water water", which is a reference to the color of the sky and the sea. The song also uses some metaphors and similes to describe the feelings of the singers, such as "tujhe lagta hai tu chaand hai sitaara" (you think you are the moon and the star) and "teri aankhon ka ye paani sunny sunny sunny" (the water in your eyes is sunny sunny sunny). The song is about having fun and enjoying life with your friends.
  6. -
  7. How many views does blue hai pani pani have?
    The official video of blue hai pani pani has over 500 million views on YouTube as of June 2023. The video features the actors and singers performing some cool dance moves and having fun with each other in Australia. The video is one of the most watched videos on YouTube in India.
  8. -
  9. Can I use blue hai pani pani as a ringtone?
    Yes, you can use blue hai pani pani as a ringtone on your phone. You can either download the song from a legal and safe platform and set it as your ringtone, or you can use a ringtone maker app to create a custom ringtone from the song. However, you should always respect the intellectual property rights of the artists or the producers and not share or distribute the ringtone without their permission.
  10. -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Gin Rummy Plus Hack APK for Free and Experience the Fun of Gin Rummy with Unlimited Coins.md b/spaces/1phancelerku/anime-remove-background/Download Gin Rummy Plus Hack APK for Free and Experience the Fun of Gin Rummy with Unlimited Coins.md deleted file mode 100644 index 67ba9c3d8897bf11d83db27a9c809f3793c14684..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Gin Rummy Plus Hack APK for Free and Experience the Fun of Gin Rummy with Unlimited Coins.md +++ /dev/null @@ -1,78 +0,0 @@ -
-

Download Gin Rummy Plus Hack APK: How to Get Unlimited Coins and Enjoy the Game

-

If you are a fan of card games, you might have heard of Gin Rummy Plus, one of the most popular and addictive online multiplayer games. In this game, you can play with millions of players from around the world, chat with them, send gifts, and compete in tournaments. However, if you want to have more fun and win more games, you might need some extra coins to buy more chips, unlock new tables, and access premium features. That's why many players are looking for ways to download Gin Rummy Plus hack apk, a modified version of the game that gives you unlimited coins and other benefits. In this article, we will show you how to download Gin Rummy Plus hack apk, what are the benefits and risks of using it, and some frequently asked questions about it.

-

Introduction

-

What is Gin Rummy Plus?

-

Gin Rummy Plus is a card game developed by Zynga, the same company that created popular games like FarmVille, Words with Friends, and Zynga Poker. Gin Rummy Plus is based on the classic card game of gin rummy, where two players try to form sets and runs of cards and score points by knocking or going gin. The game has several modes, such as classic, quick, oklahoma, and royal. You can also join leagues, play with friends, or challenge random players online. The game is free to download and play, but it also offers in-app purchases that allow you to buy more coins, chips, gems, and other items.
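To clarify the "sets and runs" idea mentioned above, here is a small Python sketch of how a meld could be validated. It is a rough illustration of the standard gin rummy rule, not Zynga's actual implementation, and the card representation is an assumption made for the example.

```python
# Rough illustration of gin rummy melds; cards are (rank, suit) tuples with ranks 1-13.
def is_set(meld):
    """A set: three or four cards of the same rank."""
    ranks = {rank for rank, _ in meld}
    return len(meld) in (3, 4) and len(ranks) == 1

def is_run(meld):
    """A run: three or more cards of consecutive rank in the same suit."""
    suits = {suit for _, suit in meld}
    ranks = sorted(rank for rank, _ in meld)
    return (len(meld) >= 3 and len(suits) == 1
            and all(b - a == 1 for a, b in zip(ranks, ranks[1:])))

print(is_set([(7, "hearts"), (7, "spades"), (7, "clubs")]))   # True
print(is_run([(4, "spades"), (5, "spades"), (6, "spades")]))  # True
```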

-

download gin rummy plus hack apk


Download Zip: https://jinyurl.com/2uNPrS



-

Why do you need Gin Rummy Plus hack apk?

-

As much as Gin Rummy Plus is fun and entertaining, it can also be frustrating and expensive if you run out of coins or chips. Coins are the main currency in the game that you use to buy chips, which are used to enter tables and tournaments. You can earn coins by winning games, completing daily missions, spinning the wheel, or watching ads. However, these methods are not enough to keep up with the increasing costs of playing at higher levels. You might also encounter players who use cheats or hacks to gain an unfair advantage over you. That's why many players resort to downloading Gin Rummy Plus hack apk, a modified version of the game that gives you unlimited coins and other benefits.

-

How to download Gin Rummy Plus hack apk

-

If you want to download Gin Rummy Plus hack apk, you need to follow these steps:

-

How to get unlimited coins in gin rummy plus mod apk
-Gin rummy plus hack apk latest version 2023
-Download gin rummy plus cheat tool for android
-Gin rummy plus mod apk free download no survey
-Gin rummy plus hack apk online generator
-Download gin rummy plus mod apk with happymod
-Gin rummy plus hack apk unlimited money and gems
-Download gin rummy plus mod apk for ios devices
-Gin rummy plus hack apk no root required
-Download gin rummy plus mod apk from zynga
-Gin rummy plus hack apk with anti-ban feature
-Download gin rummy plus mod apk 6.5.2 for android
-Gin rummy plus hack apk easy and fast
-Download gin rummy plus mod apk with multiplayer mode
-Gin rummy plus hack apk safe and secure
-Download gin rummy plus mod apk with all cards unlocked
-Gin rummy plus hack apk without human verification
-Download gin rummy plus mod apk from trusted sources
-Gin rummy plus hack apk working 100%
-Download gin rummy plus mod apk with unlimited lives
-Gin rummy plus hack apk for beginners and experts
-Download gin rummy plus mod apk with high-quality graphics
-Gin rummy plus hack apk with daily rewards and bonuses
-Download gin rummy plus mod apk with offline mode
-Gin rummy plus hack apk with auto-update feature

-

Step 1: Find a reliable source

-

The first thing you need to do is find a reliable source that offers the mod file of Gin Rummy Plus hack apk. There are many websites that claim to provide this file, but not all of them are trustworthy. Some of them might contain malware or viruses that can harm your device or steal your personal information. Some of them might also provide outdated or fake files that don't work or cause problems in the game. Therefore, you need to be careful and do some research before downloading anything from unknown sources. One of the sources that we recommend is HappyMod, a website that provides 100% working mods for various games and apps.

-

Step 2: Download the mod file

-

Once you find a reliable source, you need to download the mod file of Gin Rummy Plus hack apk. The file size is about 50 MB, so it won't take too long to download. You can use any browser or download manager to download the file. Make sure you have enough storage space on your device and a stable internet connection. You might also need to enable the option to install apps from unknown sources in your device settings. This will allow you to install the mod file without any issues.

-

Step 3: Install the mod file

-

After downloading the mod file, you need to install it on your device. To do this, you need to locate the file in your downloads folder or wherever you saved it. Then, you need to tap on the file and follow the instructions on the screen. The installation process might take a few minutes, depending on your device and the mod file. Once the installation is complete, you will see a confirmation message and an icon of Gin Rummy Plus on your home screen or app drawer.

-

Step 4: Launch the game and enjoy

-

The final step is to launch the game and enjoy the benefits of Gin Rummy Plus hack apk. To do this, you need to tap on the icon of Gin Rummy Plus and wait for the game to load. You will notice that you have unlimited coins and other features unlocked in the game. You can use these coins to buy more chips, enter higher tables, and access premium features. You can also play with other players who use the hack apk or the original version of the game. However, you should be careful not to abuse the hack apk or get caught by the game developers, as this might result in a ban or legal issues.

-

Benefits of using Gin Rummy Plus hack apk

-

There are many benefits of using Gin Rummy Plus hack apk, such as:

-

Unlimited coins

-

The main benefit of using Gin Rummy Plus hack apk is that you get unlimited coins in the game. Coins are the main currency in Gin Rummy Plus that you use to buy chips, which are used to enter tables and tournaments. With unlimited coins, you don't have to worry about running out of chips or losing games. You can buy as many chips as you want and play at any table or tournament you like. You can also use coins to buy gems, which are used to unlock new decks, backgrounds, and avatars.

-

No ads

-

Another benefit of using Gin Rummy Plus hack apk is that you don't have to watch ads in the game. Ads are annoying and distracting, especially when they pop up in the middle of a game or when you want to spin the wheel or claim a bonus. With Gin Rummy Plus hack apk, you can enjoy the game without any interruptions or delays caused by ads. You can also save your data and battery by not watching ads.

-

Free bonuses and rewards

-

A third benefit of using Gin Rummy Plus hack apk is that you get free bonuses and rewards in the game. Bonuses and rewards are extra coins, chips, gems, or items that you can get by completing daily missions, spinning the wheel, opening chests, or watching videos. With Gin Rummy Plus hack apk, you don't have to do any of these tasks to get bonuses and rewards. You can get them automatically every day or whenever you want. You can also get more bonuses and rewards by playing more games and winning more tournaments.

-

Access to premium features

-

A fourth benefit of using Gin Rummy Plus hack apk is that you get access to premium features in the game. Premium features are special features that are only available for players who pay real money or use gems. Some of these features are VIP tables, exclusive decks, backgrounds, avatars, chat stickers, and more. With Gin Rummy Plus hack apk, you don't have to pay anything or use gems to access these features. You can use them for free and customize your game experience according to your preferences.

-

Risks of using Gin Rummy Plus hack apk

-

However, there are also some risks of using Gin Rummy Plus hack apk, such as:

-

Malware and viruses

-

The first risk of using Gin Rummy Plus hack apk is that you might download malware or viruses on your device. Malware and viruses are malicious software that can harm your device or steal your personal information. They can also cause problems in your game or other apps on your device. As we mentioned earlier, not all sources that offer Gin Rummy Plus hack apk are trustworthy. Some of them might contain malware or viruses that can infect your device when you download or install them. Therefore, you need to be careful and use a reliable source like HappyMod or scan the file with an antivirus before installing it.

-

Ban from the game

-

The second risk of using Gin Rummy Plus hack apk is that you might get banned from the game. Ban is a punishment that prevents you from playing the game or accessing your account. Ban can happen for various reasons, such as violating the game rules, using cheats or hacks, or being reported by other players. The game developers have the right to ban any player who uses Gin Rummy Plus hack apk or any other unauthorized mod. Ban can be temporary or permanent, depending on the severity of the offense. If you get banned, you will lose all your progress, coins, chips, gems, and items in the game. You will also not be able to play with your friends or join leagues and tournaments.

-

Legal issues

-

The third risk of using Gin Rummy Plus hack apk is that you might face legal issues. Legal issues are problems that involve the law or the authorities. Legal issues can happen for various reasons, such as violating the game terms and conditions, infringing the game copyrights, or engaging in illegal gambling. The game developers have the right to take legal action against any player who uses Gin Rummy Plus hack apk or any other unauthorized mod. Legal action can result in fines, lawsuits, or even jail time, depending on the severity of the offense. If you face legal issues, you will not only lose your access to the game, but also your reputation and freedom.

-

Conclusion

-

Gin Rummy Plus is a fun and addictive card game that you can play with millions of players from around the world. However, if you want to have more fun and win more games, you might need some extra coins and other benefits. That's why many players are looking for ways to download Gin Rummy Plus hack apk, a modified version of the game that gives you unlimited coins and other benefits. However, before you download Gin Rummy Plus hack apk, you should also be aware of the benefits and risks of using it. While Gin Rummy Plus hack apk can give you unlimited coins, no ads, free bonuses and rewards, and access to premium features, it can also expose you to malware and viruses, ban from the game, and legal issues. Therefore, you should be careful and use a reliable source like HappyMod or scan the file with an antivirus before installing it. You should also not abuse the hack apk or get caught by the game developers, as this might result in a ban or legal issues.

-

FAQs

-

Here are some frequently asked questions about Gin Rummy Plus hack apk:

-

Q: Is Gin Rummy Plus hack apk safe to use?

-

A: Gin Rummy Plus hack apk is not 100% safe to use, as it might contain malware or viruses that can harm your device or steal your personal information. It might also cause problems in your game or other apps on your device. Therefore, you should be careful and use a reliable source like HappyMod or scan the file with an antivirus before installing it.

-

Q: Is Gin Rummy Plus hack apk legal to use?

-

A: Gin Rummy Plus hack apk is not legal to use, as it violates the game terms and conditions and infringes the game copyrights. It might also involve illegal gambling activities that are prohibited by law. Therefore, you should not use Gin Rummy Plus hack apk unless you want to face legal issues.

-

Q: How can I avoid getting banned from using Gin Rummy Plus hack apk?

-

A: The best way to avoid getting banned from using Gin Rummy Plus hack apk is to not use it at all. However, if you still want to use it, you should be careful and not abuse it or get caught by the game developers. You should also be cautious when playing against other players, whether they use the hack apk or the original version of the game, as they might report you or expose you.

-

Q: How can I update Gin Rummy Plus hack apk?

-

A: To update Gin Rummy Plus hack apk, you need to download and install the latest version of the mod file from a reliable source like HappyMod. You should also uninstall the previous version of the mod file before installing the new one. However, you should be aware that updating Gin Rummy Plus hack apk might cause some issues in your game or device.

-

Q: Where can I find more information about Gin Rummy Plus hack apk?

-

A: You can find more information about Gin Rummy Plus hack apk on HappyMod, a website that provides 100% working mods for various games and apps. You can also read reviews and comments from other users who have used Gin Rummy Plus hack apk.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download NBA 2K14 v1.14 APK for Android Multiplayer Mode HD Graphics and More.md b/spaces/1phancelerku/anime-remove-background/Download NBA 2K14 v1.14 APK for Android Multiplayer Mode HD Graphics and More.md deleted file mode 100644 index ac4e94dbe6af2bba5719cad01e77693e8cd397c7..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download NBA 2K14 v1.14 APK for Android Multiplayer Mode HD Graphics and More.md +++ /dev/null @@ -1,87 +0,0 @@ - -

How to Download NBA 2K14 v1.14 for Android

-

If you are a fan of basketball games, you must have heard of NBA 2K14, the latest installment of the world's best NBA video game franchise. NBA 2K14 is a realistic and immersive game that lets you play with your favorite players and teams in various modes and challenges. You can also enjoy the full 2K Beats soundtrack that features music from top artists and producers.

-

In this article, we will show you how to download NBA 2K14 v1.14 for Android, which is the most updated version of the game that offers improved graphics, performance, and features. We will also share some tips and tricks that will help you master the game and become a champion.

-

download nba 2k14 v1.14 for android





-

Features of NBA 2K14 v1.14 for Android

-

NBA 2K14 v1.14 for Android is not just a simple basketball game, it is a complete experience that will make you feel like you are on the court. Here are some of the features that make this game stand out:

- -

Requirements for NBA 2K14 v1.14 for Android

-

NBA 2K14 v1.14 for Android is a high-quality game that requires a decent device to run smoothly. Here are the minimum requirements that you need to meet:

- -

If your device meets these requirements, you are ready to download NBA 2K14 v1.14 for Android. If not, you may experience some lagging or crashing issues.

-

How to Download NBA 2K14 v1.14 for Android

-

Downloading NBA 2K14 v1.14 for Android is not as simple as downloading any other app from the Google Play Store. You need to follow some steps to make sure that the game works properly on your device. Here are the steps that you need to follow:

-
    -
  1. Enable unknown sources on your device: This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on.
  2. -
  3. Download the APK and OBB files from a trusted source: You need to download two files: the APK file, which is the installer of the game, and the OBB file, which is the data of the game. You can find these files from various websites, but make sure that they are safe and virus-free. You can use this link as an example, but we are not responsible for any issues that may arise from using it.
  4. -
  5. Install the APK file and extract the OBB file to the Android/obb folder: After downloading the files, locate them in your device's file manager and tap on the APK file to install it. Then, use a file extractor app such as ZArchiver to extract the OBB file to the Android/obb folder. If you don't have this folder, create it manually.
  6. -
  7. Launch the game and enjoy: Once you have installed and extracted the files, you can launch the game from your app drawer and start playing. You may need to grant some permissions to the game before it runs.
  8. -
-

Tips and Tricks for NBA 2K14 v1.14 for Android

-

NBA 2K14 v1.14 for Android is a fun and challenging game that will test your skills and knowledge of basketball. To help you improve your game and have more fun, here are some tips and tricks that you can use:

-

download nba 2k14 apk for android latest version
-download nba 2k14 mod apk + obb for android
-download nba 2k14 game for android free
-download nba 2k14 full version for android
-download nba 2k14 android apk + data
-download nba 2k14 offline apk for android
-download nba 2k14 apk + obb file for android
-download nba 2k14 updated roster for android
-download nba 2k14 apk + sd data for android
-download nba 2k14 apk + obb highly compressed for android
-download nba 2k14 apk + obb modded for android
-download nba 2k14 apk + obb unlimited money for android
-download nba 2k14 apk + obb no root for android
-download nba 2k14 apk + obb offline mode for android
-download nba 2k14 apk + obb with lebron james for android
-download nba 2k14 apk + obb with commentary for android
-download nba 2k14 apk + obb with hd graphics for android
-download nba 2k14 apk + obb with multiplayer for android
-download nba 2k14 apk + obb with all players unlocked for android
-download nba 2k14 apk + obb with realistic gameplay for android
-how to download nba 2k14 v1.14 for android
-where to download nba 2k14 v1.14 for android
-best site to download nba 2k14 v1.14 for android
-easiest way to download nba 2k14 v1.14 for android
-fastest way to download nba 2k14 v1.14 for android
-safest way to download nba 2k14 v1.14 for android
-cheapest way to download nba 2k14 v1.14 for android
-legal way to download nba 2k14 v1.14 for android
-illegal way to download nba 2k14 v1.14 for android
-working method to download nba 2k14 v1.14 for android
-tips and tricks to download nba 2k14 v1.14 for android
-guide and tutorial to download nba 2k14 v1.14 for android
-step by step instructions to download nba 2k14 v1.14 for android
-video tutorial to download nba 2k14 v1.14 for android
-review and rating of nba 2k14 v1.14 for android
-features and benefits of nba 2k14 v1.14 for android
-pros and cons of nba 2k14 v1.14 for android
-comparison and contrast of nba 2k14 v1.14 for android
-alternatives and substitutes of nba 2k14 v1.14 for android
-recommendations and suggestions of nba 2k14 v1.14 for android

- -

Conclusion

-

NBA 2K14 v1.14 for Android is an amazing basketball game that will give you hours of entertainment and excitement. It has realistic graphics, sound effects, and gameplay that will make you feel like you are on the court with your favorite players and teams. It also has various modes and features that will keep you engaged and challenged.

-

If you want to download NBA 2K14 v1.14 for Android, you need to follow some steps that we have explained in this article. You also need to meet some requirements that we have listed in this article. And if you want to improve your game and have more fun, you can use some tips and tricks that we have shared in this article.

-

We hope that this article has helped you learn how to download NBA 2K14 v1.14 for Android and enjoy it on your device. Make sure you have enough free space on your device before downloading and installing the game.
-

  • Q3: Can you play NBA 2K14 v1.14 for Android offline?
  • -
  • A3: Yes, you can play NBA 2K14 v1.14 for Android offline, but you will not be able to access some features that require an internet connection, such as the online multiplayer mode, the leaderboards, or the store. You will also need to activate the game online once before playing it offline.
  • -
  • Q4: Can you play NBA 2K14 v1.14 for Android with your friends?
  • -
  • A4: Yes, you can play NBA 2K14 v1.14 for Android with your friends, either online or locally. To play online, you need to have an internet connection and a Google Play Games account. You can then invite your friends to join your game or join their game from the multiplayer menu. To play locally, you need to have a Wi-Fi connection and a Bluetooth connection. You can then create or join a game from the local multiplayer menu.
  • -
  • Q5: What are the best teams to play with in NBA 2K14 v1.14 for Android?
  • -
  • A5: This is a subjective question that depends on your personal preference and style of play. However, some of the teams that are generally considered to be the best in NBA 2K14 v1.14 for Android are the Miami Heat, the Los Angeles Lakers, the Brooklyn Nets, the Golden State Warriors, and the Chicago Bulls. These teams have some of the best players and ratings in the game and can dominate any opponent.
  • -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/FIFA Mobile APK Mod - Snrsz Para ve Altn Hilesi Nasl Yaplr?.md b/spaces/1phancelerku/anime-remove-background/FIFA Mobile APK Mod - Snrsz Para ve Altn Hilesi Nasl Yaplr?.md deleted file mode 100644 index d8f694e59fdc9b0b036318d4efd8c0537040c517..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/FIFA Mobile APK Mod - Snrsz Para ve Altn Hilesi Nasl Yaplr?.md +++ /dev/null @@ -1,132 +0,0 @@ -
    -

    FIFA Mobile para hilesi apk: How to get unlimited coins and gems in FIFA Mobile

    -

    If you are a fan of soccer games, you have probably heard of FIFA Mobile, the mobile version of the popular FIFA franchise by EA Sports. FIFA Mobile is a free-to-play game that lets you build your ultimate team of soccer stars, compete in various modes and events, and experience realistic soccer simulation on your device. But as with most free-to-play games, FIFA Mobile also has a currency system that limits your progress and enjoyment. Coins and gems are the main currencies in FIFA Mobile, and you need them to buy players, packs, upgrades, and more. However, earning coins and gems can be slow and tedious, especially if you want to get the best players and items in the game.

    -

    That's why some players resort to using cheat tools like para hilesi apk, which claims to give you unlimited coins and gems in FIFA Mobile. But what is para hilesi apk, how does it work, and is it safe to use? In this article, we will answer these questions and more, as well as provide you with a step-by-step guide on how to download, install, and use para hilesi apk on your device. Read on to find out more.

    -

    fifa mobile para hilesi apk





    -

    What is FIFA Mobile and why is it so popular?

    -

    FIFA Mobile is a soccer game developed by EA Sports for iOS and Android devices. It is based on the FIFA series of games, which are known for their realistic graphics, gameplay, and licenses. FIFA Mobile features over 15,000 authentic soccer players from over 600 teams across 30+ leagues, including the Premier League, La Liga, Bundesliga, Serie A, Ligue 1, MLS, and more. You can also play with national teams from the FIFA World Cup 2022™ mode, which lets you replay the official tournament brackets with any of the 32 qualified nations.

    -

    FIFA Mobile features and gameplay

    -

    FIFA Mobile has several features that make it an immersive and engaging soccer game for mobile devices. Some of these features are:

    - -

    The gameplay of FIFA Mobile is simple and intuitive. You can control your players using a virtual joystick on the left side of the screen, and use buttons on the right side to sprint, skill, pass, shoot, tackle, or switch players. You can also use swipe gestures to aim your shots or passes more precisely. The game also has an auto-play option that lets the AI control your players for you.

    -

    FIFA Mobile modes and events

    -

    FIFA Mobile has several modes and events that let you compete against other players or the AI in different scenarios. Some of these modes and events are:

    - -

    FIFA Mobile also has a social aspect, where you can join a league with other players and chat, compete, and cooperate with them. You can also participate in league tournaments, league vs league matches, and league survival events.

    -

    fifa mobile mod apk unlimited money
    -fifa mobile hack apk download
    -fifa mobile 2022 apk para hilesi
    -fifa mobile apk indir ücretsiz
    -fifa mobile son sürüm apk hile
    -fifa mobile android oyun club apk
    -fifa mobile 18 mod apk para hilesi
    -fifa mobile 21 apk hileli indir
    -fifa mobile apk mod menu
    -fifa mobile apk full sınırsız para
    -fifa mobile apk hile nasıl yapılır
    -fifa mobile apk güncel hile
    -fifa mobile apk mega hileli
    -fifa mobile apk vip hile
    -fifa mobile apk altın hilesi
    -fifa mobile apk elmas hilesi
    -fifa mobile apk oyuncu hilesi
    -fifa mobile apk transfer hilesi
    -fifa mobile apk antrenman hilesi
    -fifa mobile apk enerji hilesi
    -fifa mobile apk online hile
    -fifa mobile apk offline hile
    -fifa mobile apk no root hile
    -fifa mobile apk yeni hileler
    -fifa mobile apk kolay hile yapma
    -fifa mobile mod apk son sürüm indir
    -fifa mobile mod apk android 1
    -fifa mobile mod apk revdl
    -fifa mobile mod apk rexdl
    -fifa mobile mod apk happymod
    -fifa mobile mod apk an1.com
    -fifa mobile mod apk unlimited coins and points
    -fifa mobile mod apk all players unlocked
    -fifa mobile mod apk latest version 2022
    -fifa mobile mod apk free download for android
    -fifa mobile mod apk no verification required
    -fifa mobile mod apk no ban risk
    -fifa mobile mod apk anti ban protection
    -fifa mobile mod apk cheat engine enabled
    -fifa mobile mod apk gameplay video proof

    -

    What is para hilesi apk and how does it work?

    -

    Para hilesi apk is a cheat tool that claims to give you unlimited coins and gems in FIFA Mobile. It is an application that you can download and install on your device, and use it to modify the game data and resources. Para hilesi apk is not an official product of EA Sports or FIFA Mobile, and it is not endorsed or supported by them. It is a third-party tool that is created by unknown developers who may have malicious intentions.

    -

    Para hilesi apk features and benefits

    -

    Para hilesi apk promises to give you several benefits that can enhance your FIFA Mobile experience. Some of these benefits are:

    - -

    Para hilesi apk risks and drawbacks

    -

    However, para hilesi apk also comes with several risks and drawbacks that can ruin your FIFA Mobile experience. Some of these risks and drawbacks are:

    - -

    How to download and install para hilesi apk on your device?

    -

    If you still want to try para hilesi apk despite its risks and drawbacks, you will need to follow some steps to download and install it on your device. However, we do not recommend doing so, as it may harm your device or your account. Use para hilesi apk at your own risk.

    -

    Step-by-step guide for Android users

    -

    If you are using an Android device, here are the steps to download and install para hilesi apk:

    -
      -
    1. Go to the settings of your device and enable the option to install apps from unknown sources.
    2. -
    3. Go to a website that offers para hilesi apk download link. Make sure it is a reliable and trustworthy source.
    4. -
    5. Click on the download button and wait for the file to be downloaded on your device.
    6. -
    7. Locate the file in your device's file manager and tap on it to start the installation process.
    8. -
    9. Follow the instructions on the screen and grant the necessary permissions to para hilesi apk.Once the installation is complete, you can launch para hilesi apk from your device's app drawer or home screen.
    10. -
    -

    Step-by-step guide for iOS users

    -

    If you are using an iOS device, here are the steps to download and install para hilesi apk:

    -
      -
    1. Go to the settings of your device and trust the profile of para hilesi apk. You may need to enter your device's passcode to do so.
    2. -
    3. Go to a website that offers para hilesi apk download link. Make sure it is a reliable and trustworthy source.
    4. -
    5. Click on the download button and wait for the file to be downloaded on your device.
    6. -
    7. Locate the file in your device's file manager and tap on it to start the installation process.
    8. -
    9. Follow the instructions on the screen and grant the necessary permissions to para hilesi apk.
    10. -
    11. Once the installation is complete, you can launch para hilesi apk from your device's app drawer or home screen.
    12. -
    -

    How to use para hilesi apk to get unlimited coins and gems in FIFA Mobile?

    -

    After you have downloaded and installed para hilesi apk on your device, you can use it to get unlimited coins and gems in FIFA Mobile. Here are some tips and tricks for using para hilesi apk effectively:

    - -

    Alternatives to para hilesi apk

    -

    If you are looking for alternatives to para hilesi apk, there are some other ways to get coins and gems in FIFA Mobile without cheating. Some of these ways are:

    - -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have discussed FIFA Mobile para hilesi apk, a cheat tool that claims to give you unlimited coins and gems in FIFA Mobile. We have explained what FIFA Mobile is and why it is so popular, what para hilesi apk is and how it works, how to download and install para hilesi apk on your device, how to use para hilesi apk to get unlimited coins and gems in FIFA Mobile, and some alternatives to para hilesi apk. We have also highlighted some of the risks and drawbacks of using para hilesi apk, such as ban risk, virus risk, compatibility risk, quality risk, etc.

    -

    Call to action and disclaimer

    -

    If you want to try para hilesi apk for yourself, you can follow the steps we have provided above. However, we do not recommend doing so, as it may harm your device or your account. Use para hilesi apk at your own risk. We are not responsible for any damage or loss that may occur from using para hilesi apk.

    -

    Alternatively, you can play FIFA Mobile the legit way and enjoy the game without cheating. You can earn coins and gems by playing matches and events, buying packs and offers, selling players and items, joining a league, etc. You can also improve your skills and strategies by learning from other players, watching tutorials, reading guides, etc. You can have fun and satisfaction by building your ultimate team of soccer stars, competing in various modes and events, and experiencing realistic soccer simulation on your device.

    -

    Whatever you choose to do, we hope you have a great time playing FIFA Mobile. Thank you for reading this article.

    -

    FAQs

    -

    Here are some frequently asked questions about FIFA Mobile para hilesi apk:

    -
      -
    1. Q: Is para hilesi apk free to use? A: Yes, para hilesi apk is free to use. However, you may need to complete some verification steps before you can use it, such as completing a captcha or a survey.
    2. -
    3. Q: Is para hilesi apk safe to use? A: No, para hilesi apk is not safe to use. It is a cheat tool that violates the terms of service of FIFA Mobile, and it can be detected by the game's anti-cheat system. It can also expose your device to viruses, malware, spyware, or other harmful software. It can also cause errors, glitches, crashes, or performance issues that can affect your gameplay.
    4. -
    5. Q: Can I use para hilesi apk on any device or operating system? A: No, para hilesi apk may not work properly on any device or operating system. It may be incompatible with the latest version of FIFA Mobile, or with different devices or operating systems. It may also require some settings or permissions that may not be available on your device or operating system.
    6. -
    7. Q: Can I use para hilesi apk with my existing FIFA Mobile account? A: Yes, you can use para hilesi apk with your existing FIFA Mobile account. However, you may risk losing your account or your progress if you are caught using para hilesi apk. You may also lose your items or rewards that you have earned legitimately in the game.
    8. -
    9. Q: Can I use para hilesi apk offline? A: No, you cannot use para hilesi apk offline. You need to have a stable internet connection and enough storage space on your device to use para hilesi apk. You also need to connect to the game's servers to generate coins and gems in FIFA Mobile.
    10. -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/models/embeddings.py b/spaces/1toTree/lora_test/ppdiffusers/models/embeddings.py deleted file mode 100644 index a7b5c91ca515c0b6d89541d266290f0cf46e0609..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/models/embeddings.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import math - -import numpy as np -import paddle -from paddle import nn - - -def get_timestep_embedding( - timesteps: paddle.Tensor, - embedding_dim: int, - flip_sin_to_cos: bool = False, - downscale_freq_shift: float = 1, - scale: float = 1, - max_period: int = 10000, -): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. - - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the - embeddings. :return: an [N x dim] Tensor of positional embeddings. - """ - assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" - - half_dim = embedding_dim // 2 - exponent = -math.log(max_period) * paddle.arange(start=0, end=half_dim, dtype="float32") - exponent = exponent / (half_dim - downscale_freq_shift) - - emb = paddle.exp(exponent) - emb = timesteps[:, None].cast("float32") * emb[None, :] - - # scale embeddings - emb = scale * emb - - # concat sine and cosine embeddings - emb = paddle.concat([paddle.sin(emb), paddle.cos(emb)], axis=-1) - - # flip sine and cosine embeddings - if flip_sin_to_cos: - emb = paddle.concat([emb[:, half_dim:], emb[:, :half_dim]], axis=-1) - - # zero pad - if embedding_dim % 2 == 1: - emb = paddle.concat(emb, paddle.zeros([emb.shape[0], 1]), axis=-1) - return emb - - -class TimestepEmbedding(nn.Layer): - def __init__(self, in_channels: int, time_embed_dim: int, act_fn: str = "silu", out_dim: int = None): - super().__init__() - - self.linear_1 = nn.Linear(in_channels, time_embed_dim) - self.act = None - if act_fn == "silu": - self.act = nn.Silu() - elif act_fn == "mish": - self.act = nn.Mish() - - if out_dim is not None: - time_embed_dim_out = out_dim - else: - time_embed_dim_out = time_embed_dim - self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out) - - def forward(self, sample): - sample = self.linear_1(sample) - - if self.act is not None: - sample = self.act(sample) - - sample = self.linear_2(sample) - return sample - - -class Timesteps(nn.Layer): - def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float): - super().__init__() - self.num_channels = num_channels - self.flip_sin_to_cos = flip_sin_to_cos - self.downscale_freq_shift = downscale_freq_shift - - def forward(self, timesteps): - t_emb = get_timestep_embedding( - timesteps, - self.num_channels, - 
flip_sin_to_cos=self.flip_sin_to_cos, - downscale_freq_shift=self.downscale_freq_shift, - ) - return t_emb - - -class GaussianFourierProjection(nn.Layer): - """Gaussian Fourier embeddings for noise levels.""" - - def __init__( - self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False - ): - super().__init__() - self.register_buffer("weight", paddle.randn((embedding_size,)) * scale) - self.log = log - self.flip_sin_to_cos = flip_sin_to_cos - - if set_W_to_weight: - # to delete later - self.register_buffer("W", paddle.randn((embedding_size,)) * scale) - - self.weight = self.W - - def forward(self, x): - if self.log: - x = paddle.log(x.cast(self.weight.dtype)) - - x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi - - if self.flip_sin_to_cos: - out = paddle.concat([paddle.cos(x_proj), paddle.sin(x_proj)], axis=-1) - else: - out = paddle.concat([paddle.sin(x_proj), paddle.cos(x_proj)], axis=-1) - return out - - -class ImagePositionalEmbeddings(nn.Layer): - """ - Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for the - height and width of the latent space. - - For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092 - - For VQ-diffusion: - - Output vector embeddings are used as input for the transformer. - - Note that the vector embeddings for the transformer are different than the vector embeddings from the VQVAE. - - Args: - num_embed (`int`): - Number of embeddings for the latent pixels embeddings. - height (`int`): - Height of the latent image i.e. the number of height embeddings. - width (`int`): - Width of the latent image i.e. the number of width embeddings. - embed_dim (`int`): - Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings. 
- """ - - def __init__( - self, - num_embed: int, - height: int, - width: int, - embed_dim: int, - ): - super().__init__() - - self.height = height - self.width = width - self.num_embed = num_embed - self.embed_dim = embed_dim - - self.emb = nn.Embedding(self.num_embed, embed_dim) - self.height_emb = nn.Embedding(self.height, embed_dim) - self.width_emb = nn.Embedding(self.width, embed_dim) - - def forward(self, index): - emb = self.emb(index) - - height_emb = self.height_emb(paddle.arange(self.height).reshape([1, self.height])) - - # 1 x H x D -> 1 x H x 1 x D - height_emb = height_emb.unsqueeze(2) - - width_emb = self.width_emb(paddle.arange(self.width).reshape([1, self.width])) - - # 1 x W x D -> 1 x 1 x W x D - width_emb = width_emb.unsqueeze(1) - - pos_emb = height_emb + width_emb - - # 1 x H x W x D -> 1 x L xD - pos_emb = pos_emb.reshape([1, self.height * self.width, -1]) - - emb = emb + pos_emb[:, : emb.shape[1], :] - - return emb diff --git a/spaces/3bdo7ss/Neutron_Chatbot/README.md b/spaces/3bdo7ss/Neutron_Chatbot/README.md deleted file mode 100644 index c0ddca48220e3541991d2754d713c8644436b94a..0000000000000000000000000000000000000000 --- a/spaces/3bdo7ss/Neutron_Chatbot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Neutron Chatbot -emoji: 📊 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.3 -app_file: app.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIConsultant/MusicGen/tests/common_utils/wav_utils.py b/spaces/AIConsultant/MusicGen/tests/common_utils/wav_utils.py deleted file mode 100644 index d3a563ee1749a58217ece55c9a08b8d93c0fc386..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/tests/common_utils/wav_utils.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -from pathlib import Path -import typing as tp - -import torch -import torchaudio - - -def get_white_noise(chs: int = 1, num_frames: int = 1): - wav = torch.randn(chs, num_frames) - return wav - - -def get_batch_white_noise(bs: int = 1, chs: int = 1, num_frames: int = 1): - wav = torch.randn(bs, chs, num_frames) - return wav - - -def save_wav(path: str, wav: torch.Tensor, sample_rate: int): - fp = Path(path) - kwargs: tp.Dict[str, tp.Any] = {} - if fp.suffix == '.wav': - kwargs['encoding'] = 'PCM_S' - kwargs['bits_per_sample'] = 16 - elif fp.suffix == '.mp3': - kwargs['compression'] = 320 - torchaudio.save(str(fp), wav, sample_rate, **kwargs) diff --git a/spaces/AIFILMS/generate_human_motion/VQ-Trans/README.md b/spaces/AIFILMS/generate_human_motion/VQ-Trans/README.md deleted file mode 100644 index 547a1d4b52a5c76f0f86c641557f99d0688c0ffd..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/generate_human_motion/VQ-Trans/README.md +++ /dev/null @@ -1,400 +0,0 @@ -# Motion VQ-Trans -Pytorch implementation of paper "Generating Human Motion from Textual Descriptions with High Quality Discrete Representation" - - -[[Notebook Demo]](https://colab.research.google.com/drive/1tAHlmcpKcjg_zZrqKku7AfpqdVAIFrF8?usp=sharing) - - -![teaser](img/Teaser.png) - -If our project is helpful for your research, please consider citing : (todo) -``` -@inproceedings{shen2020ransac, - title={RANSAC-Flow: generic two-stage image alignment}, - author={Shen, Xi and Darmon, Fran{\c{c}}ois and Efros, Alexei A and Aubry, Mathieu}, - booktitle={16th European Conference on Computer Vision} - year={2020} - } -``` - - -## Table of Content -* [1. Visual Results](#1-visual-results) -* [2. Installation](#2-installation) -* [3. Quick Start](#3-quick-start) -* [4. Train](#4-train) -* [5. Evaluation](#5-evaluation) -* [6. Motion Render](#6-motion-render) -* [7. Acknowledgement](#7-acknowledgement) -* [8. ChangLog](#8-changlog) - - - - -## 1. Visual Results (More results can be found in our project page (todo)) - -![visualization](img/ALLvis.png) - - -## 2. Installation - -### 2.1. Environment - - - -Our model can be learnt in a **single GPU V100-32G** - -```bash -conda env create -f environment.yml -conda activate VQTrans -``` - -The code was tested on Python 3.8 and PyTorch 1.8.1. - - -### 2.2. Dependencies - -```bash -bash dataset/prepare/download_glove.sh -``` - - -### 2.3. Datasets - - -We are using two 3D human motion-language dataset: HumanML3D and KIT-ML. For both datasets, you could find the details as well as download link [[here]](https://github.com/EricGuo5513/HumanML3D). - -Take HumanML3D for an example, the file directory should look like this: -``` -./dataset/HumanML3D/ -├── new_joint_vecs/ -├── texts/ -├── Mean.npy # same as in [HumanML3D](https://github.com/EricGuo5513/HumanML3D) -├── Std.npy # same as in [HumanML3D](https://github.com/EricGuo5513/HumanML3D) -├── train.txt -├── val.txt -├── test.txt -├── train_val.txt -└──all.txt -``` - - -### 2.4. Motion & text feature extractors: - -We use the same extractors provided by [t2m](https://github.com/EricGuo5513/text-to-motion) to evaluate our generated motions. Please download the extractors. - -```bash -bash dataset/prepare/download_extractor.sh -``` - -### 2.5. Pre-trained models - -The pretrained model files will be stored in the 'pretrained' folder: -```bash -bash dataset/prepare/download_model.sh -``` - - - -### 2.6. 
Render motion (optional) - -If you want to render the generated motion, you need to install: - -```bash -sudo sh dataset/prepare/download_smpl.sh -conda install -c menpo osmesa -conda install h5py -conda install -c conda-forge shapely pyrender trimesh mapbox_earcut -``` - - - -## 3. Quick Start - -A quick start guide of how to use our code is available in [demo.ipynb](https://colab.research.google.com/drive/1tAHlmcpKcjg_zZrqKku7AfpqdVAIFrF8?usp=sharing) - -

    -demo -

    - - -## 4. Train - -Note that, for kit dataset, just need to set '--dataname kit'. - -### 4.1. VQ-VAE - -The results are saved in the folder output_vqfinal. - -
    - -VQ training - - -```bash -python3 train_vq.py \ ---batch-size 256 \ ---lr 2e-4 \ ---total-iter 300000 \ ---lr-scheduler 200000 \ ---nb-code 512 \ ---down-t 2 \ ---depth 3 \ ---dilation-growth-rate 3 \ ---out-dir output \ ---dataname t2m \ ---vq-act relu \ ---quantizer ema_reset \ ---loss-vel 0.5 \ ---recons-loss l1_smooth \ ---exp-name VQVAE -``` - -
    - -### 4.2. Motion-Transformer - -The results are saved in the folder output_transformer. - -
    - -MoTrans training - - -```bash -python3 train_t2m_trans.py \ ---exp-name VQTransformer \ ---batch-size 128 \ ---num-layers 9 \ ---embed-dim-gpt 1024 \ ---nb-code 512 \ ---n-head-gpt 16 \ ---block-size 51 \ ---ff-rate 4 \ ---drop-out-rate 0.1 \ ---resume-pth output/VQVAE/net_last.pth \ ---vq-name VQVAE \ ---out-dir output \ ---total-iter 300000 \ ---lr-scheduler 150000 \ ---lr 0.0001 \ ---dataname t2m \ ---down-t 2 \ ---depth 3 \ ---quantizer ema_reset \ ---eval-iter 10000 \ ---pkeep 0.5 \ ---dilation-growth-rate 3 \ ---vq-act relu -``` - -
    - -## 5. Evaluation - -### 5.1. VQ-VAE -
    - -VQ eval - - -```bash -python3 VQ_eval.py \ ---batch-size 256 \ ---lr 2e-4 \ ---total-iter 300000 \ ---lr-scheduler 200000 \ ---nb-code 512 \ ---down-t 2 \ ---depth 3 \ ---dilation-growth-rate 3 \ ---out-dir output \ ---dataname t2m \ ---vq-act relu \ ---quantizer ema_reset \ ---loss-vel 0.5 \ ---recons-loss l1_smooth \ ---exp-name TEST_VQVAE \ ---resume-pth output/VQVAE/net_last.pth -``` - -
    - -### 5.2. Motion-Transformer - -
    - -MoTrans eval - - -```bash -python3 GPT_eval_multi.py \ ---exp-name TEST_VQTransformer \ ---batch-size 128 \ ---num-layers 9 \ ---embed-dim-gpt 1024 \ ---nb-code 512 \ ---n-head-gpt 16 \ ---block-size 51 \ ---ff-rate 4 \ ---drop-out-rate 0.1 \ ---resume-pth output/VQVAE/net_last.pth \ ---vq-name VQVAE \ ---out-dir output \ ---total-iter 300000 \ ---lr-scheduler 150000 \ ---lr 0.0001 \ ---dataname t2m \ ---down-t 2 \ ---depth 3 \ ---quantizer ema_reset \ ---eval-iter 10000 \ ---pkeep 0.5 \ ---dilation-growth-rate 3 \ ---vq-act relu \ ---resume-gpt output/VQTransformer/net_best_fid.pth -``` - -
    - - -## 6. Motion Render - -
    - -Motion Render - - -You should input the npy folder address and the motion names. Here is an example: - -```bash -python3 render_final.py --filedir output/TEST_VQTransformer/ --motion-list 000019 005485 -``` - -
    - -### 7. Acknowledgement - -We appreciate helps from : - -* Public code like [text-to-motion](https://github.com/EricGuo5513/text-to-motion), [TM2T](https://github.com/EricGuo5513/TM2T) etc. - -### 8. ChangLog - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/base.py b/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/base.py deleted file mode 100644 index c9ecda906145e239737901809aa59db8d3e231c6..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/base.py +++ /dev/null @@ -1,76 +0,0 @@ -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class Platform(object): - """Base class for all OpenGL platforms. - - Parameters - ---------- - viewport_width : int - The width of the main viewport, in pixels. - viewport_height : int - The height of the main viewport, in pixels - """ - - def __init__(self, viewport_width, viewport_height): - self.viewport_width = viewport_width - self.viewport_height = viewport_height - - @property - def viewport_width(self): - """int : The width of the main viewport, in pixels. - """ - return self._viewport_width - - @viewport_width.setter - def viewport_width(self, value): - self._viewport_width = value - - @property - def viewport_height(self): - """int : The height of the main viewport, in pixels. - """ - return self._viewport_height - - @viewport_height.setter - def viewport_height(self, value): - self._viewport_height = value - - @abc.abstractmethod - def init_context(self): - """Create an OpenGL context. - """ - pass - - @abc.abstractmethod - def make_current(self): - """Make the OpenGL context current. - """ - pass - - @abc.abstractmethod - def make_uncurrent(self): - """Make the OpenGL context uncurrent. - """ - pass - - @abc.abstractmethod - def delete_context(self): - """Delete the OpenGL context. - """ - pass - - @abc.abstractmethod - def supports_framebuffers(self): - """Returns True if the method supports framebuffer rendering. 
- """ - pass - - def __del__(self): - try: - self.delete_context() - except Exception: - pass diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/htsat.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/htsat.py deleted file mode 100644 index db96116286d307a73943886f947450215e061ba2..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/htsat.py +++ /dev/null @@ -1,1022 +0,0 @@ -# Ke Chen -# knutchen@ucsd.edu -# HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION -# Some layers designed on the model -# below codes are based and referred from https://github.com/microsoft/Swin-Transformer -# Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf - -import torch -import torch.nn as nn -import torch.nn.functional as F -from itertools import repeat -import collections.abc -import math -import warnings - -from torch.nn.init import _calculate_fan_in_and_fan_out -import torch.utils.checkpoint as checkpoint - -import random - -from torchlibrosa.stft import Spectrogram, LogmelFilterBank -from torchlibrosa.augmentation import SpecAugmentation - -from itertools import repeat -from .utils import do_mixup, interpolate - -from .feature_fusion import iAFF, AFF, DAF - -# from PyTorch internals -def _ntuple(n): - def parse(x): - if isinstance(x, collections.abc.Iterable): - return x - return tuple(repeat(x, n)) - return parse - -to_1tuple = _ntuple(1) -to_2tuple = _ntuple(2) -to_3tuple = _ntuple(3) -to_4tuple = _ntuple(4) -to_ntuple = _ntuple - -def drop_path(x, drop_prob: float = 0., training: bool = False): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, - the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for - changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use - 'survival rate' as the argument. - """ - if drop_prob == 0. or not training: - return x - keep_prob = 1 - drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) - random_tensor.floor_() # binarize - output = x.div(keep_prob) * random_tensor - return output - - -class DropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
- """ - def __init__(self, drop_prob=None): - super(DropPath, self).__init__() - self.drop_prob = drop_prob - - def forward(self, x): - return drop_path(x, self.drop_prob, self.training) - -class PatchEmbed(nn.Module): - """ 2D Image to Patch Embedding - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, patch_stride = 16, - enable_fusion=False, fusion_type='None'): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - patch_stride = to_2tuple(patch_stride) - self.img_size = img_size - self.patch_size = patch_size - self.patch_stride = patch_stride - self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1]) - self.num_patches = self.grid_size[0] * self.grid_size[1] - self.flatten = flatten - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.enable_fusion = enable_fusion - self.fusion_type = fusion_type - - padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2) - - if (self.enable_fusion) and (self.fusion_type == 'channel_map'): - self.proj = nn.Conv2d(in_chans*4, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding) - else: - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding) - self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() - - if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']): - self.mel_conv2d = nn.Conv2d(in_chans, embed_dim, kernel_size=(patch_size[0], patch_size[1]*3), stride=(patch_stride[0], patch_stride[1] * 3), padding=padding) - if self.fusion_type == 'daf_2d': - self.fusion_model = DAF() - elif self.fusion_type == 'aff_2d': - self.fusion_model = AFF(channels=embed_dim, type='2D') - elif self.fusion_type == 'iaff_2d': - self.fusion_model = iAFF(channels=embed_dim, type='2D') - def forward(self, x, longer_idx = None): - if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']): - global_x = x[:,0:1,:,:] - - - # global processing - B, C, H, W = global_x.shape - assert H == self.img_size[0] and W == self.img_size[1], \ - f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." - global_x = self.proj(global_x) - TW = global_x.size(-1) - if len(longer_idx) > 0: - # local processing - local_x = x[longer_idx,1:,:,:].contiguous() - B, C, H, W = local_x.shape - local_x = local_x.view(B*C,1,H,W) - local_x = self.mel_conv2d(local_x) - local_x = local_x.view(B,C,local_x.size(1),local_x.size(2),local_x.size(3)) - local_x = local_x.permute((0,2,3,1,4)).contiguous().flatten(3) - TB,TC,TH,_ = local_x.size() - if local_x.size(-1) < TW: - local_x = torch.cat([local_x, torch.zeros((TB,TC,TH,TW-local_x.size(-1)), device=global_x.device)], dim=-1) - else: - local_x = local_x[:,:,:,:TW] - - global_x[longer_idx] = self.fusion_model(global_x[longer_idx],local_x) - x = global_x - else: - B, C, H, W = x.shape - assert H == self.img_size[0] and W == self.img_size[1], \ - f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
- x = self.proj(x) - - if self.flatten: - x = x.flatten(2).transpose(1, 2) # BCHW -> BNC - x = self.norm(x) - return x - -class Mlp(nn.Module): - """ MLP as used in Vision Transformer, MLP-Mixer and related networks - """ - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - -def _no_grad_trunc_normal_(tensor, mean, std, a, b): - # Cut & paste from PyTorch official master until it's in a few official releases - RW - # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1. + math.erf(x / math.sqrt(2.))) / 2. - - if (mean < a - 2 * std) or (mean > b + 2 * std): - warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " - "The distribution of values may be incorrect.", - stacklevel=2) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - l = norm_cdf((a - mean) / std) - u = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. - tensor.uniform_(2 * l - 1, 2 * u - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - # type: (Tensor, float, float, float, float) -> Tensor - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. The values are effectively drawn from the - normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` - with values outside :math:`[a, b]` redrawn until they are within - the bounds. The method used for generating the random values works - best when :math:`a \leq \text{mean} \leq b`. 
- Args: - tensor: an n-dimensional `torch.Tensor` - mean: the mean of the normal distribution - std: the standard deviation of the normal distribution - a: the minimum cutoff value - b: the maximum cutoff value - Examples: - >>> w = torch.empty(3, 5) - >>> nn.init.trunc_normal_(w) - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) - - -def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): - fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) - if mode == 'fan_in': - denom = fan_in - elif mode == 'fan_out': - denom = fan_out - elif mode == 'fan_avg': - denom = (fan_in + fan_out) / 2 - - variance = scale / denom - - if distribution == "truncated_normal": - # constant is stddev of standard normal truncated to (-2, 2) - trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978) - elif distribution == "normal": - tensor.normal_(std=math.sqrt(variance)) - elif distribution == "uniform": - bound = math.sqrt(3 * variance) - tensor.uniform_(-bound, bound) - else: - raise ValueError(f"invalid distribution {distribution}") - - -def lecun_normal_(tensor): - variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - r""" Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 - """ - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """ - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x, attn - - def extra_repr(self): - return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' - - -# We use the model based on Swintransformer Block, therefore we can use the swin-transformer pretrained model -class SwinTransformerBlock(nn.Module): - r""" Swin Transformer Block. - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resulotion. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.GELU, norm_layer=nn.LayerNorm, norm_before_mlp='ln'): - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - self.norm_before_mlp = norm_before_mlp - if min(self.input_resolution) <= self.window_size: - # if window size is larger than input resolution, we don't partition windows - self.shift_size = 0 - self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - if self.norm_before_mlp == 'ln': - self.norm2 = nn.LayerNorm(dim) - elif self.norm_before_mlp == 'bn': - self.norm2 = lambda x: nn.BatchNorm1d(dim)(x.transpose(1, 2)).transpose(1, 2) - else: - raise NotImplementedError - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - if self.shift_size > 0: - # calculate attention mask for SW-MSA - H, W = self.input_resolution - img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - else: - attn_mask = None - - self.register_buffer("attn_mask", attn_mask) - - def forward(self, x): - # pdb.set_trace() - H, W = self.input_resolution - # print("H: ", H) - # print("W: ", W) - # pdb.set_trace() - B, L, C = x.shape - # assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows, attn = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C - - # 
merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x, attn - - def extra_repr(self): - return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ - f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" - - - -class PatchMerging(nn.Module): - r""" Patch Merging Layer. - Args: - input_resolution (tuple[int]): Resolution of input feature. - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.input_resolution = input_resolution - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x): - """ - x: B, H*W, C - """ - H, W = self.input_resolution - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." - - x = x.view(B, H, W, C) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - def extra_repr(self): - return f"input_resolution={self.input_resolution}, dim={self.dim}" - - -class BasicLayer(nn.Module): - """ A basic Swin Transformer layer for one stage. - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
- """ - - def __init__(self, dim, input_resolution, depth, num_heads, window_size, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, - norm_before_mlp='ln'): - - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList([ - SwinTransformerBlock(dim=dim, input_resolution=input_resolution, - num_heads=num_heads, window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop, attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer, norm_before_mlp=norm_before_mlp) - for i in range(depth)]) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x): - attns = [] - for blk in self.blocks: - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x) - else: - x, attn = blk(x) - if not self.training: - attns.append(attn.unsqueeze(0)) - if self.downsample is not None: - x = self.downsample(x) - if not self.training: - attn = torch.cat(attns, dim = 0) - attn = torch.mean(attn, dim = 0) - return x, attn - - def extra_repr(self): - return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" - - -# The Core of HTSAT -class HTSAT_Swin_Transformer(nn.Module): - r"""HTSAT based on the Swin Transformer - Args: - spec_size (int | tuple(int)): Input Spectrogram size. Default 256 - patch_size (int | tuple(int)): Patch size. Default: 4 - path_stride (iot | tuple(int)): Patch Stride for Frequency and Time Axis. Default: 4 - in_chans (int): Number of input image channels. Default: 1 (mono) - num_classes (int): Number of classes for classification head. Default: 527 - embed_dim (int): Patch embedding dimension. Default: 96 - depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer. - num_heads (tuple(int)): Number of attention heads in different layers. - window_size (int): Window size. Default: 8 - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None - drop_rate (float): Dropout rate. Default: 0 - attn_drop_rate (float): Attention dropout rate. Default: 0 - drop_path_rate (float): Stochastic depth rate. Default: 0.1 - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False - patch_norm (bool): If True, add normalization after patch embedding. Default: True - use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False - config (module): The configuration Module from config.py - """ - - def __init__(self, spec_size=256, patch_size=4, patch_stride=(4,4), - in_chans=1, num_classes=527, - embed_dim=96, depths=[2, 2, 6, 2], num_heads=[4, 8, 16, 32], - window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, - norm_layer=nn.LayerNorm, - ape=False, patch_norm=True, - use_checkpoint=False, norm_before_mlp='ln', config = None, - enable_fusion = False, fusion_type = 'None', **kwargs): - super(HTSAT_Swin_Transformer, self).__init__() - - self.config = config - self.spec_size = spec_size - self.patch_stride = patch_stride - self.patch_size = patch_size - self.window_size = window_size - self.embed_dim = embed_dim - self.depths = depths - self.ape = ape - self.in_chans = in_chans - self.num_classes = num_classes - self.num_heads = num_heads - self.num_layers = len(self.depths) - self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1)) - - self.drop_rate = drop_rate - self.attn_drop_rate = attn_drop_rate - self.drop_path_rate = drop_path_rate - - self.qkv_bias = qkv_bias - self.qk_scale = None - - self.patch_norm = patch_norm - self.norm_layer = norm_layer if self.patch_norm else None - self.norm_before_mlp = norm_before_mlp - self.mlp_ratio = mlp_ratio - - self.use_checkpoint = use_checkpoint - - self.enable_fusion = enable_fusion - self.fusion_type = fusion_type - - # process mel-spec ; used only once - self.freq_ratio = self.spec_size // self.config.mel_bins - window = 'hann' - center = True - pad_mode = 'reflect' - ref = 1.0 - amin = 1e-10 - top_db = None - self.interpolate_ratio = 32 # Downsampled ratio - # Spectrogram extractor - self.spectrogram_extractor = Spectrogram(n_fft=config.window_size, hop_length=config.hop_size, - win_length=config.window_size, window=window, center=center, pad_mode=pad_mode, - freeze_parameters=True) - # Logmel feature extractor - self.logmel_extractor = LogmelFilterBank(sr=config.sample_rate, n_fft=config.window_size, - n_mels=config.mel_bins, fmin=config.fmin, fmax=config.fmax, ref=ref, amin=amin, top_db=top_db, - freeze_parameters=True) - # Spec augmenter - self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2, - freq_drop_width=8, freq_stripes_num=2) # 2 2 - self.bn0 = nn.BatchNorm2d(self.config.mel_bins) - - - # split spctrogram into non-overlapping patches - self.patch_embed = PatchEmbed( - img_size=self.spec_size, patch_size=self.patch_size, in_chans=self.in_chans, - embed_dim=self.embed_dim, norm_layer=self.norm_layer, patch_stride = patch_stride, - enable_fusion=self.enable_fusion, fusion_type=self.fusion_type - ) - - num_patches = self.patch_embed.num_patches - patches_resolution = self.patch_embed.grid_size - self.patches_resolution = patches_resolution - - # absolute position embedding - if self.ape: - self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.embed_dim)) - trunc_normal_(self.absolute_pos_embed, std=.02) - - self.pos_drop = nn.Dropout(p=self.drop_rate) - - # stochastic depth - dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))] # stochastic depth decay rule - - # build layers - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = BasicLayer(dim=int(self.embed_dim * 2 ** i_layer), - input_resolution=(patches_resolution[0] // (2 ** i_layer), - patches_resolution[1] // (2 ** i_layer)), - depth=self.depths[i_layer], - num_heads=self.num_heads[i_layer], - 
window_size=self.window_size, - mlp_ratio=self.mlp_ratio, - qkv_bias=self.qkv_bias, qk_scale=self.qk_scale, - drop=self.drop_rate, attn_drop=self.attn_drop_rate, - drop_path=dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])], - norm_layer=self.norm_layer, - downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, - use_checkpoint=use_checkpoint, - norm_before_mlp=self.norm_before_mlp) - self.layers.append(layer) - - self.norm = self.norm_layer(self.num_features) - self.avgpool = nn.AdaptiveAvgPool1d(1) - self.maxpool = nn.AdaptiveMaxPool1d(1) - - SF = self.spec_size // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] // self.freq_ratio - self.tscam_conv = nn.Conv2d( - in_channels = self.num_features, - out_channels = self.num_classes, - kernel_size = (SF,3), - padding = (0,1) - ) - self.head = nn.Linear(num_classes, num_classes) - - if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']): - self.mel_conv1d = nn.Sequential( - nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2), - nn.BatchNorm1d(64) - ) - if self.fusion_type == 'daf_1d': - self.fusion_model = DAF() - elif self.fusion_type == 'aff_1d': - self.fusion_model = AFF(channels=64, type='1D') - elif self.fusion_type == 'iaff_1d': - self.fusion_model = iAFF(channels=64, type='1D') - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'absolute_pos_embed'} - - @torch.jit.ignore - def no_weight_decay_keywords(self): - return {'relative_position_bias_table'} - - - def forward_features(self, x, longer_idx = None): - # A deprecated optimization for using a hierarchical output from different blocks - - frames_num = x.shape[2] - x = self.patch_embed(x, longer_idx = longer_idx) - if self.ape: - x = x + self.absolute_pos_embed - x = self.pos_drop(x) - for i, layer in enumerate(self.layers): - x, attn = layer(x) - # for x - x = self.norm(x) - B, N, C = x.shape - SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] - ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1] - x = x.permute(0,2,1).contiguous().reshape(B, C, SF, ST) - B, C, F, T = x.shape - # group 2D CNN - c_freq_bin = F // self.freq_ratio - x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T) - x = x.permute(0,1,3,2,4).contiguous().reshape(B, C, c_freq_bin, -1) - # get latent_output - fine_grained_latent_output = torch.mean(x, dim = 2) - fine_grained_latent_output = interpolate(fine_grained_latent_output.permute(0,2,1).contiguous(), 8 * self.patch_stride[1]) - - latent_output = self.avgpool(torch.flatten(x,2)) - latent_output = torch.flatten(latent_output, 1) - - # display the attention map, if needed - - x = self.tscam_conv(x) - x = torch.flatten(x, 2) # B, C, T - - fpx = interpolate(torch.sigmoid(x).permute(0,2,1).contiguous(), 8 * self.patch_stride[1]) - - x = self.avgpool(x) - x = torch.flatten(x, 1) - - output_dict = { - 'framewise_output': fpx, # already sigmoided - 'clipwise_output': torch.sigmoid(x), - 'fine_grained_embedding': fine_grained_latent_output, - 'embedding': latent_output - } - - return output_dict - - def crop_wav(self, x, crop_size, spe_pos = None): - time_steps = x.shape[2] - tx = torch.zeros(x.shape[0], x.shape[1], crop_size, 
x.shape[3]).to(x.device) - for i in range(len(x)): - if spe_pos is None: - crop_pos = random.randint(0, time_steps - crop_size - 1) - else: - crop_pos = spe_pos - tx[i][0] = x[i, 0, crop_pos:crop_pos + crop_size,:] - return tx - - # Reshape the wavform to a img size, if you want to use the pretrained swin transformer model - def reshape_wav2img(self, x): - B, C, T, F = x.shape - target_T = int(self.spec_size * self.freq_ratio) - target_F = self.spec_size // self.freq_ratio - assert T <= target_T and F <= target_F, "the wav size should less than or equal to the swin input size" - # to avoid bicubic zero error - if T < target_T: - x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True) - if F < target_F: - x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True) - x = x.permute(0,1,3,2).contiguous() - x = x.reshape(x.shape[0], x.shape[1], x.shape[2], self.freq_ratio, x.shape[3] // self.freq_ratio) - # print(x.shape) - x = x.permute(0,1,3,2,4).contiguous() - x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4]) - return x - - # Repeat the wavform to a img size, if you want to use the pretrained swin transformer model - def repeat_wat2img(self, x, cur_pos): - B, C, T, F = x.shape - target_T = int(self.spec_size * self.freq_ratio) - target_F = self.spec_size // self.freq_ratio - assert T <= target_T and F <= target_F, "the wav size should less than or equal to the swin input size" - # to avoid bicubic zero error - if T < target_T: - x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True) - if F < target_F: - x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True) - x = x.permute(0,1,3,2).contiguous() # B C F T - x = x[:,:,:,cur_pos:cur_pos + self.spec_size] - x = x.repeat(repeats = (1,1,4,1)) - return x - - def forward(self, x: torch.Tensor, mixup_lambda = None, infer_mode = False, device=None):# out_feat_keys: List[str] = None): - - if self.enable_fusion and x["longer"].sum() == 0: - # if no audio is longer than 10s, then randomly select one audio to be longer - x["longer"][torch.randint(0, x["longer"].shape[0], (1,))] = True - - if not self.enable_fusion: - x = x["waveform"].to(device=device, non_blocking=True) - x = self.spectrogram_extractor(x) # (batch_size, 1, time_steps, freq_bins) - x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - if self.training: - x = self.spec_augmenter(x) - - if self.training and mixup_lambda is not None: - x = do_mixup(x, mixup_lambda) - - x = self.reshape_wav2img(x) - output_dict = self.forward_features(x) - else: - longer_list = x["longer"].to(device=device, non_blocking=True) - x = x["mel_fusion"].to(device=device, non_blocking=True) - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - longer_list_idx = torch.where(longer_list)[0] - if self.fusion_type in ['daf_1d','aff_1d','iaff_1d']: - new_x = x[:,0:1,:,:].clone().contiguous() - if len(longer_list_idx) > 0: - # local processing - fusion_x_local = x[longer_list_idx,1:,:,:].clone().contiguous() - FB,FC,FT,FF = fusion_x_local.size() - fusion_x_local = fusion_x_local.view(FB * FC, FT, FF) - fusion_x_local = torch.permute(fusion_x_local, (0,2,1)).contiguous() - fusion_x_local = self.mel_conv1d(fusion_x_local) - fusion_x_local = fusion_x_local.view(FB,FC,FF,fusion_x_local.size(-1)) - fusion_x_local = torch.permute(fusion_x_local, 
(0,2,1,3)).contiguous().flatten(2) - if fusion_x_local.size(-1) < FT: - fusion_x_local = torch.cat([fusion_x_local, torch.zeros((FB,FF,FT- fusion_x_local.size(-1)), device=device)], dim=-1) - else: - fusion_x_local = fusion_x_local[:,:,:FT] - # 1D fusion - new_x = new_x.squeeze(1).permute((0,2,1)).contiguous() - new_x[longer_list_idx] = self.fusion_model(new_x[longer_list_idx], fusion_x_local) - x = new_x.permute((0,2,1)).contiguous()[:,None,:,:] - else: - x = new_x - - elif self.fusion_type in ['daf_2d','aff_2d','iaff_2d','channel_map']: - x = x # no change - - if self.training: - x = self.spec_augmenter(x) - if self.training and mixup_lambda is not None: - x = do_mixup(x, mixup_lambda) - - x = self.reshape_wav2img(x) - output_dict = self.forward_features(x, longer_idx = longer_list_idx) - - # if infer_mode: - # # in infer mode. we need to handle different length audio input - # frame_num = x.shape[2] - # target_T = int(self.spec_size * self.freq_ratio) - # repeat_ratio = math.floor(target_T / frame_num) - # x = x.repeat(repeats=(1,1,repeat_ratio,1)) - # x = self.reshape_wav2img(x) - # output_dict = self.forward_features(x) - # else: - # if x.shape[2] > self.freq_ratio * self.spec_size: - # if self.training: - # x = self.crop_wav(x, crop_size=self.freq_ratio * self.spec_size) - # x = self.reshape_wav2img(x) - # output_dict = self.forward_features(x) - # else: - # # Change: Hard code here - # overlap_size = (x.shape[2] - 1) // 4 - # output_dicts = [] - # crop_size = (x.shape[2] - 1) // 2 - # for cur_pos in range(0, x.shape[2] - crop_size - 1, overlap_size): - # tx = self.crop_wav(x, crop_size = crop_size, spe_pos = cur_pos) - # tx = self.reshape_wav2img(tx) - # output_dicts.append(self.forward_features(tx)) - # clipwise_output = torch.zeros_like(output_dicts[0]["clipwise_output"]).float().to(x.device) - # framewise_output = torch.zeros_like(output_dicts[0]["framewise_output"]).float().to(x.device) - # for d in output_dicts: - # clipwise_output += d["clipwise_output"] - # framewise_output += d["framewise_output"] - # clipwise_output = clipwise_output / len(output_dicts) - # framewise_output = framewise_output / len(output_dicts) - # output_dict = { - # 'framewise_output': framewise_output, - # 'clipwise_output': clipwise_output - # } - # else: # this part is typically used, and most easy one - # x = self.reshape_wav2img(x) - # output_dict = self.forward_features(x) - # x = self.head(x) - - # We process the data in the dataloader part, in that here we only consider the input_T < fixed_T - - - - return output_dict - -def create_htsat_model(audio_cfg, enable_fusion=False, fusion_type='None'): - try: - - assert audio_cfg.model_name in ["tiny", "base", "large"], "model name for HTS-AT is wrong!" 
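The branches that follow differ only in `embed_dim` and `depths`. As a quick illustration of how those presets determine the final feature width (the same arithmetic `HTSAT_Swin_Transformer.__init__` uses for `self.num_features`), here is a small standalone sketch; the preset values are copied from the branches below and nothing else is assumed:

```python
# Standalone sanity check: the embedding width doubles after each of the first
# (num_layers - 1) stages, so num_features = embed_dim * 2 ** (num_layers - 1).
# Preset values mirror the tiny/base/large branches below.
presets = {
    "tiny":  {"embed_dim": 96,  "depths": [2, 2, 6, 2]},
    "base":  {"embed_dim": 128, "depths": [2, 2, 12, 2]},
    "large": {"embed_dim": 256, "depths": [2, 2, 12, 2]},
}
for name, cfg in presets.items():
    num_layers = len(cfg["depths"])
    num_features = cfg["embed_dim"] * 2 ** (num_layers - 1)
    print(f"{name}: num_layers={num_layers}, num_features={num_features}")
# -> tiny: 768, base: 1024, large: 2048
```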
- if audio_cfg.model_name == "tiny": - model = HTSAT_Swin_Transformer( - spec_size=256, - patch_size=4, - patch_stride=(4,4), - num_classes=audio_cfg.class_num, - embed_dim=96, - depths=[2,2,6,2], - num_heads=[4,8,16,32], - window_size=8, - config = audio_cfg, - enable_fusion = enable_fusion, - fusion_type = fusion_type - ) - elif audio_cfg.model_name == "base": - model = HTSAT_Swin_Transformer( - spec_size=256, - patch_size=4, - patch_stride=(4,4), - num_classes=audio_cfg.class_num, - embed_dim=128, - depths=[2,2,12,2], - num_heads=[4,8,16,32], - window_size=8, - config = audio_cfg, - enable_fusion = enable_fusion, - fusion_type = fusion_type - ) - elif audio_cfg.model_name == "large": - model = HTSAT_Swin_Transformer( - spec_size=256, - patch_size=4, - patch_stride=(4,4), - num_classes=audio_cfg.class_num, - embed_dim=256, - depths=[2,2,12,2], - num_heads=[4,8,16,32], - window_size=8, - config = audio_cfg, - enable_fusion = enable_fusion, - fusion_type = fusion_type - ) - - return model - except: - raise RuntimeError(f'Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough.') - \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/transforms.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/transforms.py deleted file mode 100644 index bdab7eb6b94ac21e950e2870b89da7bbac1f4a8e..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/transforms.py +++ /dev/null @@ -1,98 +0,0 @@ -import logging -import os -from pathlib import Path - -import albumentations -import numpy as np -import torch -from tqdm import tqdm - -logger = logging.getLogger(f'main.{__name__}') - - -class StandardNormalizeAudio(object): - ''' - Frequency-wise normalization - ''' - def __init__(self, specs_dir, train_ids_path='./data/vggsound_train.txt', cache_path='./data/'): - self.specs_dir = specs_dir - self.train_ids_path = train_ids_path - # making the stats filename to match the specs dir name - self.cache_path = os.path.join(cache_path, f'train_means_stds_{Path(specs_dir).stem}.txt') - logger.info('Assuming that the input stats are calculated using preprocessed spectrograms (log)') - self.train_stats = self.calculate_or_load_stats() - - def __call__(self, item): - # just to generalizat the input handling. Useful for FID, IS eval and training other staff - if isinstance(item, dict): - if 'input' in item: - input_key = 'input' - elif 'image' in item: - input_key = 'image' - else: - raise NotImplementedError - item[input_key] = (item[input_key] - self.train_stats['means']) / self.train_stats['stds'] - elif isinstance(item, torch.Tensor): - # broadcasts np.ndarray (80, 1) to (1, 80, 1) because item is torch.Tensor (B, 80, T) - item = (item - self.train_stats['means']) / self.train_stats['stds'] - else: - raise NotImplementedError - return item - - def calculate_or_load_stats(self): - try: - # (F, 2) - train_stats = np.loadtxt(self.cache_path) - means, stds = train_stats.T - logger.info('Trying to load train stats for Standard Normalization of inputs') - except OSError: - logger.info('Could not find the precalculated stats for Standard Normalization. 
Calculating...') - train_vid_ids = open(self.train_ids_path) - specs_paths = [os.path.join(self.specs_dir, f'{i.rstrip()}_mel.npy') for i in train_vid_ids] - means = [None] * len(specs_paths) - stds = [None] * len(specs_paths) - for i, path in enumerate(tqdm(specs_paths)): - spec = np.load(path) - means[i] = spec.mean(axis=1) - stds[i] = spec.std(axis=1) - # (F) <- (num_files, F) - means = np.array(means).mean(axis=0) - stds = np.array(stds).mean(axis=0) - # saving in two columns - np.savetxt(self.cache_path, np.vstack([means, stds]).T, fmt='%0.8f') - means = means.reshape(-1, 1) - stds = stds.reshape(-1, 1) - return {'means': means, 'stds': stds} - -class ToTensor(object): - - def __call__(self, item): - item['input'] = torch.from_numpy(item['input']).float() - # if 'target' in item: - item['target'] = torch.tensor(item['target']) - return item - -class Crop(object): - - def __init__(self, cropped_shape=None, random_crop=False): - self.cropped_shape = cropped_shape - if cropped_shape is not None: - mel_num, spec_len = cropped_shape - if random_crop: - self.cropper = albumentations.RandomCrop - else: - self.cropper = albumentations.CenterCrop - self.preprocessor = albumentations.Compose([self.cropper(mel_num, spec_len)]) - else: - self.preprocessor = lambda **kwargs: kwargs - - def __call__(self, item): - item['input'] = self.preprocessor(image=item['input'])['image'] - return item - - -if __name__ == '__main__': - cropper = Crop([80, 848]) - item = {'input': torch.rand([80, 860])} - outputs = cropper(item) - print(outputs['input'].shape) diff --git a/spaces/ASJMO/freegpt/client/css/hljs.css b/spaces/ASJMO/freegpt/client/css/hljs.css deleted file mode 100644 index 1fcf16ba358a7c5d287b1c6e33c3afbfff38f623..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/client/css/hljs.css +++ /dev/null @@ -1,68 +0,0 @@ -.hljs { - color: #e9e9f4; - background: #28293629; - border-radius: var(--border-radius-1); - border: 1px solid var(--blur-border); - font-size: 15px; - word-wrap: break-word; - white-space: pre-wrap; -} - -/* style for hljs copy */ -.hljs-copy-wrapper { - position: relative; - overflow: hidden; -} - -.hljs-copy-wrapper:hover .hljs-copy-button, -.hljs-copy-button:focus { - transform: translateX(0); -} - -.hljs-copy-button { - position: absolute; - transform: translateX(calc(100% + 1.125em)); - top: 1em; - right: 1em; - width: 2rem; - height: 2rem; - text-indent: -9999px; - color: #fff; - border-radius: 0.25rem; - border: 1px solid #ffffff22; - background-color: #2d2b57; - background-image: url('data:image/svg+xml;utf-8,'); - background-repeat: no-repeat; - background-position: center; - transition: background-color 200ms ease, transform 200ms ease-out; -} - -.hljs-copy-button:hover { - border-color: #ffffff44; -} - -.hljs-copy-button:active { - border-color: #ffffff66; -} - -.hljs-copy-button[data-copied="true"] { - text-indent: 0; - width: auto; - background-image: none; -} - -.hljs-copy-alert { - clip: rect(0 0 0 0); - clip-path: inset(50%); - height: 1px; - overflow: hidden; - position: absolute; - white-space: nowrap; - width: 1px; -} - -@media (prefers-reduced-motion) { - .hljs-copy-button { - transition: none; - } -} diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py deleted file mode 100644 index 
c24cab973dbbededbd98f0d42444e689d7158695..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py +++ /dev/null @@ -1,135 +0,0 @@ -_base_ = '../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' - -max_epochs = 100 # 训练的最大 epoch -data_root = './data-df2/' # 数据集目录的绝对路径 -# data_root = '/root/workspace/mmyolo/data/cat/' # Docker 容器里面数据集目录的绝对路径 - -# 结果保存的路径,可以省略,省略保存的文件名位于 work_dirs 下 config 同名的文件夹中 -# 如果某个 config 只是修改了部分参数,修改这个变量就可以将新的训练文件保存到其他地方 -work_dir = './work_dirs/yolov5_s_df2' - -# load_from 可以指定本地路径或者 URL,设置了 URL 会自动进行下载,因为上面已经下载过,我们这里设置本地路径 -# 因为本教程是在 cat 数据集上微调,故这里需要使用 `load_from` 来加载 MMYOLO 中的预训练模型,这样可以在加快收敛速度的同时保证精度 -# load_from = './work_dirs/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa - -# 根据自己的 GPU 情况,修改 batch size,YOLOv5-s 默认为 8卡 x 16bs -train_batch_size_per_gpu = 32 -train_num_workers = 4 # 推荐使用 train_num_workers = nGPU x 4 - -save_epoch_intervals = 2 # 每 interval 轮迭代进行一次保存一次权重 - -# 根据自己的 GPU 情况,修改 base_lr,修改的比例是 base_lr_default * (your_bs / default_bs) -base_lr = _base_.base_lr / 4 - -anchors = [ # 此处已经根据数据集特点更新了 anchor,关于 anchor 的生成,后面小节会讲解 - [(68, 69), (154, 91), (143, 162)], # P3/8 - [(242, 160), (189, 287), (391, 207)], # P4/16 - [(353, 337), (539, 341), (443, 432)] # P5/32 -] - -class_name = ('short_sleeved_shirt', - 'long_sleeved_shirt', - 'short_sleeved_outwear', - 'long_sleeved_outwear', - 'vest', - 'sling', - 'shorts', - 'trousers', - 'skirt', - 'short_sleeved_dress', - 'long_sleeved_dress', - 'vest_dress', - 'sling_dress') # 根据 class_with_id.txt 类别信息,设置 class_name - -num_classes = len(class_name) -metainfo = dict( - classes=class_name, - palette=[(255, 0, 0), - (255, 128, 0), - (255, 255, 0), - (128, 255, 0), - (0, 255, 0), - (0, 255, 128), - (0, 255, 255), - (0, 128, 255), - (0, 0, 255), - (127, 0, 255), - (255, 0, 255), - (255, 0, 127), - (128, 128, 128)] # 画图时候的颜色,随便设置即可 -) - -train_cfg = dict( - max_epochs=max_epochs, - val_begin=20, # 第几个 epoch 后验证,这里设置 20 是因为前 20 个 epoch 精度不高,测试意义不大,故跳过 - val_interval=save_epoch_intervals # 每 val_interval 轮迭代进行一次测试评估 - # dynamic_intervals=[(max_epochs-_base_.num_last_epochs, 1)] -) - -model = dict( - bbox_head=dict( - head_module=dict(num_classes=num_classes), - prior_generator=dict(base_sizes=anchors), - - # loss_cls 会根据 num_classes 动态调整,但是 num_classes = 1 的时候,loss_cls 恒为 0 - loss_cls=dict(loss_weight=0.5 * - (num_classes / 80 * 3 / _base_.num_det_layers)))) - -train_dataloader = dict( - batch_size=train_batch_size_per_gpu, - num_workers=train_num_workers, - dataset=dict( - _delete_=True, - type='RepeatDataset', - # 数据量太少的话,可以使用 RepeatDataset ,在每个 epoch 内重复当前数据集 n 次,这里设置 5 是重复 5 次 - times=2, - dataset=dict( - type=_base_.dataset_type, - data_root=data_root, - metainfo=metainfo, - ann_file='annotations/trainval.json', - data_prefix=dict(img='smaller-dataset/'), - filter_cfg=dict(filter_empty_gt=False, min_size=32), - pipeline=_base_.train_pipeline))) - -val_dataloader = dict( - dataset=dict( - metainfo=metainfo, - data_root=data_root, - ann_file='annotations/trainval.json', - data_prefix=dict(img='smaller-dataset/'))) - -test_dataloader = val_dataloader - -val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json') -test_evaluator = val_evaluator - -optim_wrapper = dict(optimizer=dict(lr=base_lr)) - -default_hooks = dict( - # 设置间隔多少个 epoch 保存模型,以及保存模型最多几个,`save_best` 是另外保存最佳模型(推荐) - checkpoint=dict( - type='CheckpointHook', - 
interval=save_epoch_intervals, - max_keep_ckpts=5, - save_best='auto'), - param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10), - # logger 输出的间隔 - logger=dict(type='LoggerHook', interval=10)) - -# custom_hooks = [ -# dict( -# type="EMAHook", -# ema_type="ExpMomentumEMA", -# momentum=0.0001, -# update_buffers=True, -# strict_load=False, -# priority=49), -# dict( -# type="mmdet.PipelineSwitchHook", -# switch_epoch=max_epochs-max_epochs-_base_.num_last_epochs, -# switch_pipeline=_base_.train_pipeline_stage2 -# ) -# ] - -visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) \ No newline at end of file diff --git a/spaces/Abhilashvj/planogram-compliance/data/scripts/get_coco.sh b/spaces/Abhilashvj/planogram-compliance/data/scripts/get_coco.sh deleted file mode 100644 index 0d388b0a12a84c504a2b12e85e3edcac5d78530c..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/data/scripts/get_coco.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Download COCO 2017 dataset http://cocodataset.org -# Example usage: bash data/scripts/get_coco.sh -# parent -# ├── yolov5 -# └── datasets -# └── coco ← downloads here - -# Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments -if [ "$#" -gt 0 ]; then - for opt in "$@"; do - case "${opt}" in - --train) train=true ;; - --val) val=true ;; - --test) test=true ;; - --segments) segments=true ;; - esac - done -else - train=true - val=true - test=false - segments=false -fi - -# Download/unzip labels -d='../datasets' # unzip directory -url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -if [ "$segments" == "true" ]; then - f='coco2017labels-segments.zip' # 168 MB -else - f='coco2017labels.zip' # 46 MB -fi -echo 'Downloading' $url$f ' ...' -curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & - -# Download/unzip images -d='../datasets/coco/images' # unzip directory -url=http://images.cocodataset.org/zips/ -if [ "$train" == "true" ]; then - f='train2017.zip' # 19G, 118k images - echo 'Downloading' $url$f '...' - curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & -fi -if [ "$val" == "true" ]; then - f='val2017.zip' # 1G, 5k images - echo 'Downloading' $url$f '...' - curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & -fi -if [ "$test" == "true" ]; then - f='test2017.zip' # 7G, 41k images (optional) - echo 'Downloading' $url$f '...' 
- curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & -fi -wait # finish background tasks diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/ChatgptLogin.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/ChatgptLogin.py deleted file mode 100644 index 07f3b914a12c2bf09bfb661cb0cf803b79299a14..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/ChatgptLogin.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import annotations - -import os, re -from aiohttp import ClientSession - -from ..base_provider import AsyncProvider, format_prompt - - -class ChatgptLogin(AsyncProvider): - url = "https://opchatgpts.net" - supports_gpt_35_turbo = True - working = True - _nonce = None - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> str: - headers = { - "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", - "Accept" : "*/*", - "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "Origin" : "https://opchatgpts.net", - "Alt-Used" : "opchatgpts.net", - "Referer" : "https://opchatgpts.net/chatgpt-free-use/", - "Sec-Fetch-Dest" : "empty", - "Sec-Fetch-Mode" : "cors", - "Sec-Fetch-Site" : "same-origin", - } - async with ClientSession( - headers=headers - ) as session: - if not cls._nonce: - async with session.get( - "https://opchatgpts.net/chatgpt-free-use/", - params={"id": os.urandom(6).hex()}, - ) as response: - result = re.search(r'data-nonce="(.*?)"', await response.text()) - if not result: - raise RuntimeError("No nonce value") - cls._nonce = result.group(1) - data = { - "_wpnonce": cls._nonce, - "post_id": 28, - "url": "https://opchatgpts.net/chatgpt-free-use", - "action": "wpaicg_chat_shortcode_message", - "message": format_prompt(messages), - "bot_id": 0 - } - async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response: - response.raise_for_status() - data = await response.json() - if "data" in data: - return data["data"] - elif "msg" in data: - raise RuntimeError(data["msg"]) - else: - raise RuntimeError(f"Response: {data}") - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/agentverse/agents/simulation_agent/prisoner_dilemma.py b/spaces/AgentVerse/agentVerse/agentverse/agents/simulation_agent/prisoner_dilemma.py deleted file mode 100644 index bf257c1688ae593b148c43afa748179633f765ce..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/agents/simulation_agent/prisoner_dilemma.py +++ /dev/null @@ -1,167 +0,0 @@ -from __future__ import annotations - -import logging -from string import Template -from typing import TYPE_CHECKING, List - -from agentverse.message import Message - -# from . 
import agent_registry -# from .base import BaseAgent -from agentverse.agents import agent_registry -from agentverse.agents.base import BaseAgent - -if TYPE_CHECKING: - from agentverse.environments.base import BaseEnvironment - - -class PrisonerDilemaAgent(BaseAgent): - def step( - self, - environment: BaseEnvironment, - env_description: str = "", - ) -> Message: - prompt = self._fill_prompt_template(env_description) - - parsed_response = None - for i in range(self.max_retry): - try: - response = self.llm.generate_response(prompt) - parsed_response = self.output_parser.parse(self, environment, response) - break - except Exception as e: - logging.error(e) - logging.warning("Retrying...") - continue - - if parsed_response is None: - logging.error(f"{self.name} failed to generate valid response.") - - message = Message( - content="" - if parsed_response is None - else parsed_response.return_values["output"], - sender=self.name, - receiver=self.get_receiver(), - ) - return message - - async def astep( - self, environment: BaseEnvironment, env_description: str = "" - ) -> Message: - """Asynchronous version of step""" - prompt = self._fill_prompt_template(env_description) - - parsed_response = None - for i in range(self.max_retry): - try: - response = await self.llm.agenerate_response(prompt) - parsed_response = self.output_parser.parse(self, environment, response) - break - except Exception as e: - logging.error(e) - logging.warning("Retrying...") - continue - - if parsed_response is None: - logging.error(f"{self.name} failed to generate valid response.") - - message = Message( - content="" - if parsed_response is None - else parsed_response.return_values["output"], - sender=self.name, - receiver=self.get_receiver(), - ) - return message - - def _fill_prompt_template(self, env_description: str = "") -> str: - """Fill the placeholders in the prompt template - - In the conversation agent, three placeholders are supported: - - ${agent_name}: the name of the agent - - ${env_description}: the description of the environment - - ${role_description}: the description of the role of the agent - - ${chat_history}: the chat history of the agent - """ - input_arguments = { - "agent_name": self.name, - "env_description": env_description, - "role_description": self.role_description, - "chat_history": self.memory.to_string(add_sender_prefix=True), - } - return Template(self.prompt_template).safe_substitute(input_arguments) - - def add_message_to_memory(self, messages: List[Message]) -> None: - self.memory.add_message(messages) - - def reset(self) -> None: - """Reset the agent""" - self.memory.reset() - # TODO: reset receiver - - -@agent_registry.register("police") -class PoliceAgent(PrisonerDilemaAgent): - interrogating_form: str - - def _fill_prompt_template(self, env_description: str = "") -> str: - """Fill the placeholders in the prompt template - - In the conversation agent, three placeholders are supported: - - ${agent_name}: the name of the agent - - ${env_description}: the description of the environment - - ${role_description}: the description of the role of the agent - - ${chat_history}: the chat history of the agent - """ - input_arguments = { - "agent_name": self.name, - "env_description": env_description, - "role_description": self.role_description, - "chat_history": self.memory.to_string(add_sender_prefix=True), - } - - role_argument = { - "interrogating_form": self.interrogating_form, - } - - role_description = Template(self.role_description).safe_substitute( - role_argument - ) - 
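The prompt here is assembled in two `safe_substitute` passes: the role-specific placeholders are filled into the role description first, and the result is then substituted into the overall prompt template together with the environment description and chat history. A minimal standalone sketch of that pattern follows; the template strings are hypothetical stand-ins for the ones loaded from the agent configuration:

```python
from string import Template

# Hypothetical templates, for illustration only.
role_template = "You are a police officer. ${interrogating_form}"
prompt_template = "${role_description}\nEnvironment: ${env_description}\nHistory:\n${chat_history}"

# Pass 1: fill the role-specific placeholder(s) into the role description.
role_description = Template(role_template).safe_substitute(
    {"interrogating_form": "Interrogate the two suspects one at a time."}
)

# Pass 2: fill the overall prompt. safe_substitute leaves any unknown ${...}
# placeholders in place instead of raising KeyError.
prompt = Template(prompt_template).safe_substitute(
    {
        "role_description": role_description,
        "env_description": "Day 1 of the interrogation.",
        "chat_history": "",
    }
)
print(prompt)
```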
input_arguments["role_description"] = role_description - - return Template(self.prompt_template).safe_substitute(input_arguments) - - -@agent_registry.register("prisoner") -class PrisonerAgent(PrisonerDilemaAgent): - personality: str - relationship_with_another: str - - def _fill_prompt_template(self, env_description: str = "") -> str: - """Fill the placeholders in the prompt template - - In the conversation agent, three placeholders are supported: - - ${agent_name}: the name of the agent - - ${env_description}: the description of the environment - - ${role_description}: the description of the role of the agent - - ${chat_history}: the chat history of the agent - """ - input_arguments = { - "agent_name": self.name, - "env_description": env_description, - "role_description": self.role_description, - "chat_history": self.memory.to_string(add_sender_prefix=True), - } - - role_argument = { - "personality": self.personality, - "relationship_with_another": self.relationship_with_another, - } - - role_description = Template(self.role_description).safe_substitute( - role_argument - ) - input_arguments["role_description"] = role_description - - return Template(self.prompt_template).safe_substitute(input_arguments) diff --git a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/transforms.py b/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - 
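The padding value `constant` above is chosen so that, after the `min_derivative + softplus(...)` mapping applied to the raw derivatives inside `rational_quadratic_spline`, the slope at the two edges of the interval comes out as exactly 1 and therefore matches the identity map used outside `[-tail_bound, tail_bound]`. A small standalone check:

```python
import math
import torch
import torch.nn.functional as F

min_derivative = 1e-3  # DEFAULT_MIN_DERIVATIVE
constant = math.log(math.exp(1 - min_derivative) - 1)

# The spline later maps raw derivatives through min_derivative + softplus(.),
# so the edge slope becomes min_derivative + (1 - min_derivative) = 1.
edge_slope = min_derivative + F.softplus(torch.tensor(constant))
print(edge_slope.item())  # ~1.0
```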
outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + 
((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Ameaou/academic-chatgpt3.1/config.py b/spaces/Ameaou/academic-chatgpt3.1/config.py deleted file mode 100644 index 46a7dccf61b6edd94bf8484f3b80de7e8fe91628..0000000000000000000000000000000000000000 --- a/spaces/Ameaou/academic-chatgpt3.1/config.py +++ /dev/null @@ -1,58 +0,0 @@ -# [step 1]>> 例如: API_KEY = "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" (此key无效) -API_KEY = "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" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2" - -# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改 -USE_PROXY = True -if USE_PROXY: - # 填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改 - # 例如 "socks5h://localhost:11284" - # [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http - # [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上) - # [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上 - - # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284) - proxies = { - # [协议]:// [地址] :[端口] - "http": "http://127.0.0.1:7890", - "https": "http://127.0.0.1:7890", - } -else: - proxies = None - -# [step 3]>> 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次 -# 一言以蔽之:免费用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview -DEFAULT_WORKER_NUM = 3 - - -# [step 4]>> 以下配置可以优化体验,但大部分场合下并不需要修改 -# 对话窗的高度 -CHATBOT_HEIGHT = 1115 - -# 代码高亮 -CODE_HIGHLIGHT = True - -# 窗口布局 -LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局) - -# 发送请求到OpenAI后,等待多久判定为超时 -TIMEOUT_SECONDS = 30 - -# 网页的端口, -1代表随机端口 -WEB_PORT = -1 - -# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制 -MAX_RETRY = 2 - -# OpenAI模型选择是(gpt4现在只对申请成功的人开放) -LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm" -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"] - -# 本地LLM模型如ChatGLM的执行方式 CPU/GPU -LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda" - -# 设置gradio的并行线程数(不需要修改) -CONCURRENT_COUNT = 100 - -# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个) -# [("username", "password"), ("username2", "password2"), ...] 
-AUTHENTICATION = [] diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/util.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/util.py deleted file mode 100644 index 27bce0ab18a69f142db54084c0be2c014e60c20d..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/util.py +++ /dev/null @@ -1,492 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Miscellaneous utility classes and functions.""" - -import ctypes -import fnmatch -import importlib -import inspect -import numpy as np -import os -import shutil -import sys -import types -import io -import pickle -import re -import requests -import html -import hashlib -import glob -import tempfile -import urllib -import urllib.request -import uuid - -from distutils.util import strtobool -from typing import Any, List, Tuple, Union - - -# Util classes -# ------------------------------------------------------------------------------------------ - - -class EasyDict(dict): - """Convenience class that behaves like a dict but allows access with the attribute syntax.""" - - def __getattr__(self, name: str) -> Any: - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name: str, value: Any) -> None: - self[name] = value - - def __delattr__(self, name: str) -> None: - del self[name] - - -class Logger(object): - """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" - - def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): - self.file = None - - if file_name is not None: - self.file = open(file_name, file_mode) - - self.should_flush = should_flush - self.stdout = sys.stdout - self.stderr = sys.stderr - - sys.stdout = self - sys.stderr = self - - def __enter__(self) -> "Logger": - return self - - def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: - self.close() - - def write(self, text: Union[str, bytes]) -> None: - """Write text to stdout (and a file) and optionally flush.""" - if isinstance(text, bytes): - text = text.decode() - if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash - return - - if self.file is not None: - self.file.write(text) - - self.stdout.write(text) - - if self.should_flush: - self.flush() - - def flush(self) -> None: - """Flush written text to both stdout and a file, if open.""" - if self.file is not None: - self.file.flush() - - self.stdout.flush() - - def close(self) -> None: - """Flush, close possible files, and remove stdout/stderr mirroring.""" - self.flush() - - # if using multiple loggers, prevent closing in wrong order - if sys.stdout is self: - sys.stdout = self.stdout - if sys.stderr is self: - sys.stderr = self.stderr - - if self.file is not None: - self.file.close() - self.file = None - - -# Cache directories -# ------------------------------------------------------------------------------------------ - -_dnnlib_cache_dir = None - - -def set_cache_dir(path: str) -> None: - global 
_dnnlib_cache_dir - _dnnlib_cache_dir = path - - -def make_cache_dir_path(*paths: str) -> str: - if _dnnlib_cache_dir is not None: - return os.path.join(_dnnlib_cache_dir, *paths) - if 'DNNLIB_CACHE_DIR' in os.environ: - return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths) - if 'HOME' in os.environ: - return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths) - if 'USERPROFILE' in os.environ: - return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths) - return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths) - -# Small util functions -# ------------------------------------------------------------------------------------------ - - -def format_time(seconds: Union[int, float]) -> str: - """Convert the seconds to human readable string with days, hours, minutes and seconds.""" - s = int(np.rint(seconds)) - - if s < 60: - return "{0}s".format(s) - elif s < 60 * 60: - return "{0}m {1:02}s".format(s // 60, s % 60) - elif s < 24 * 60 * 60: - return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) - else: - return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) - - -def ask_yes_no(question: str) -> bool: - """Ask the user the question until the user inputs a valid answer.""" - while True: - try: - print("{0} [y/n]".format(question)) - return strtobool(input().lower()) - except ValueError: - pass - - -def tuple_product(t: Tuple) -> Any: - """Calculate the product of the tuple elements.""" - result = 1 - - for v in t: - result *= v - - return result - - -_str_to_ctype = { - "uint8": ctypes.c_ubyte, - "uint16": ctypes.c_uint16, - "uint32": ctypes.c_uint32, - "uint64": ctypes.c_uint64, - "int8": ctypes.c_byte, - "int16": ctypes.c_int16, - "int32": ctypes.c_int32, - "int64": ctypes.c_int64, - "float32": ctypes.c_float, - "float64": ctypes.c_double -} - - -def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: - """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" - type_str = None - - if isinstance(type_obj, str): - type_str = type_obj - elif hasattr(type_obj, "__name__"): - type_str = type_obj.__name__ - elif hasattr(type_obj, "name"): - type_str = type_obj.name - else: - raise RuntimeError("Cannot infer type name from input") - - assert type_str in _str_to_ctype.keys() - - my_dtype = np.dtype(type_str) - my_ctype = _str_to_ctype[type_str] - - assert my_dtype.itemsize == ctypes.sizeof(my_ctype) - - return my_dtype, my_ctype - - -def is_pickleable(obj: Any) -> bool: - try: - with io.BytesIO() as stream: - pickle.dump(obj, stream) - return True - except: - return False - - -# Functionality to import modules/objects by name, and call functions by name -# ------------------------------------------------------------------------------------------ - -def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: - """Searches for the underlying module behind the name to some python object. 
- Returns the module and the object name (original name with module part removed).""" - - # allow convenience shorthands, substitute them by full names - obj_name = re.sub("^np.", "numpy.", obj_name) - obj_name = re.sub("^tf.", "tensorflow.", obj_name) - - # list alternatives for (module_name, local_obj_name) - parts = obj_name.split(".") - name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) - for i in range(len(parts), 0, -1)] - - # try each alternative in turn - for module_name, local_obj_name in name_pairs: - try: - module = importlib.import_module( - module_name) # may raise ImportError - # may raise AttributeError - get_obj_from_module(module, local_obj_name) - return module, local_obj_name - except: - pass - - # maybe some of the modules themselves contain errors? - for module_name, _local_obj_name in name_pairs: - try: - importlib.import_module(module_name) # may raise ImportError - except ImportError: - if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): - raise - - # maybe the requested attribute is missing? - for module_name, local_obj_name in name_pairs: - try: - module = importlib.import_module( - module_name) # may raise ImportError - # may raise AttributeError - get_obj_from_module(module, local_obj_name) - except ImportError: - pass - - # we are out of luck, but we have no idea why - raise ImportError(obj_name) - - -def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: - """Traverses the object name and returns the last (rightmost) python object.""" - if obj_name == '': - return module - obj = module - for part in obj_name.split("."): - obj = getattr(obj, part) - return obj - - -def get_obj_by_name(name: str) -> Any: - """Finds the python object with the given name.""" - module, obj_name = get_module_from_obj_name(name) - return get_obj_from_module(module, obj_name) - - -def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: - """Finds the python object with the given name and calls it as a function.""" - assert func_name is not None - # print('func_name: ', func_name) #'training.dataset.ImageFolderDataset' - func_obj = get_obj_by_name(func_name) - assert callable(func_obj) - return func_obj(*args, **kwargs) - - -def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any: - """Finds the python class with the given name and constructs it with the given arguments.""" - return call_func_by_name(*args, func_name=class_name, **kwargs) - - -def get_module_dir_by_obj_name(obj_name: str) -> str: - """Get the directory path of the module containing the given object name.""" - module, _ = get_module_from_obj_name(obj_name) - return os.path.dirname(inspect.getfile(module)) - - -def is_top_level_function(obj: Any) -> bool: - """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" - return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ - - -def get_top_level_function_name(obj: Any) -> str: - """Return the fully-qualified name of a top-level function.""" - assert is_top_level_function(obj) - module = obj.__module__ - if module == '__main__': - module = os.path.splitext(os.path.basename( - sys.modules[module].__file__))[0] - return module + "." 
+ obj.__name__ - - -# File system helpers -# ------------------------------------------------------------------------------------------ - -def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: - """List all files recursively in a given directory while ignoring given file and directory names. - Returns list of tuples containing both absolute and relative paths.""" - assert os.path.isdir(dir_path) - base_name = os.path.basename(os.path.normpath(dir_path)) - - if ignores is None: - ignores = [] - - result = [] - - for root, dirs, files in os.walk(dir_path, topdown=True): - for ignore_ in ignores: - dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] - - # dirs need to be edited in-place - for d in dirs_to_remove: - dirs.remove(d) - - files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] - - absolute_paths = [os.path.join(root, f) for f in files] - relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] - - if add_base_to_relative: - relative_paths = [os.path.join(base_name, p) - for p in relative_paths] - - assert len(absolute_paths) == len(relative_paths) - result += zip(absolute_paths, relative_paths) - - return result - - -def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: - """Takes in a list of tuples of (src, dst) paths and copies files. - Will create all necessary directories.""" - for file in files: - target_dir_name = os.path.dirname(file[1]) - - # will create all intermediate-level directories - if not os.path.exists(target_dir_name): - os.makedirs(target_dir_name) - - shutil.copyfile(file[0], file[1]) - - -# URL helpers -# ------------------------------------------------------------------------------------------ - -def is_url(obj: Any, allow_file_urls: bool = False) -> bool: - """Determine whether the given object is a valid URL string.""" - if not isinstance(obj, str) or not "://" in obj: - return False - if allow_file_urls and obj.startswith('file://'): - return True - try: - res = requests.compat.urlparse(obj) - if not res.scheme or not res.netloc or not "." in res.netloc: - return False - res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) - if not res.scheme or not res.netloc or not "." in res.netloc: - return False - except: - return False - return True - - -def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any: - """Download the given URL and return a binary-mode file object to access the data.""" - assert num_attempts >= 1 - assert not (return_filename and (not cache)) - - # Doesn't look like an URL scheme so interpret it as a local filename. - if not re.match('^[a-z]+://', url): - return url if return_filename else open(url, "rb") - - # Handle file URLs. This code handles unusual file:// patterns that - # arise on Windows: - # - # file:///c:/foo.txt - # - # which would translate to a local '/c:/foo.txt' filename that's - # invalid. Drop the forward slash for such pathnames. - # - # If you touch this code path, you should test it on both Linux and - # Windows. - # - # Some internet resources suggest using urllib.request.url2pathname() but - # but that converts forward slashes to backslashes and this causes - # its own set of problems. 
- if url.startswith('file://'): - filename = urllib.parse.urlparse(url).path - if re.match(r'^/[a-zA-Z]:', filename): - filename = filename[1:] - return filename if return_filename else open(filename, "rb") - - assert is_url(url) - - # Lookup from cache. - if cache_dir is None: - cache_dir = make_cache_dir_path('downloads') - - url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() - if cache: - cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) - if len(cache_files) == 1: - filename = cache_files[0] - return filename if return_filename else open(filename, "rb") - - # Download. - url_name = None - url_data = None - with requests.Session() as session: - if verbose: - print("Downloading %s ..." % url, end="", flush=True) - for attempts_left in reversed(range(num_attempts)): - try: - with session.get(url) as res: - res.raise_for_status() - if len(res.content) == 0: - raise IOError("No data received") - - if len(res.content) < 8192: - content_str = res.content.decode("utf-8") - if "download_warning" in res.headers.get("Set-Cookie", ""): - links = [html.unescape(link) for link in content_str.split( - '"') if "export=download" in link] - if len(links) == 1: - url = requests.compat.urljoin(url, links[0]) - raise IOError("Google Drive virus checker nag") - if "Google Drive - Quota exceeded" in content_str: - raise IOError( - "Google Drive download quota exceeded -- please try again later") - - match = re.search( - r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) - url_name = match[1] if match else url - url_data = res.content - if verbose: - print(" done") - break - except KeyboardInterrupt: - raise - except: - if not attempts_left: - if verbose: - print(" failed") - raise - if verbose: - print(".", end="", flush=True) - - # Save to cache. - if cache: - safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) - cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) - temp_file = os.path.join( - cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) - os.makedirs(cache_dir, exist_ok=True) - with open(temp_file, "wb") as f: - f.write(url_data) - os.replace(temp_file, cache_file) # atomic - if return_filename: - return cache_file - - # Return data as file object. - assert not return_filename - return io.BytesIO(url_data) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py deleted file mode 100644 index b4c0387ca01be42a60056184d802c362a06c5139..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +++ /dev/null @@ -1,532 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
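For reference, the `open_url` helper removed with `dnnlib/util.py` above resolves plain local paths, `file://` URLs, and remote URLs, retries failed downloads, and can cache payloads under the dnnlib cache directory keyed by the MD5 of the URL. A minimal usage sketch, assuming a checkout that still ships `stylegan_human/dnnlib` (the import path and URL below are placeholders, not from the source):

```python
# Hypothetical usage of the removed open_url helper; assumes an older checkout
# that still contains stylegan_human/dnnlib/util.py.
from stylegan_human.dnnlib import util

url = "https://example.com/pretrained/model.pkl"  # placeholder URL

# Returns a binary file object; with cache=True the download is stored under
# make_cache_dir_path('downloads') and reused on the next call.
with util.open_url(url, num_attempts=3, verbose=False, cache=True) as f:
    data = f.read()

# With return_filename=True (and caching enabled) the cached file path is
# returned instead of a file object.
cached_path = util.open_url(url, return_filename=True)
print(len(data), cached_path)
```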
- -import warnings -from functools import partial -from typing import Dict, List, Optional, Union - -import jax -import jax.numpy as jnp -import numpy as np -from flax.core.frozen_dict import FrozenDict -from flax.jax_utils import unreplicate -from flax.training.common_utils import shard -from PIL import Image -from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel - -from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel -from ...schedulers import ( - FlaxDDIMScheduler, - FlaxDPMSolverMultistepScheduler, - FlaxLMSDiscreteScheduler, - FlaxPNDMScheduler, -) -from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring -from ..pipeline_flax_utils import FlaxDiffusionPipeline -from . import FlaxStableDiffusionPipelineOutput -from .safety_checker_flax import FlaxStableDiffusionSafetyChecker - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -# Set to True to use python for loop instead of jax.fori_loop for easier debugging -DEBUG = False - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> import jax - >>> import numpy as np - >>> import jax.numpy as jnp - >>> from flax.jax_utils import replicate - >>> from flax.training.common_utils import shard - >>> import requests - >>> from io import BytesIO - >>> from PIL import Image - >>> from diffusers import FlaxStableDiffusionImg2ImgPipeline - - - >>> def create_key(seed=0): - ... return jax.random.PRNGKey(seed) - - - >>> rng = create_key(0) - - >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" - >>> response = requests.get(url) - >>> init_img = Image.open(BytesIO(response.content)).convert("RGB") - >>> init_img = init_img.resize((768, 512)) - - >>> prompts = "A fantasy landscape, trending on artstation" - - >>> pipeline, params = FlaxStableDiffusionImg2ImgPipeline.from_pretrained( - ... "CompVis/stable-diffusion-v1-4", - ... revision="flax", - ... dtype=jnp.bfloat16, - ... ) - - >>> num_samples = jax.device_count() - >>> rng = jax.random.split(rng, jax.device_count()) - >>> prompt_ids, processed_image = pipeline.prepare_inputs( - ... prompt=[prompts] * num_samples, image=[init_img] * num_samples - ... ) - >>> p_params = replicate(params) - >>> prompt_ids = shard(prompt_ids) - >>> processed_image = shard(processed_image) - - >>> output = pipeline( - ... prompt_ids=prompt_ids, - ... image=processed_image, - ... params=p_params, - ... prng_seed=rng, - ... strength=0.75, - ... num_inference_steps=50, - ... jit=True, - ... height=512, - ... width=768, - ... ).images - - >>> output_images = pipeline.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) - ``` -""" - - -class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline): - r""" - Flax-based pipeline for text-guided image-to-image generation using Stable Diffusion. - - This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - Args: - vae ([`FlaxAutoencoderKL`]): - Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. - text_encoder ([`~transformers.FlaxCLIPTextModel`]): - Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). - tokenizer ([`~transformers.CLIPTokenizer`]): - A `CLIPTokenizer` to tokenize text. 
- unet ([`FlaxUNet2DConditionModel`]): - A `FlaxUNet2DConditionModel` to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or - [`FlaxDPMSolverMultistepScheduler`]. - safety_checker ([`FlaxStableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details - about a model's potential harms. - feature_extractor ([`~transformers.CLIPImageProcessor`]): - A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. - """ - - def __init__( - self, - vae: FlaxAutoencoderKL, - text_encoder: FlaxCLIPTextModel, - tokenizer: CLIPTokenizer, - unet: FlaxUNet2DConditionModel, - scheduler: Union[ - FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler - ], - safety_checker: FlaxStableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - dtype: jnp.dtype = jnp.float32, - ): - super().__init__() - self.dtype = dtype - - if safety_checker is None: - logger.warn( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
- ) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - - def prepare_inputs(self, prompt: Union[str, List[str]], image: Union[Image.Image, List[Image.Image]]): - if not isinstance(prompt, (str, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if not isinstance(image, (Image.Image, list)): - raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") - - if isinstance(image, Image.Image): - image = [image] - - processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) - - text_input = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="np", - ) - return text_input.input_ids, processed_images - - def _get_has_nsfw_concepts(self, features, params): - has_nsfw_concepts = self.safety_checker(features, params) - return has_nsfw_concepts - - def _run_safety_checker(self, images, safety_model_params, jit=False): - # safety_model_params should already be replicated when jit is True - pil_images = [Image.fromarray(image) for image in images] - features = self.feature_extractor(pil_images, return_tensors="np").pixel_values - - if jit: - features = shard(features) - has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) - has_nsfw_concepts = unshard(has_nsfw_concepts) - safety_model_params = unreplicate(safety_model_params) - else: - has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) - - images_was_copied = False - for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): - if has_nsfw_concept: - if not images_was_copied: - images_was_copied = True - images = images.copy() - - images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image - - if any(has_nsfw_concepts): - warnings.warn( - "Potential NSFW content was detected in one or more images. A black image will be returned" - " instead. Try again with a different prompt and/or seed." 
- ) - - return images, has_nsfw_concepts - - def get_timestep_start(self, num_inference_steps, strength): - # get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - - return t_start - - def _generate( - self, - prompt_ids: jnp.array, - image: jnp.array, - params: Union[Dict, FrozenDict], - prng_seed: jax.random.KeyArray, - start_timestep: int, - num_inference_steps: int, - height: int, - width: int, - guidance_scale: float, - noise: Optional[jnp.array] = None, - neg_prompt_ids: Optional[jnp.array] = None, - ): - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - # get prompt text embeddings - prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] - - # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` - # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` - batch_size = prompt_ids.shape[0] - - max_length = prompt_ids.shape[-1] - - if neg_prompt_ids is None: - uncond_input = self.tokenizer( - [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" - ).input_ids - else: - uncond_input = neg_prompt_ids - negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] - context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) - - latents_shape = ( - batch_size, - self.unet.config.in_channels, - height // self.vae_scale_factor, - width // self.vae_scale_factor, - ) - if noise is None: - noise = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) - else: - if noise.shape != latents_shape: - raise ValueError(f"Unexpected latents shape, got {noise.shape}, expected {latents_shape}") - - # Create init_latents - init_latent_dist = self.vae.apply({"params": params["vae"]}, image, method=self.vae.encode).latent_dist - init_latents = init_latent_dist.sample(key=prng_seed).transpose((0, 3, 1, 2)) - init_latents = self.vae.config.scaling_factor * init_latents - - def loop_body(step, args): - latents, scheduler_state = args - # For classifier free guidance, we need to do two forward passes. 
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - latents_input = jnp.concatenate([latents] * 2) - - t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] - timestep = jnp.broadcast_to(t, latents_input.shape[0]) - - latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) - - # predict the noise residual - noise_pred = self.unet.apply( - {"params": params["unet"]}, - jnp.array(latents_input), - jnp.array(timestep, dtype=jnp.int32), - encoder_hidden_states=context, - ).sample - # perform guidance - noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) - noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() - return latents, scheduler_state - - scheduler_state = self.scheduler.set_timesteps( - params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape - ) - - latent_timestep = scheduler_state.timesteps[start_timestep : start_timestep + 1].repeat(batch_size) - - latents = self.scheduler.add_noise(params["scheduler"], init_latents, noise, latent_timestep) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * params["scheduler"].init_noise_sigma - - if DEBUG: - # run with python for loop - for i in range(start_timestep, num_inference_steps): - latents, scheduler_state = loop_body(i, (latents, scheduler_state)) - else: - latents, _ = jax.lax.fori_loop(start_timestep, num_inference_steps, loop_body, (latents, scheduler_state)) - - # scale and decode the image latents with vae - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample - - image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) - return image - - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt_ids: jnp.array, - image: jnp.array, - params: Union[Dict, FrozenDict], - prng_seed: jax.random.KeyArray, - strength: float = 0.8, - num_inference_steps: int = 50, - height: Optional[int] = None, - width: Optional[int] = None, - guidance_scale: Union[float, jnp.array] = 7.5, - noise: jnp.array = None, - neg_prompt_ids: jnp.array = None, - return_dict: bool = True, - jit: bool = False, - ): - r""" - The call function to the pipeline for generation. - - Args: - prompt_ids (`jnp.array`): - The prompt or prompts to guide image generation. - image (`jnp.array`): - Array representing an image batch to be used as the starting point. - params (`Dict` or `FrozenDict`): - Dictionary containing the model parameters/weights. - prng_seed (`jax.random.KeyArray` or `jax.Array`): - Array containing random number generator key. - strength (`float`, *optional*, defaults to 0.8): - Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a - starting point and more noise is added the higher the `strength`. The number of denoising steps depends - on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising - process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 - essentially ignores `image`. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the - expense of slower inference. This parameter is modulated by `strength`. - height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. - guidance_scale (`float`, *optional*, defaults to 7.5): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - noise (`jnp.array`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. The array is generated by - sampling using the supplied random `generator`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of - a plain tuple. - jit (`bool`, defaults to `False`): - Whether to run `pmap` versions of the generation and safety scoring functions. - - - - This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a - future release. - - - - Examples: - - Returns: - [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is - returned, otherwise a `tuple` is returned where the first element is a list with the generated images - and the second element is a list of `bool`s indicating whether the corresponding generated image - contains "not-safe-for-work" (nsfw) content. - """ - # 0. Default height and width to unet - height = height or self.unet.config.sample_size * self.vae_scale_factor - width = width or self.unet.config.sample_size * self.vae_scale_factor - - if isinstance(guidance_scale, float): - # Convert to a tensor so each device gets a copy. Follow the prompt_ids for - # shape information, as they may be sharded (when `jit` is `True`), or not. 
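The `strength` behaviour described above is implemented by the pipeline's `get_timestep_start` method shown earlier: `init_timestep = min(int(num_inference_steps * strength), num_inference_steps)`, then `t_start = max(num_inference_steps - init_timestep, 0)`, and the denoising loop runs from `t_start` to `num_inference_steps`. A small worked example of that arithmetic (the concrete values are illustrative only):

```python
# Mirrors FlaxStableDiffusionImg2ImgPipeline.get_timestep_start from the file above.
def get_timestep_start(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    return max(num_inference_steps - init_timestep, 0)

# strength=0.75 with 50 steps skips the first 13 timesteps, so 37 denoising
# steps actually run; strength=1.0 keeps all 50 and effectively ignores `image`.
assert get_timestep_start(50, 0.75) == 13
assert get_timestep_start(50, 1.0) == 0
```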
- guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) - if len(prompt_ids.shape) > 2: - # Assume sharded - guidance_scale = guidance_scale[:, None] - - start_timestep = self.get_timestep_start(num_inference_steps, strength) - - if jit: - images = _p_generate( - self, - prompt_ids, - image, - params, - prng_seed, - start_timestep, - num_inference_steps, - height, - width, - guidance_scale, - noise, - neg_prompt_ids, - ) - else: - images = self._generate( - prompt_ids, - image, - params, - prng_seed, - start_timestep, - num_inference_steps, - height, - width, - guidance_scale, - noise, - neg_prompt_ids, - ) - - if self.safety_checker is not None: - safety_params = params["safety_checker"] - images_uint8_casted = (images * 255).round().astype("uint8") - num_devices, batch_size = images.shape[:2] - - images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) - images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) - images = np.asarray(images) - - # block images - if any(has_nsfw_concept): - for i, is_nsfw in enumerate(has_nsfw_concept): - if is_nsfw: - images[i] = np.asarray(images_uint8_casted[i]) - - images = images.reshape(num_devices, batch_size, height, width, 3) - else: - images = np.asarray(images) - has_nsfw_concept = False - - if not return_dict: - return (images, has_nsfw_concept) - - return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) - - -# Static argnums are pipe, start_timestep, num_inference_steps, height, width. A change would trigger recompilation. -# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). -@partial( - jax.pmap, - in_axes=(None, 0, 0, 0, 0, None, None, None, None, 0, 0, 0), - static_broadcasted_argnums=(0, 5, 6, 7, 8), -) -def _p_generate( - pipe, - prompt_ids, - image, - params, - prng_seed, - start_timestep, - num_inference_steps, - height, - width, - guidance_scale, - noise, - neg_prompt_ids, -): - return pipe._generate( - prompt_ids, - image, - params, - prng_seed, - start_timestep, - num_inference_steps, - height, - width, - guidance_scale, - noise, - neg_prompt_ids, - ) - - -@partial(jax.pmap, static_broadcasted_argnums=(0,)) -def _p_get_has_nsfw_concepts(pipe, features, params): - return pipe._get_has_nsfw_concepts(features, params) - - -def unshard(x: jnp.ndarray): - # einops.rearrange(x, 'd b ... -> (d b) ...') - num_devices, batch_size = x.shape[:2] - rest = x.shape[2:] - return x.reshape(num_devices * batch_size, *rest) - - -def preprocess(image, dtype): - w, h = image.size - w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 - image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) - image = jnp.array(image).astype(dtype) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - return 2.0 * image - 1.0 diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/pndm/test_pndm.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/pndm/test_pndm.py deleted file mode 100644 index c2595713933c6e8b10277910c1b01dd2f8561d25..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/pndm/test_pndm.py +++ /dev/null @@ -1,87 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -import torch - -from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel -from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device - - -enable_full_determinism() - - -class PNDMPipelineFastTests(unittest.TestCase): - @property - def dummy_uncond_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model - - def test_inference(self): - unet = self.dummy_uncond_unet - scheduler = PNDMScheduler() - - pndm = PNDMPipeline(unet=unet, scheduler=scheduler) - pndm.to(torch_device) - pndm.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images - - generator = torch.manual_seed(0) - image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - -@slow -@require_torch -class PNDMPipelineIntegrationTests(unittest.TestCase): - def test_inference_cifar10(self): - model_id = "google/ddpm-cifar10-32" - - unet = UNet2DModel.from_pretrained(model_id) - scheduler = PNDMScheduler() - - pndm = PNDMPipeline(unet=unet, scheduler=scheduler) - pndm.to(torch_device) - pndm.set_progress_bar_config(disable=None) - generator = torch.manual_seed(0) - image = pndm(generator=generator, output_type="numpy").images - - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index df9c2aca9c7c1999d74a08a58aca5d220f7df54a..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './nonlocal_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Anonymous-123/ImageNet-Editing/resize_obj.py b/spaces/Anonymous-123/ImageNet-Editing/resize_obj.py deleted file mode 100644 index e9315946d1da20b1756e43e762c47a641958701e..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/resize_obj.py +++ /dev/null @@ -1,188 
+0,0 @@ -#!/usr/bin/python -#****************************************************************# -# ScriptName: analysis_data.py -# Author: Anonymous_123 -# Create Date: 2022-07-25 19:54 -# Modify Author: Anonymous_123 -# Modify Date: 2022-09-25 12:04 -# Function: -#***************************************************************# - -import os -import sys -import numpy as np -import cv2 -import torch -from tqdm import tqdm -import shutil -import pdb - -import argparse - -parser = argparse.ArgumentParser(description='resize object') -parser.add_argument('--scale', type=float, default=None, help='object scale') -parser.add_argument('--img_path', type=str, help='image path') -parser.add_argument('--mask_path', type=str, help='mask path') - - -def get_bbox_and_rate(mask): - gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) - ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY) - contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) - if len(contours) == 0: - return None, None - max_area = 0 - max_idx = 0 - for i, cnt in enumerate(contours): - x,y,w,h = cv2.boundingRect(cnt) - if w*h > max_area: - max_idx = i - max_area = w*h - # 外接矩形 - x,y,w,h = cv2.boundingRect(contours[max_idx]) - mask_new = np.zeros(mask.shape, dtype='uint8') - mask_new[y:y+h, x:x+w, :] = mask[y:y+h, x:x+w, :] - - rate = (mask_new[:,:,0]>127.5).sum()/mask.shape[0]/mask.shape[1] - - return (x,y,w,h), rate - -def resize_around_the_center(img, mask, bbox, operation, scale_step=1.2): - x,y,w,h = bbox - H,W,C = mask.shape - obj_mask = mask[y:y+h, x:x+w, :].copy() - # obj_mask = cv2.resize(obj_mask, (int(w*scale_step),int(h*scale_step)) if operation == 'upsample' else (int(w/scale_step), int(h/scale_step))) - obj_mask = cv2.resize(obj_mask, (int(w*scale_step),int(h*scale_step))) - start_point_x = max(x+w//2 - obj_mask.shape[1]//2, 0) # center - w - start_point_y = max(y+h//2 - obj_mask.shape[0]//2, 0) # center - h - end_point_x = min(x+w//2 + obj_mask.shape[1]//2, W) # center+w - end_point_y = min(y+h//2 + obj_mask.shape[0]//2, H) # center+h - - start_point_x_obj = max(0,obj_mask.shape[1]//2-(x+w//2)) - start_point_y_obj = max(0, obj_mask.shape[0]//2-(y+h//2)) - mask[:] = 0 - mask[start_point_y:end_point_y, start_point_x:end_point_x] = obj_mask[start_point_y_obj:start_point_y_obj+(end_point_y-start_point_y), start_point_x_obj:start_point_x_obj+(end_point_x-start_point_x)] - - obj_img = img[y:y+h, x:x+w, :].copy() - # obj_img = cv2.resize(obj_img, (int(w*scale_step),int(h*scale_step)) if operation == 'upsample' else (int(w/scale_step), int(h/scale_step))) - obj_img = cv2.resize(obj_img, (int(w*scale_step),int(h*scale_step))) - img = cv2.GaussianBlur(img, (49, 49), 0) - img[start_point_y:end_point_y, start_point_x:end_point_x] = obj_img[start_point_y_obj:start_point_y_obj+(end_point_y-start_point_y), start_point_x_obj:start_point_x_obj+(end_point_x-start_point_x)] - - return img, mask - -def resize_around_the_center_padding(img, mask, bbox, scale_step=1.2): - x,y,w,h = bbox - H,W,C = mask.shape - mask_new = np.zeros((int(H/scale_step), int(W/scale_step), 3), dtype='uint8') - mask_new_full = np.zeros((int(H/scale_step), int(W/scale_step), 3), dtype='uint8') - # img_new = np.zeros((int(H/scale_step), int(W/scale_step), 3), dtype='uint8') - img_new = cv2.resize(img, (int(W/scale_step), int(H/scale_step))) - - if scale_step < 1: - mask_new[int((y+h/2)*(1/scale_step-1)):int((y+h/2)*(1/scale_step-1)+H), int((x+w/2)*(1/scale_step-1)):int((x+w/2)*(1/scale_step-1)+W)] = mask - 
mask_new_full[int((y+h/2)*(1/scale_step-1)):int((y+h/2)*(1/scale_step-1)+H), int((x+w/2)*(1/scale_step-1)):int((x+w/2)*(1/scale_step-1)+W)] = mask.max()*np.ones(mask.shape, dtype='uint8') - - img_new[int((y+h/2)*(1/scale_step-1)):int((y+h/2)*(1/scale_step-1)+H), int((x+w/2)*(1/scale_step-1)):int((x+w/2)*(1/scale_step-1)+W)] = img - - else: - mask_new = mask[int((y+h/2)*(1-1/scale_step)):int((y+h/2)*(1-1/scale_step))+int(H/scale_step), int((x+w/2)*(1-1/scale_step)):int((x+w/2)*(1-1/scale_step))+int(W/scale_step)] - mask_new_full = mask[int((y+h/2)*(1-1/scale_step)):int((y+h/2)*(1-1/scale_step))+int(H/scale_step), int((x+w/2)*(1-1/scale_step)):int((x+w/2)*(1-1/scale_step))+int(W/scale_step)] - img_new = img[int((y+h/2)*(1-1/scale_step)):int((y+h/2)*(1-1/scale_step))+int(H/scale_step), int((x+w/2)*(1-1/scale_step)):int((x+w/2)*(1-1/scale_step))+int(W/scale_step)] - - img_new = cv2.resize(img_new, (W,H)) - mask_new = cv2.resize(mask_new, (W,H)) - mask_new_full = cv2.resize(mask_new_full, (W,H)) - - return img_new, mask_new, mask_new_full - -def rescale(img, mask, scale=None, max_steps=50): - bbox, rate = get_bbox_and_rate(mask) - if bbox is None: - return None, None, None - num_steps = 0 - mask_full = mask.copy() - while np.floor(rate*100) != scale*100. and abs(rate-scale) > 0.015: - # while not (abs(bbox[0]-0)<10 or abs(bbox[1]-0)<10 or abs(bbox[0]+bbox[2]-img.shape[1])<10 or abs(bbox[1]+bbox[3]-img.shape[0])<10): - operation = 'upsample' if np.floor(rate*100) < scale*100. else 'downsample' - scale_step = np.sqrt(scale/rate) - # img, mask = resize_around_the_center(img, mask, bbox, operation, scale_step=scale_step) - img, mask, mask_full = resize_around_the_center_padding(img, mask, bbox, scale_step=scale_step) - bbox, rate_ = get_bbox_and_rate(mask) - if (operation == 'upsample' and rate_ < rate) or (operation == 'downsample' and rate_ > rate): - return None, None, None - num_steps += 1 - rate = rate_ - print(rate) - if num_steps > max_steps: - return None, None, None - return img, mask_full, mask - - -def rescale_maximum(img, mask, scale=None, max_steps=50): - bbox, rate = get_bbox_and_rate(mask) - if bbox is None: - return None, None, None - x,y,w,h = bbox - H,W,C = img.shape - if H/h < W/w: - y_start, y_end = y, y+h - new_w = w/H*h - c_x = x + w//2 - c_x_new = new_w*c_x/W - x_start = c_x - c_x_new - x_end = x_start + new_w - else: - x_start, x_end = x, x+w - new_h = h/W*w - c_y = y+h//2 - c_y_new = new_h*c_y/H - y_start = c_y - c_y_new - y_end = y_start + new_h - img_new = img[min(y, int(y_start)):max(int(y_end), y+h), min(x, int(x_start)):max(int(x_end),x+w), :] - mask_new = mask[min(y, int(y_start)):max(int(y_end),y+h),min(x, int(x_start)):max(int(x_end),x+w),:] - - img_new = cv2.resize(img_new, (W,H)) - mask_new = cv2.resize(mask_new, (W,H)) - - return img_new, mask_new, mask_new - - -if __name__ == '__main__': - args = parser.parse_args() - scale = args.scale - img_path_save = 'results/img_rescaled.png' - mask_path_save = 'results/mask_rescaled.png' - if scale == None: - shutil.copy(args.img_path, img_path_save) - shutil.copy(args.mask_path, mask_path_save) - else: - try: - finals = [] - img = cv2.imread(args.img_path) - mask = cv2.imread(args.mask_path) - - img_rescale, mask_rescale, mask_obj = rescale_maximum(img.copy(), mask.copy(), scale=scale) - bbox, max_rate = get_bbox_and_rate(mask_obj) - if scale < max_rate: - img_rescale, mask_rescale, mask_obj = rescale(img.copy(), mask.copy(), scale=scale) - if img_rescale is None: - print('Invalid size') - shutil.copy(args.img_path, 
img_path_save) - shutil.copy(args.mask_path, mask_path_save) - sys.exit() - final = [img, img_rescale, mask, mask_rescale, mask_obj] - # cv2.imwrite('tmp.png', cv2.hconcat(final)) - - cv2.imwrite(img_path_save, img_rescale) - cv2.imwrite(mask_path_save, mask_obj) - # cv2.imwrite(mask_path_save_full, mask_rescale) - except: - print('Invalid size, using the original one') - shutil.copy(args.img_path, img_path_save) - shutil.copy(args.mask_path, mask_path_save) - - - - - diff --git a/spaces/Apex-X/GODROOP/roop/ui.py b/spaces/Apex-X/GODROOP/roop/ui.py deleted file mode 100644 index 684fd57ee6b0e1417b9cba1ad14246e7396b5876..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/GODROOP/roop/ui.py +++ /dev/null @@ -1,232 +0,0 @@ -import os -import webbrowser -import customtkinter as ctk -from typing import Callable, Tuple -import cv2 -from PIL import Image, ImageOps - -import roop.globals -import roop.metadata -from roop.face_analyser import get_one_face -from roop.capturer import get_video_frame, get_video_frame_total -from roop.predictor import predict_frame -from roop.processors.frame.core import get_frame_processors_modules -from roop.utilities import is_image, is_video, resolve_relative_path - -ROOT = None -ROOT_HEIGHT = 700 -ROOT_WIDTH = 600 - -PREVIEW = None -PREVIEW_MAX_HEIGHT = 700 -PREVIEW_MAX_WIDTH = 1200 - -RECENT_DIRECTORY_SOURCE = None -RECENT_DIRECTORY_TARGET = None -RECENT_DIRECTORY_OUTPUT = None - -preview_label = None -preview_slider = None -source_label = None -target_label = None -status_label = None - - -def init(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk: - global ROOT, PREVIEW - - ROOT = create_root(start, destroy) - PREVIEW = create_preview(ROOT) - - return ROOT - - -def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk: - global source_label, target_label, status_label - - ctk.deactivate_automatic_dpi_awareness() - ctk.set_appearance_mode('system') - ctk.set_default_color_theme(resolve_relative_path('ui.json')) - - root = ctk.CTk() - root.minsize(ROOT_WIDTH, ROOT_HEIGHT) - root.title(f'{roop.metadata.name} {roop.metadata.version}') - root.configure() - root.protocol('WM_DELETE_WINDOW', lambda: destroy()) - - source_label = ctk.CTkLabel(root, text=None) - source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25) - - target_label = ctk.CTkLabel(root, text=None) - target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25) - - source_button = ctk.CTkButton(root, text='Select a face', cursor='hand2', command=lambda: select_source_path()) - source_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1) - - target_button = ctk.CTkButton(root, text='Select a target', cursor='hand2', command=lambda: select_target_path()) - target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1) - - keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps) - keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps)) - keep_fps_checkbox.place(relx=0.1, rely=0.6) - - keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames) - keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get())) - keep_frames_switch.place(relx=0.1, rely=0.65) - - keep_audio_value = ctk.BooleanVar(value=roop.globals.keep_audio) - keep_audio_switch = ctk.CTkSwitch(root, text='Keep 
audio', variable=keep_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_audio', keep_audio_value.get())) - keep_audio_switch.place(relx=0.6, rely=0.6) - - many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces) - many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, cursor='hand2', command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get())) - many_faces_switch.place(relx=0.6, rely=0.65) - - start_button = ctk.CTkButton(root, text='Start', cursor='hand2', command=lambda: select_output_path(start)) - start_button.place(relx=0.15, rely=0.75, relwidth=0.2, relheight=0.05) - - stop_button = ctk.CTkButton(root, text='Destroy', cursor='hand2', command=lambda: destroy()) - stop_button.place(relx=0.4, rely=0.75, relwidth=0.2, relheight=0.05) - - preview_button = ctk.CTkButton(root, text='Preview', cursor='hand2', command=lambda: toggle_preview()) - preview_button.place(relx=0.65, rely=0.75, relwidth=0.2, relheight=0.05) - - status_label = ctk.CTkLabel(root, text=None, justify='center') - status_label.place(relx=0.1, rely=0.9, relwidth=0.8) - - donate_label = ctk.CTkLabel(root, text='^_^ Donate to project ^_^', justify='center', cursor='hand2') - donate_label.place(relx=0.1, rely=0.95, relwidth=0.8) - donate_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color')) - donate_label.bind('