"
- else:
- arg = repr(arg)
- text = "%s %s" % (text, arg)
- return text
-
- indent = " " * 4
-
- cur_lineno = bytecode.first_lineno
- prev_lineno = None
-
- if isinstance(bytecode, ConcreteBytecode):
- offset = 0
- for instr in bytecode:
- fields = []
- if instr.lineno is not None:
- cur_lineno = instr.lineno
- if lineno:
- fields.append(format_instr(instr))
- line = "".join(fields)
- line = format_line(offset, line)
- else:
- fields.append("% 3s %s" % (offset, format_instr(instr)))
- line = "".join(fields)
- print(line, file=stream)
-
- offset += instr.size
- elif isinstance(bytecode, Bytecode):
- labels = {}
- for index, instr in enumerate(bytecode):
- if isinstance(instr, Label):
- labels[instr] = "label_instr%s" % index
-
- for index, instr in enumerate(bytecode):
- if isinstance(instr, Label):
- label = labels[instr]
- line = "%s:" % label
- if index != 0:
- print(file=stream)
- else:
- if instr.lineno is not None:
- cur_lineno = instr.lineno
- line = format_instr(instr, labels)
- line = indent + format_line(index, line)
- print(line, file=stream)
- print(file=stream)
- elif isinstance(bytecode, ControlFlowGraph):
- labels = {}
- for block_index, block in enumerate(bytecode, 1):
- labels[id(block)] = "block%s" % block_index
-
- for block_index, block in enumerate(bytecode, 1):
- print("%s:" % labels[id(block)], file=stream)
- prev_lineno = None
- for index, instr in enumerate(block):
- if instr.lineno is not None:
- cur_lineno = instr.lineno
- line = format_instr(instr, labels)
- line = indent + format_line(index, line)
- print(line, file=stream)
- if block.next_block is not None:
- print(indent + "-> %s" % labels[id(block.next_block)], file=stream)
- print(file=stream)
- else:
- raise TypeError("unknown bytecode class")
diff --git a/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/torch_utils.py b/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/torch_utils.py
deleted file mode 100644
index 5e614b369a631d8829b5e189647f675aea1d2f46..0000000000000000000000000000000000000000
--- a/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/torch_utils.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import warnings
-
-import torch
-from torch import Tensor
-
-
-@torch.jit.script
-def torch_linspace(start: Tensor, stop: Tensor, num: int) -> torch.Tensor:
- """
- Copy-pasted from https://github.com/pytorch/pytorch/issues/61292
- Creates a tensor of shape [num, *start.shape] whose values are evenly spaced from start to stop, inclusive.
- Replicates the multi-dimensional behaviour of numpy.linspace in PyTorch.
- """
- # create a tensor of 'num' steps from 0 to 1
- steps = torch.arange(num, dtype=torch.float32, device=start.device) / (num - 1)
-
- # reshape the 'steps' tensor to [-1, *([1]*start.ndim)] to allow for broadcasting
- # - using 'steps.reshape([-1, *([1]*start.ndim)])' would be nice here but torchscript
- # "cannot statically infer the expected size of a list in this context", hence the code below
- for i in range(start.ndim):
- steps = steps.unsqueeze(-1)
-
- # the output starts at 'start' and increments until 'stop' in each dimension
- out = start[None] + steps * (stop - start)[None]
-
- return out
-
-
-def load_weights(
- model: torch.nn.Module, checkpoint: dict, strict=True
-) -> torch.nn.Module:
- """This function is used instead of the one provided by pytorch lightning
- because for unexplained reasons, the pytorch lightning load function did
- not behave as intended: loading several times from the same checkpoint
- resulted in different loaded weight values...
-
- Args:
- model: a model in which new weights should be set
- checkpoint: a loaded pytorch checkpoint (probably resulting from torch.load(filename))
- strict: Defaults to True; whether to fail if the checkpoint keys do not exactly match the model's keys.
-
- """
- if not strict:
- model_dict = model.state_dict()
- pretrained_dict = {
- k: v for k, v in checkpoint["state_dict"].items() if k in model_dict
- }
- diff1 = checkpoint["state_dict"].keys() - model_dict.keys()
- if diff1:
- warnings.warn(
- f"Found keys {diff1} in checkpoint without any match in the model, ignoring corresponding values."
- )
- diff2 = model_dict.keys() - checkpoint["state_dict"].keys()
- if diff2:
- warnings.warn(
- f"Missing keys {diff2} from the checkpoint, the corresponding weights will keep their initial values."
- )
- pretrained_dict = {
- k: v for k, v in checkpoint["state_dict"].items() if k in model_dict
- }
- model_dict.update(pretrained_dict)
- else:
- model_dict = checkpoint["state_dict"]
-
- model.load_state_dict(model_dict, strict=strict)
- return model
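
For reference, a minimal usage sketch of the torch_linspace helper above (illustrative only; it assumes PyTorch is installed and the function above is in scope):

import torch

# start/stop are tensors, so the result broadcasts to shape [num, *start.shape].
start = torch.tensor([0.0, 10.0])
stop = torch.tensor([1.0, 20.0])
out = torch_linspace(start, stop, num=5)
# out[:, 0] -> tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
# out[:, 1] -> tensor([10.0000, 12.5000, 15.0000, 17.5000, 20.0000])
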
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py
deleted file mode 100644
index ddfcf7f72f31658d75c8128de0732fbbf0e12b15..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyproject_hooks/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""Wrappers to call pyproject.toml-based build backend hooks.
-"""
-
-from ._impl import (
- BackendInvalid,
- BackendUnavailable,
- BuildBackendHookCaller,
- HookMissing,
- UnsupportedOperation,
- default_subprocess_runner,
- quiet_subprocess_runner,
-)
-
-__version__ = '1.0.0'
-__all__ = [
- 'BackendUnavailable',
- 'BackendInvalid',
- 'HookMissing',
- 'UnsupportedOperation',
- 'default_subprocess_runner',
- 'quiet_subprocess_runner',
- 'BuildBackendHookCaller',
-]
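
A hedged usage sketch of the names re-exported above, written from memory of the standalone pyproject_hooks 1.0 API (pip's vendored copy exposes the same names; the project and output paths are placeholders and should be checked against _impl):

from pyproject_hooks import BuildBackendHookCaller

hooks = BuildBackendHookCaller('path/to/project', build_backend='setuptools.build_meta')
reqs = hooks.get_requires_for_build_wheel()           # extra build requirements, e.g. ['wheel']
wheel_filename = hooks.build_wheel('path/to/output')  # returns the built wheel's filename
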
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_anchor_generator.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_anchor_generator.py
deleted file mode 100644
index 13a808e587382216da6fe7ee957603f448172657..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_anchor_generator.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-import unittest
-import torch
-
-from detectron2.config import get_cfg
-from detectron2.layers import ShapeSpec
-from detectron2.modeling.anchor_generator import DefaultAnchorGenerator, RotatedAnchorGenerator
-
-logger = logging.getLogger(__name__)
-
-
-class TestAnchorGenerator(unittest.TestCase):
- def test_default_anchor_generator(self):
- cfg = get_cfg()
- cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
- cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
-
- anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)])
-
- # only the last two dimensions of features matter here
- num_images = 2
- features = {"stage3": torch.rand(num_images, 96, 1, 2)}
- anchors = anchor_generator([features["stage3"]])
- expected_anchor_tensor = torch.tensor(
- [
- [-32.0, -8.0, 32.0, 8.0],
- [-16.0, -16.0, 16.0, 16.0],
- [-8.0, -32.0, 8.0, 32.0],
- [-64.0, -16.0, 64.0, 16.0],
- [-32.0, -32.0, 32.0, 32.0],
- [-16.0, -64.0, 16.0, 64.0],
- [-28.0, -8.0, 36.0, 8.0], # -28.0 == -32.0 + STRIDE (4)
- [-12.0, -16.0, 20.0, 16.0],
- [-4.0, -32.0, 12.0, 32.0],
- [-60.0, -16.0, 68.0, 16.0],
- [-28.0, -32.0, 36.0, 32.0],
- [-12.0, -64.0, 20.0, 64.0],
- ]
- )
-
- self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
-
- def test_default_anchor_generator_centered(self):
- # test explicit args
- anchor_generator = DefaultAnchorGenerator(
- sizes=[32, 64], aspect_ratios=[0.25, 1, 4], strides=[4]
- )
-
- # only the last two dimensions of features matter here
- num_images = 2
- features = {"stage3": torch.rand(num_images, 96, 1, 2)}
- expected_anchor_tensor = torch.tensor(
- [
- [-30.0, -6.0, 34.0, 10.0],
- [-14.0, -14.0, 18.0, 18.0],
- [-6.0, -30.0, 10.0, 34.0],
- [-62.0, -14.0, 66.0, 18.0],
- [-30.0, -30.0, 34.0, 34.0],
- [-14.0, -62.0, 18.0, 66.0],
- [-26.0, -6.0, 38.0, 10.0],
- [-10.0, -14.0, 22.0, 18.0],
- [-2.0, -30.0, 14.0, 34.0],
- [-58.0, -14.0, 70.0, 18.0],
- [-26.0, -30.0, 38.0, 34.0],
- [-10.0, -62.0, 22.0, 66.0],
- ]
- )
-
- anchors = anchor_generator([features["stage3"]])
- self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
-
- anchors = torch.jit.script(anchor_generator)([features["stage3"]])
- self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
-
- def test_rrpn_anchor_generator(self):
- cfg = get_cfg()
- cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
- cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
- cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [0, 45] # test single list[float]
- anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)])
-
- # only the last two dimensions of features matter here
- num_images = 2
- features = {"stage3": torch.rand(num_images, 96, 1, 2)}
- anchors = anchor_generator([features["stage3"]])
- expected_anchor_tensor = torch.tensor(
- [
- [0.0, 0.0, 64.0, 16.0, 0.0],
- [0.0, 0.0, 64.0, 16.0, 45.0],
- [0.0, 0.0, 32.0, 32.0, 0.0],
- [0.0, 0.0, 32.0, 32.0, 45.0],
- [0.0, 0.0, 16.0, 64.0, 0.0],
- [0.0, 0.0, 16.0, 64.0, 45.0],
- [0.0, 0.0, 128.0, 32.0, 0.0],
- [0.0, 0.0, 128.0, 32.0, 45.0],
- [0.0, 0.0, 64.0, 64.0, 0.0],
- [0.0, 0.0, 64.0, 64.0, 45.0],
- [0.0, 0.0, 32.0, 128.0, 0.0],
- [0.0, 0.0, 32.0, 128.0, 45.0],
- [4.0, 0.0, 64.0, 16.0, 0.0], # 4.0 == 0.0 + STRIDE (4)
- [4.0, 0.0, 64.0, 16.0, 45.0],
- [4.0, 0.0, 32.0, 32.0, 0.0],
- [4.0, 0.0, 32.0, 32.0, 45.0],
- [4.0, 0.0, 16.0, 64.0, 0.0],
- [4.0, 0.0, 16.0, 64.0, 45.0],
- [4.0, 0.0, 128.0, 32.0, 0.0],
- [4.0, 0.0, 128.0, 32.0, 45.0],
- [4.0, 0.0, 64.0, 64.0, 0.0],
- [4.0, 0.0, 64.0, 64.0, 45.0],
- [4.0, 0.0, 32.0, 128.0, 0.0],
- [4.0, 0.0, 32.0, 128.0, 45.0],
- ]
- )
-
- self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
-
-
-if __name__ == "__main__":
- unittest.main()
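
The expected tensors in these tests follow directly from the size/aspect-ratio arithmetic; a small stand-alone sketch of that arithmetic (plain math, not the detectron2 API), reproducing the first expected row of test_default_anchor_generator:

import math

size, ratio = 32.0, 0.25          # sizes=[32, 64], aspect_ratios=[0.25, 1, 4] as configured above
area = size * size
w = math.sqrt(area / ratio)       # 64.0
h = ratio * w                     # 16.0
print([-w / 2, -h / 2, w / 2, h / 2])   # [-32.0, -8.0, 32.0, 8.0], the first expected anchor
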
diff --git a/spaces/ThirdEyeData/Network_Data_Anomaly/README.md b/spaces/ThirdEyeData/Network_Data_Anomaly/README.md
deleted file mode 100644
index 2331ac816a9361eff4741f46123f44b7d0570710..0000000000000000000000000000000000000000
--- a/spaces/ThirdEyeData/Network_Data_Anomaly/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Network Data Anomaly
-emoji: 🐢
-colorFrom: yellow
-colorTo: red
-sdk: streamlit
-sdk_version: 1.15.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/style.css b/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/style.css
deleted file mode 100644
index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000
--- a/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI/style.css
+++ /dev/null
@@ -1,3 +0,0 @@
-h1 {
- text-align: center;
-}
diff --git a/spaces/TushDeMort/yolo/weights.py b/spaces/TushDeMort/yolo/weights.py
deleted file mode 100644
index 98e3bb4253e7be0465048f9748b58edc6d1c5b4a..0000000000000000000000000000000000000000
--- a/spaces/TushDeMort/yolo/weights.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import requests
-import os.path
-
-def weights():
-
- path = 'models/yolov7.pt'
-
- check_file = os.path.isfile(path)
-
- if check_file == True:
- pass
- else:
- URL = "https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt"
- response = requests.get(URL)
- open(path, "wb").write(response.content)
-
-if __name__ == "__main__":
- try:
- weights()
- except Exception as e:
- print(f'error is {e}')
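
The helper above buffers the whole checkpoint in memory before writing it out. A possible alternative (an illustrative sketch, not this repo's code) streams the download to disk in chunks:

import os.path
import requests

def weights_streamed(path='models/yolov7.pt',
                     url='https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt'):
    # Skip the download if the checkpoint is already present.
    if os.path.isfile(path):
        return
    with requests.get(url, stream=True, timeout=60) as response:
        response.raise_for_status()
        with open(path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1 << 20):  # 1 MiB per chunk
                f.write(chunk)
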
diff --git a/spaces/TwoCH4/White-box-Cartoonization/wbc/cartoonize.py b/spaces/TwoCH4/White-box-Cartoonization/wbc/cartoonize.py
deleted file mode 100644
index 25faf1ceb95aaed9a3f7a7982d17a03dc6bc32b1..0000000000000000000000000000000000000000
--- a/spaces/TwoCH4/White-box-Cartoonization/wbc/cartoonize.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import os
-import cv2
-import numpy as np
-import tensorflow as tf
-import wbc.network as network
-import wbc.guided_filter as guided_filter
-from tqdm import tqdm
-
-
-def resize_crop(image):
- h, w, c = np.shape(image)
- if min(h, w) > 720:
- if h > w:
- h, w = int(720 * h / w), 720
- else:
- h, w = 720, int(720 * w / h)
- image = cv2.resize(image, (w, h),
- interpolation=cv2.INTER_AREA)
- h, w = (h // 8) * 8, (w // 8) * 8
- image = image[:h, :w, :]
- return image
-
-
-def cartoonize(load_folder, save_folder, model_path):
- print(model_path)
- input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
- network_out = network.unet_generator(input_photo)
- final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3)
-
- all_vars = tf.trainable_variables()
- gene_vars = [var for var in all_vars if 'generator' in var.name]
- saver = tf.train.Saver(var_list=gene_vars)
-
- config = tf.ConfigProto()
- config.gpu_options.allow_growth = True
- sess = tf.Session(config=config)
-
- sess.run(tf.global_variables_initializer())
- saver.restore(sess, tf.train.latest_checkpoint(model_path))
- name_list = os.listdir(load_folder)
- for name in tqdm(name_list):
- try:
- load_path = os.path.join(load_folder, name)
- save_path = os.path.join(save_folder, name)
- image = cv2.imread(load_path)
- image = resize_crop(image)
- batch_image = image.astype(np.float32) / 127.5 - 1
- batch_image = np.expand_dims(batch_image, axis=0)
- output = sess.run(final_out, feed_dict={input_photo: batch_image})
- output = (np.squeeze(output) + 1) * 127.5
- output = np.clip(output, 0, 255).astype(np.uint8)
- cv2.imwrite(save_path, output)
- except:
- print('cartoonize {} failed'.format(load_path))
-
-
-class Cartoonize:
- def __init__(self, model_path):
- print(model_path)
- self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
- network_out = network.unet_generator(self.input_photo)
- self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3)
-
- all_vars = tf.trainable_variables()
- gene_vars = [var for var in all_vars if 'generator' in var.name]
- saver = tf.train.Saver(var_list=gene_vars)
-
- config = tf.ConfigProto()
- config.gpu_options.allow_growth = True
- self.sess = tf.Session(config=config)
-
- self.sess.run(tf.global_variables_initializer())
- saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
-
- def run(self, load_folder, save_folder):
- name_list = os.listdir(load_folder)
- for name in tqdm(name_list):
- try:
- load_path = os.path.join(load_folder, name)
- save_path = os.path.join(save_folder, name)
- image = cv2.imread(load_path)
- image = resize_crop(image)
- batch_image = image.astype(np.float32) / 127.5 - 1
- batch_image = np.expand_dims(batch_image, axis=0)
- output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image})
- output = (np.squeeze(output) + 1) * 127.5
- output = np.clip(output, 0, 255).astype(np.uint8)
- cv2.imwrite(save_path, output)
- except:
- print('cartoonize {} failed'.format(load_path))
-
- def run_sigle(self, load_path, save_path):
- try:
- image = cv2.imread(load_path)
- image = resize_crop(image)
- batch_image = image.astype(np.float32) / 127.5 - 1
- batch_image = np.expand_dims(batch_image, axis=0)
- output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image})
- output = (np.squeeze(output) + 1) * 127.5
- output = np.clip(output, 0, 255).astype(np.uint8)
- cv2.imwrite(save_path, output)
- except:
- print('cartoonize {} failed'.format(load_path))
-
-
-if __name__ == '__main__':
- model_path = 'saved_models'
- load_folder = 'test_images'
- save_folder = 'cartoonized_images'
- if not os.path.exists(save_folder):
- os.mkdir(save_folder)
- cartoonize(load_folder, save_folder, model_path)
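
A short usage sketch of the Cartoonize class defined above (paths are placeholders; it assumes a TensorFlow 1.x environment, since the class relies on tf.placeholder and tf.Session):

# Build the graph and restore the checkpoint once, then reuse the session for many images.
cartoonizer = Cartoonize(model_path='saved_models')

# Single image (the method is spelled run_sigle in the source above).
cartoonizer.run_sigle('test_images/photo.jpg', 'cartoonized_images/photo.jpg')

# Whole folder.
cartoonizer.run('test_images', 'cartoonized_images')
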
diff --git a/spaces/Ukrania/RVC-Models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/spaces/Ukrania/RVC-Models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py
deleted file mode 100644
index ee3171bcb7c4a5066560723108b56e055f18be45..0000000000000000000000000000000000000000
--- a/spaces/Ukrania/RVC-Models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
-import pyworld
-import numpy as np
-
-
-class DioF0Predictor(F0Predictor):
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
- self.hop_length = hop_length
- self.f0_min = f0_min
- self.f0_max = f0_max
- self.sampling_rate = sampling_rate
-
- def interpolate_f0(self, f0):
- """
- Interpolate the F0 sequence (fill in the unvoiced/zero frames).
- """
-
- data = np.reshape(f0, (f0.size, 1))
-
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
- vuv_vector[data > 0.0] = 1.0
- vuv_vector[data <= 0.0] = 0.0
-
- ip_data = data
-
- frame_number = data.size
- last_value = 0.0
- for i in range(frame_number):
- if data[i] <= 0.0:
- j = i + 1
- for j in range(i + 1, frame_number):
- if data[j] > 0.0:
- break
- if j < frame_number - 1:
- if last_value > 0.0:
- step = (data[j] - data[i - 1]) / float(j - i)
- for k in range(i, j):
- ip_data[k] = data[i - 1] + step * (k - i + 1)
- else:
- for k in range(i, j):
- ip_data[k] = data[j]
- else:
- for k in range(i, frame_number):
- ip_data[k] = last_value
- else:
- ip_data[i] = data[i]  # there may be an unnecessary copy here
- last_value = data[i]
-
- return ip_data[:, 0], vuv_vector[:, 0]
-
- def resize_f0(self, x, target_len):
- source = np.array(x)
- source[source < 0.001] = np.nan
- target = np.interp(
- np.arange(0, len(source) * target_len, len(source)) / target_len,
- np.arange(0, len(source)),
- source,
- )
- res = np.nan_to_num(target)
- return res
-
- def compute_f0(self, wav, p_len=None):
- if p_len is None:
- p_len = wav.shape[0] // self.hop_length
- f0, t = pyworld.dio(
- wav.astype(np.double),
- fs=self.sampling_rate,
- f0_floor=self.f0_min,
- f0_ceil=self.f0_max,
- frame_period=1000 * self.hop_length / self.sampling_rate,
- )
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
- for index, pitch in enumerate(f0):
- f0[index] = round(pitch, 1)
- return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
-
- def compute_f0_uv(self, wav, p_len=None):
- if p_len is None:
- p_len = wav.shape[0] // self.hop_length
- f0, t = pyworld.dio(
- wav.astype(np.double),
- fs=self.sampling_rate,
- f0_floor=self.f0_min,
- f0_ceil=self.f0_max,
- frame_period=1000 * self.hop_length / self.sampling_rate,
- )
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
- for index, pitch in enumerate(f0):
- f0[index] = round(pitch, 1)
- return self.interpolate_f0(self.resize_f0(f0, p_len))
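
A usage sketch for DioF0Predictor (illustrative only; it assumes numpy and pyworld are installed and the class above is importable):

import numpy as np

sr = 44100
t = np.arange(sr) / sr                      # one second of audio
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)   # 220 Hz test tone

predictor = DioF0Predictor(hop_length=512, sampling_rate=sr)
f0 = predictor.compute_f0(wav)              # one value per hop, close to 220.0 in voiced frames
f0, uv = predictor.compute_f0_uv(wav)       # F0 plus a voiced/unvoiced mask
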
diff --git a/spaces/Vasanthgx/Cats_vs_Dogs_vasanth/README.md b/spaces/Vasanthgx/Cats_vs_Dogs_vasanth/README.md
deleted file mode 100644
index f5e718271757ae76312a4976dfdc8b744fa240b9..0000000000000000000000000000000000000000
--- a/spaces/Vasanthgx/Cats_vs_Dogs_vasanth/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Cats Vs Dogs Vasanth
-emoji: 🚀
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Vegecken/sovits4dzl/modules/mel_processing.py b/spaces/Vegecken/sovits4dzl/modules/mel_processing.py
deleted file mode 100644
index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000
--- a/spaces/Vegecken/sovits4dzl/modules/mel_processing.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import math
-import os
-import random
-import torch
-from torch import nn
-import torch.nn.functional as F
-import torch.utils.data
-import numpy as np
-import librosa
-import librosa.util as librosa_util
-from librosa.util import normalize, pad_center, tiny
-from scipy.signal import get_window
-from scipy.io.wavfile import read
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
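
A usage sketch of mel_spectrogram_torch above. The hyperparameter values are an assumption chosen for illustration, not taken from this repo's config, and it assumes a PyTorch version that still accepts return_complex=False in torch.stft, as the module requires:

import torch

y = torch.randn(1, 44100).clamp(-1.0, 1.0)  # [batch, samples], expected to lie in [-1, 1]
mel = mel_spectrogram_torch(
    y, n_fft=2048, num_mels=80, sampling_rate=44100,
    hop_size=512, win_size=2048, fmin=0, fmax=None, center=False,
)
print(mel.shape)  # torch.Size([1, 80, n_frames])
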
diff --git a/spaces/VideoCrafter/VideoCrafter/demo_test.py b/spaces/VideoCrafter/VideoCrafter/demo_test.py
deleted file mode 100644
index 759039753e6c8caa3eb5507690e68a07e50023c7..0000000000000000000000000000000000000000
--- a/spaces/VideoCrafter/VideoCrafter/demo_test.py
+++ /dev/null
@@ -1,16 +0,0 @@
-class Text2Video():
- def __init__(self, result_dir='./tmp/') -> None:
- pass
-
- def get_prompt(self, input_text, steps=50, cfg_scale=15.0, eta=1.0, fps=16):
-
- return '01.mp4'
-
-class Image2Video:
- def __init__(self, result_dir='./tmp/') -> None:
- pass
-
- def get_image(self, input_image, input_prompt, i2v_steps=50, i2v_cfg_scale=15.0, i2v_eta=1.0, i2v_fps=16):
-
- return '01.mp4'
-
\ No newline at end of file
diff --git a/spaces/Writer/token-counter/README.md b/spaces/Writer/token-counter/README.md
deleted file mode 100644
index 45b4051524dd319c765367c7c1c118f11157bbaf..0000000000000000000000000000000000000000
--- a/spaces/Writer/token-counter/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Token Counter
-emoji: 📈
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/general_optimizer.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/general_optimizer.py
deleted file mode 100644
index f6a0487d582fe6264627d302d6580364affdf754..0000000000000000000000000000000000000000
--- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/general_optimizer.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from .torch_core import *
-from torch.optim import Optimizer
-import types
-
-__all__ = ['StatScope', 'Statistic', 'ConstStatistic', 'AvgStatistic', 'AvgSquare', 'GeneralOptimizer']
-
-StatScope = Enum('StatScope', 'Global Group Layer Channel Weight')
-
-@dataclass
-class Statistic():
- name:str
- param:float=0.9 # e.g. for exp moving average
- scope:StatScope=StatScope.Weight
- init:float=0. # starting value
-
- @property
- def buf(self): return f'{self.name}_buffer'
-
- def new_step(self):
- "Set state when computing statistics for Global or Group"
- raise NotImplementedError
-
- def accumulate(self, val):
- "Add `val` to statistic"
- raise NotImplementedError
-
- def update(self, state, param, val=None, step=None):
- "Update state with the accumulated value, or with `val` (if `Weight` or `Layer` scope)"
- raise NotImplementedError
-
-class ConstStatistic(Statistic):
- @property
- def buf(self): return None
- def new_step(self): pass
- def accumulate(self, val): pass
- def update(self, state, param, val=None, step=None): return param
-
-@dataclass
-class CounterStat(Statistic):
- def __post_init__(self): self.init,self._buf,self.name = 0,self.name,None
- @property
- def buf(self): return self._buf
- def new_step(self): pass
- def accumulate(self, val): pass
- def update(self, state, param, val=None, step=None): return state + 1
-
-@dataclass
-class AvgStatistic(Statistic):
- decay:bool=False
- debias:bool=False
- def new_step(self): self.val,self.count = 0.,0
-
- def accumulate(self, val):
- self.count += 1
- self.val += self._get_val1(val)
-
- def _get_val1(self, val): return val.mean()
- def _get_val2(self, state, val, param): return state.add_(1-param, val) if self.decay else state.add_(val)
- def _get_val3(self, state, val, param):
- v = val.view(val.size(0), -1).mean(1)
- return state.add_(1-param, v) if self.decay else state.add_(v)
-
- def update(self, state, param, val=None, step=None):
- if self.scope == StatScope.Weight:
- # `state` is a tensor
- res = self._get_val2(state.mul_(param), val, param)
- elif self.scope == StatScope.Channel:
- # `state` is a tensor of size n_channels
- res = self._get_val3(state.mul_(param), val, param)
- # For everything else, `state` is a scalar
- elif self.scope == StatScope.Layer: res = state*param + self._get_val1(val) * (1-param if self.decay else 1.)
- elif self.count != 0: res = state*param + self.val/self.count * (1-param if self.decay else 1.)
- else: return state
- if self.debias and step is not None: res /= (1 - param ** step)
- return res
-
-class AvgSquare(AvgStatistic):
-
- def __init__(self, name:str, param:float=0.9, scope=StatScope.Weight, init:float=0., decay:bool=True, debias:bool=False):
- super().__init__(name, param=param, scope=scope, init=init, decay=decay, debias=debias)
-
- def _get_val1(self, val): return torch.norm(val).pow(2)/val.numel()
- def _get_val2(self, state, val, param):
- return state.addcmul_(1-param, val, val) if self.decay else state.addcmul_(val, val)
- def _get_val3(self, state, val, param):
- v = val.view(val.size(0), -1).mean(1)
- return state.addcmul_(1-param, v, v) if self.decay else state.addcmul_(v, v)
-
-class GeneralOptimizer(Optimizer):
- def __init__(self, params, stats=None, on_step:Callable=None):
- defaults = {s.name:s.param for s in listify(stats) if s.name is not None}
- super().__init__(params, defaults)
- self.global_stats,self.group_stats,self.layer_stats,self.channel_stats,self.weight_stats = self._split_stats(stats)
- self.init_stats()
- if on_step is not None: self.on_step = types.MethodType(on_step, self)
-
- def step(self, closure=None):
- self.update_stats()
- for i,pg in enumerate(self.param_groups):
- for p in pg['params']:
- if p.grad is not None: self.on_step(p, pg, i)
-
- def on_step(self, p, group, group_idx): p.data.add_(-group['lr'], p.grad.data)
-
- def _split_stats(self, stats):
- splits = [[stat for stat in listify(stats) if stat.scope==scope] for scope in StatScope]
- for split,s in zip([splits[0], splits[1], splits[2]+splits[3]+splits[4]], StatScope):
- if np.any([getattr(s, 'debias', False) for s in split]): split.insert(0, CounterStat('step', scope=s))
- return splits
-
- def _init_stats(self, stats, data=None):
- return {stat.buf: stat.init if data is None
- else torch.zeros_like(data) + stat.init for stat in stats if stat.buf is not None}
-
- def init_stats(self):
- self.state['global'] = self._init_stats(self.global_stats)
- for i,pg in enumerate(self.param_groups):
- self.state[f'group{i}'] = self._init_stats(self.group_stats)
- for p in pg['params']:
- self.state[p] = self._init_stats(self.layer_stats)
- self.state[p].update(self._init_stats(self.channel_stats, p.data.view(p.data.size(0), -1).mean(1)))
- self.state[p].update(self._init_stats(self.weight_stats, p.data))
-
- def _set_bufs(self, p, stats, pg, val=None):
- d = self.state[p]
- for stat in stats:
- if stat.buf is not None: d[stat.buf] = stat.update(d[stat.buf], pg[stat.name], val=val, step=d.get('step', None))
-
- def update_stats(self):
- for stat in self.global_stats: stat.new_step()
- for i,pg in enumerate(self.param_groups):
- for stat in self.group_stats: stat.new_step()
- for p in pg['params']:
- if p.grad is not None:
- for stat in self.global_stats + self.group_stats: stat.accumulate(p.grad.data)
- self._set_bufs(p, self.layer_stats+self.channel_stats+self.weight_stats, pg, p.grad.data)
- self._set_bufs(f'group{i}', self.group_stats, pg)
- self._set_bufs('global', self.global_stats, self.param_groups[0])
-
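
A usage sketch of GeneralOptimizer above (illustrative only; it assumes a fastai-v1-era PyTorch, since on_step and AvgStatistic use the deprecated add_(scalar, tensor) overload):

import torch
from torch import nn

model = nn.Linear(10, 2)
# Track an exponential moving average of each weight's gradient as a per-weight statistic.
stats = [AvgStatistic('grad_avg', param=0.9, scope=StatScope.Weight, decay=True)]
opt = GeneralOptimizer([{'params': model.parameters(), 'lr': 0.1}], stats=stats)

loss = model(torch.randn(4, 10)).pow(2).mean()
loss.backward()
opt.step()  # the default on_step applies plain SGD; buffers live in opt.state[p]['grad_avg_buffer']
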
diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/torch_core.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/torch_core.py
deleted file mode 100644
index 6b089e09e4e08c2b6d50b70ef3223fadae2f48cb..0000000000000000000000000000000000000000
--- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/torch_core.py
+++ /dev/null
@@ -1,430 +0,0 @@
-"Utility functions to help deal with tensors"
-from .imports.torch import *
-from .core import *
-from collections import OrderedDict
-from torch.nn.parallel import DistributedDataParallel
-
-AffineMatrix = Tensor
-BoolOrTensor = Union[bool,Tensor]
-FloatOrTensor = Union[float,Tensor]
-IntOrTensor = Union[int,Tensor]
-ItemsList = Collection[Union[Tensor,ItemBase,'ItemsList',float,int]]
-LambdaFunc = Callable[[Tensor],Tensor]
-LayerFunc = Callable[[nn.Module],None]
-ModuleList = Collection[nn.Module]
-NPArray = np.ndarray
-OptOptimizer = Optional[optim.Optimizer]
-ParamList = Collection[nn.Parameter]
-Rank0Tensor = NewType('OneEltTensor', Tensor)
-SplitFunc = Callable[[nn.Module], List[nn.Module]]
-SplitFuncOrIdxList = Union[Callable, Collection[ModuleList]]
-TensorOrNumber = Union[Tensor,Number]
-TensorOrNumList = Collection[TensorOrNumber]
-TensorImage = Tensor
-TensorImageSize = Tuple[int,int,int]
-Tensors = Union[Tensor, Collection['Tensors']]
-Weights = Dict[str,Tensor]
-
-AffineFunc = Callable[[KWArgs], AffineMatrix]
-HookFunc = Callable[[nn.Module, Tensors, Tensors], Any]
-LogitTensorImage = TensorImage
-LossFunction = Callable[[Tensor, Tensor], Rank0Tensor]
-MetricFunc = Callable[[Tensor,Tensor],TensorOrNumber]
-MetricFuncList = Collection[MetricFunc]
-MetricsList = Collection[TensorOrNumber]
-OptLossFunc = Optional[LossFunction]
-OptMetrics = Optional[MetricsList]
-OptSplitFunc = Optional[SplitFunc]
-PixelFunc = Callable[[TensorImage, ArgStar, KWArgs], TensorImage]
-
-LightingFunc = Callable[[LogitTensorImage, ArgStar, KWArgs], LogitTensorImage]
-
-fastai_types = {
- AnnealFunc:'AnnealFunc', ArgStar:'ArgStar', BatchSamples:'BatchSamples',
- FilePathList:'FilePathList', Floats:'Floats', ImgLabel:'ImgLabel', ImgLabels:'ImgLabels', KeyFunc:'KeyFunc',
- KWArgs:'KWArgs', ListOrItem:'ListOrItem', ListRules:'ListRules', ListSizes:'ListSizes',
- NPArrayableList:'NPArrayableList', NPArrayList:'NPArrayList', NPArrayMask:'NPArrayMask', NPImage:'NPImage',
- OptDataFrame:'OptDataFrame', OptListOrItem:'OptListOrItem', OptRange:'OptRange', OptStrTuple:'OptStrTuple',
- OptStats:'OptStats', PathOrStr:'PathOrStr', PBar:'PBar', Point:'Point', Points:'Points', Sizes:'Sizes',
- SplitArrayList:'SplitArrayList', StartOptEnd:'StartOptEnd', StrList:'StrList', Tokens:'Tokens',
- OptStrList:'OptStrList', AffineMatrix:'AffineMatrix', BoolOrTensor:'BoolOrTensor', FloatOrTensor:'FloatOrTensor',
- IntOrTensor:'IntOrTensor', ItemsList:'ItemsList', LambdaFunc:'LambdaFunc',
- LayerFunc:'LayerFunc', ModuleList:'ModuleList', OptOptimizer:'OptOptimizer', ParamList:'ParamList',
- Rank0Tensor:'Rank0Tensor', SplitFunc:'SplitFunc', SplitFuncOrIdxList:'SplitFuncOrIdxList',
- TensorOrNumber:'TensorOrNumber', TensorOrNumList:'TensorOrNumList', TensorImage:'TensorImage',
- TensorImageSize:'TensorImageSize', Tensors:'Tensors', Weights:'Weights', AffineFunc:'AffineFunc',
- HookFunc:'HookFunc', LogitTensorImage:'LogitTensorImage', LossFunction:'LossFunction', MetricFunc:'MetricFunc',
- MetricFuncList:'MetricFuncList', MetricsList:'MetricsList', OptLossFunc:'OptLossFunc', OptMetrics:'OptMetrics',
- OptSplitFunc:'OptSplitFunc', PixelFunc:'PixelFunc', LightingFunc:'LightingFunc', IntsOrStrs:'IntsOrStrs',
- PathLikeOrBinaryStream:'PathLikeOrBinaryStream'
-}
-
-bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
-bias_types = (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d)
-def is_pool_type(l:Callable): return re.search(r'Pool[123]d$', l.__class__.__name__)
-no_wd_types = bn_types + (nn.LayerNorm,)
-defaults.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
-AdamW = partial(optim.Adam, betas=(0.9,0.99))
-
-#Monkey-patch `torch.cuda.set_device` so that it updates `defaults.device`
-_old_torch_cuda_set_device = torch.cuda.set_device
-def _new_torch_cuda_set_device(device):
- _old_torch_cuda_set_device(device)
- defaults.device = torch.device('cuda', device) if isinstance(device, int) else device
-torch.cuda.set_device = _new_torch_cuda_set_device
-
-def tensor(x:Any, *rest)->Tensor:
- "Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly."
- if len(rest): x = (x,)+rest
- # XXX: Pytorch bug in dataloader using num_workers>0; TODO: create repro and report
- if is_listy(x) and len(x)==0: return tensor(0)
- res = torch.tensor(x) if is_listy(x) else as_tensor(x)
- if res.dtype is torch.int32:
- warn('Tensor is int32: upgrading to int64; for better performance use int64 input')
- return res.long()
- return res
-
-class Module(nn.Module, metaclass=PrePostInitMeta):
- "Same as `nn.Module`, but no need for subclasses to call `super().__init__`"
- def __pre_init__(self): super().__init__()
- def __init__(self): pass
-
-def np_address(x:np.ndarray)->int:
- "Address of `x` in memory."
- return x.__array_interface__['data'][0]
-
-def to_detach(b:Tensors, cpu:bool=True):
- "Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
- def _inner(x, cpu=True):
- if not isinstance(x,Tensor): return x
- x = x.detach()
- return x.cpu() if cpu else x
- return recurse(_inner, b, cpu=cpu)
-
-def to_data(b:ItemsList):
- "Recursively map lists of items in `b ` to their wrapped data."
- return recurse(lambda x: x.data if isinstance(x,ItemBase) else x, b)
-
-def to_cpu(b:ItemsList):
- "Recursively map lists of tensors in `b ` to the cpu."
- return recurse(lambda x: x.cpu() if isinstance(x,Tensor) else x, b)
-
-def to_half(b:Collection[Tensor])->Collection[Tensor]:
- "Recursively map lists of tensors in `b ` to FP16."
- return recurse(lambda x: x.half() if x.dtype not in [torch.int64, torch.int32, torch.int16] else x, b)
-
-def to_float(b:Collection[Tensor])->Collection[Tensor]:
- "Recursively map lists of tensors in `b ` to FP32."
- return recurse(lambda x: x.float() if x.dtype not in [torch.int64, torch.int32, torch.int16] else x, b)
-
-def to_device(b:Tensors, device:torch.device):
- "Recursively put `b` on `device`."
- device = ifnone(device, defaults.device)
- return recurse(lambda x: x.to(device, non_blocking=True), b)
-
-def data_collate(batch:ItemsList)->Tensor:
- "Convert `batch` items to tensor data."
- return torch.utils.data.dataloader.default_collate(to_data(batch))
-
-def requires_grad(m:nn.Module, b:Optional[bool]=None)->Optional[bool]:
- "If `b` is not set return `requires_grad` of first param, else set `requires_grad` on all params as `b`"
- ps = list(m.parameters())
- if not ps: return None
- if b is None: return ps[0].requires_grad
- for p in ps: p.requires_grad=b
-
-def trainable_params(m:nn.Module)->ParamList:
- "Return list of trainable params in `m`."
- res = filter(lambda p: p.requires_grad, m.parameters())
- return res
-
-def children(m:nn.Module)->ModuleList:
- "Get children of `m`."
- return list(m.children())
-
-def num_children(m:nn.Module)->int:
- "Get number of children modules in `m`."
- return len(children(m))
-
-def range_children(m:nn.Module)->Iterator[int]:
- "Return iterator of len of children of `m`."
- return range(num_children(m))
-
-class ParameterModule(Module):
- "Register a lone parameter `p` in a module."
- def __init__(self, p:nn.Parameter): self.val = p
- def forward(self, x): return x
-
-def children_and_parameters(m:nn.Module):
- "Return the children of `m` and its direct parameters not registered in modules."
- children = list(m.children())
- children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
- for p in m.parameters():
- if id(p) not in children_p: children.append(ParameterModule(p))
- return children
-
-def flatten_model(m:nn.Module):
- if num_children(m):
- mapped = map(flatten_model,children_and_parameters(m))
- return sum(mapped,[])
- else:
- return [m]
-
-#flatten_model = lambda m: sum(map(flatten_model,children_and_parameters(m)),[]) if num_children(m) else [m]
-
-def first_layer(m:nn.Module)->nn.Module:
- "Retrieve first layer in a module `m`."
- return flatten_model(m)[0]
-
-def last_layer(m:nn.Module)->nn.Module:
- "Retrieve last layer in a module `m`."
- return flatten_model(m)[-1]
-
-def split_model_idx(model:nn.Module, idxs:Collection[int])->ModuleList:
- "Split `model` according to the indexes in `idxs`."
- layers = flatten_model(model)
- if idxs[0] != 0: idxs = [0] + idxs
- if idxs[-1] != len(layers): idxs.append(len(layers))
- return [nn.Sequential(*layers[i:j]) for i,j in zip(idxs[:-1],idxs[1:])]
-
-def split_model(model:nn.Module=None, splits:Collection[Union[nn.Module,ModuleList]]=None):
- "Split `model` according to the layers in `splits`."
- splits = listify(splits)
- if isinstance(splits[0], nn.Module):
- layers = flatten_model(model)
- idxs = [layers.index(first_layer(s)) for s in splits]
- return split_model_idx(model, idxs)
- return [nn.Sequential(*s) for s in splits]
-
-def get_param_groups(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]:
- return [sum([list(trainable_params(c)) for c in l.children()], []) for l in layer_groups]
-
-def split_no_wd_params(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]:
- "Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest."
- split_params = []
- for l in layer_groups:
- l1,l2 = [],[]
- for c in l.children():
- if isinstance(c, no_wd_types): l2 += list(trainable_params(c))
- elif isinstance(c, bias_types):
- bias = c.bias if hasattr(c, 'bias') else None
- l1 += [p for p in trainable_params(c) if not (p is bias)]
- if bias is not None: l2.append(bias)
- else: l1 += list(trainable_params(c))
- #Since we scan the children separately, we might get duplicates (tied weights). We need to preserve the order
- #for the optimizer load of state_dict
- l1,l2 = uniqueify(l1),uniqueify(l2)
- split_params += [l1, l2]
- return split_params
-
-def set_bn_eval(m:nn.Module)->None:
- "Set bn layers in eval mode for all recursive children of `m`."
- for l in m.children():
- if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
- l.eval()
- set_bn_eval(l)
-
-def batch_to_half(b:Collection[Tensor])->Collection[Tensor]:
- "Set the input of batch `b` to half precision."
- return [to_half(b[0]), b[1]]
-
-def bn2float(module:nn.Module)->nn.Module:
- "If `module` is batchnorm don't use half precision."
- if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): module.float()
- for child in module.children(): bn2float(child)
- return module
-
-def model2half(model:nn.Module)->nn.Module:
- "Convert `model` to half precision except the batchnorm layers."
- return bn2float(model.half())
-
-def init_default(m:nn.Module, func:LayerFunc=nn.init.kaiming_normal_)->nn.Module:
- "Initialize `m` weights with `func` and set `bias` to 0."
- if func:
- if hasattr(m, 'weight'): func(m.weight)
- if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
- return m
-
-def cond_init(m:nn.Module, init_func:LayerFunc):
- "Initialize the non-batchnorm layers of `m` with `init_func`."
- if (not isinstance(m, bn_types)) and requires_grad(m): init_default(m, init_func)
-
-def apply_leaf(m:nn.Module, f:LayerFunc):
- "Apply `f` to children of `m`."
- c = children(m)
- if isinstance(m, nn.Module): f(m)
- for l in c: apply_leaf(l,f)
-
-def apply_init(m, init_func:LayerFunc):
- "Initialize all non-batchnorm layers of `m` with `init_func`."
- apply_leaf(m, partial(cond_init, init_func=init_func))
-
-def in_channels(m:nn.Module) -> List[int]:
- "Return the shape of the first weight layer in `m`."
- for l in flatten_model(m):
- if hasattr(l, 'weight'): return l.weight.shape[1]
- raise Exception('No weight layer')
-
-class ModelOnCPU():
- "A context manager to evaluate `model` on the CPU inside."
- def __init__(self, model:nn.Module): self.model = model
- def __enter__(self):
- self.device = one_param(self.model).device
- return self.model.cpu()
- def __exit__(self, type, value, traceback):
- self.model = self.model.to(self.device)
-
-class NoneReduceOnCPU():
- "A context manager to evaluate `loss_func` with none reduce and weights on the CPU inside."
- def __init__(self, loss_func:LossFunction):
- self.loss_func,self.device,self.old_red = loss_func,None,None
-
- def __enter__(self):
- if hasattr(self.loss_func, 'weight') and self.loss_func.weight is not None:
- self.device = self.loss_func.weight.device
- self.loss_func.weight = self.loss_func.weight.cpu()
- if hasattr(self.loss_func, 'reduction'):
- self.old_red = getattr(self.loss_func, 'reduction')
- setattr(self.loss_func, 'reduction', 'none')
- return self.loss_func
- else: return partial(self.loss_func, reduction='none')
-
- def __exit__(self, type, value, traceback):
- if self.device is not None: self.loss_func.weight = self.loss_func.weight.to(self.device)
- if self.old_red is not None: setattr(self.loss_func, 'reduction', self.old_red)
-
-def model_type(dtype):
- "Return the torch type corresponding to `dtype`."
- return (torch.float32 if np.issubdtype(dtype, np.floating) else
- torch.int64 if np.issubdtype(dtype, np.integer)
- else None)
-
-def np2model_tensor(a):
- "Transform numpy array `a` to a tensor of the same type."
- dtype = model_type(a.dtype)
- res = as_tensor(a)
- if not dtype: return res
- return res.type(dtype)
-
-def _pca(x, k=2):
- "Compute PCA of `x` with `k` dimensions."
- x = x-torch.mean(x,0)
- U,S,V = torch.svd(x.t())
- return torch.mm(x,U[:,:k])
-torch.Tensor.pca = _pca
-
-def trange_of(x):
- "Create a tensor from `range_of(x)`."
- return torch.arange(len(x))
-
-def to_np(x):
- "Convert a tensor to a numpy array."
- return x.data.cpu().numpy()
-
-# monkey patching to allow matplotlib to plot tensors
-def tensor__array__(self, dtype=None):
- res = to_np(self)
- if dtype is None: return res
- else: return res.astype(dtype, copy=False)
-Tensor.__array__ = tensor__array__
-Tensor.ndim = property(lambda x: len(x.shape))
-
-def grab_idx(x,i,batch_first:bool=True):
- "Grab the `i`-th batch in `x`, `batch_first` stating the batch dimension."
- if batch_first: return ([o[i].cpu() for o in x] if is_listy(x) else x[i].cpu())
- else: return ([o[:,i].cpu() for o in x] if is_listy(x) else x[:,i].cpu())
-
-def logit(x:Tensor)->Tensor:
- "Logit of `x`, clamped to avoid inf."
- x = x.clamp(1e-7, 1-1e-7)
- return -(1/x-1).log()
-
-def logit_(x:Tensor)->Tensor:
- "Inplace logit of `x`, clamped to avoid inf"
- x.clamp_(1e-7, 1-1e-7)
- return (x.reciprocal_().sub_(1)).log_().neg_()
-
-def set_all_seed(seed:int)->None:
- "Sets the seeds for all pseudo random generators in fastai lib"
- np.random.seed(seed)
- torch.manual_seed(seed)
- random.seed(seed)
-
-def uniform(low:Number, high:Number=None, size:Optional[List[int]]=None)->FloatOrTensor:
- "Draw 1 or shape=`size` random floats from uniform dist: min=`low`, max=`high`."
- if high is None: high=low
- return random.uniform(low,high) if size is None else torch.FloatTensor(*listify(size)).uniform_(low,high)
-
-def log_uniform(low, high, size:Optional[List[int]]=None)->FloatOrTensor:
- "Draw 1 or shape=`size` random floats from uniform dist: min=log(`low`), max=log(`high`)."
- res = uniform(log(low), log(high), size)
- return exp(res) if size is None else res.exp_()
-
-def rand_bool(p:float, size:Optional[List[int]]=None)->BoolOrTensor:
- "Draw 1 or shape=`size` random booleans (`True` occurring with probability `p`)."
- return uniform(0,1,size) < p
-
-def randint(low:int, high:int, size:Optional[List[int]]=None)->IntOrTensor:
- "Generate int or tensor `size` of ints between `low` and `high` (included)."
- return random.randint(low,high) if size is None else torch.randint(low,high+1,size)
-
-def one_param(m: nn.Module)->Tensor:
- "Return the first parameter of `m`."
- return next(m.parameters())
-
-def try_int(o:Any)->Any:
- "Try to convert `o` to int, default to `o` if not possible."
- # NB: single-item rank-1 array/tensor can be converted to int, but we don't want to do this
- if isinstance(o, (np.ndarray,Tensor)): return o if o.ndim else int(o)
- if isinstance(o, collections.abc.Sized) or getattr(o,'__array_interface__',False): return o
- try: return int(o)
- except: return o
-
-def get_model(model:nn.Module):
- "Return the model maybe wrapped inside `model`."
- return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model
-
-def flatten_check(out:Tensor, targ:Tensor) -> Tensor:
- "Check that `out` and `targ` have the same number of elements and flatten them."
- out,targ = out.contiguous().view(-1),targ.contiguous().view(-1)
- assert len(out) == len(targ), f"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}."
- return out,targ
-
-#Monkey-patch nn.DataParallel.reset
-def _data_parallel_reset(self):
- if hasattr(self.module, 'reset'): self.module.reset()
-nn.DataParallel.reset = _data_parallel_reset
-
-def remove_module_load(state_dict):
- """create new OrderedDict that does not contain `module.`"""
- new_state_dict = OrderedDict()
- for k, v in state_dict.items(): new_state_dict[k[7:]] = v
- return new_state_dict
-
-def num_distrib():
- "Return the number of processes in distributed training (if applicable)."
- return int(os.environ.get('WORLD_SIZE', 0))
-
-def rank_distrib():
- "Return the distributed rank of this process (if applicable)."
- return int(os.environ.get('RANK', 0))
-
-def add_metrics(last_metrics:Collection[Rank0Tensor], mets:Union[Rank0Tensor, Collection[Rank0Tensor]]):
- "Return a dictionary for updating `last_metrics` with `mets`."
- last_metrics,mets = listify(last_metrics),listify(mets)
- return {'last_metrics': last_metrics + mets}
-
-def try_save(state:Dict, path:Path=None, file:PathLikeOrBinaryStream=None):
- target = open(path/file, 'wb') if is_pathlike(file) else file
- try: torch.save(state, target)
- except OSError as e:
- raise Exception(f"{e}\n Can't write {path/file}. Pass an absolute writable pathlib obj `fname`.")
-
-def np_func(f):
- "Convert a function taking and returning numpy arrays to one taking and returning tensors"
- def _inner(*args, **kwargs):
- nargs = [to_np(arg) if isinstance(arg,Tensor) else arg for arg in args]
- return tensor(f(*nargs, **kwargs))
- functools.update_wrapper(_inner, f)
- return _inner
-
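
A short usage sketch for two of the helpers above, np_func and flatten_check (illustrative only; the names defined above are assumed to be in scope):

import numpy as np
import torch

@np_func
def np_rmse(pred, targ):
    # Receives numpy arrays even when called with tensors; the result is wrapped back into a tensor.
    return np.sqrt(((pred - targ) ** 2).mean())

score = np_rmse(torch.tensor([1.0, 2.0, 3.0]), torch.tensor([1.0, 2.0, 5.0]))  # tensor(1.1547...)

out, targ = flatten_check(torch.zeros(2, 3), torch.zeros(6))  # both flattened to shape [6]
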
diff --git a/spaces/Xenova/whisper-web/assets/index-6480d07e.js b/spaces/Xenova/whisper-web/assets/index-6480d07e.js
deleted file mode 100644
index fb450a2873f2a1ddd53eba5b492578045c92096e..0000000000000000000000000000000000000000
--- a/spaces/Xenova/whisper-web/assets/index-6480d07e.js
+++ /dev/null
@@ -1,47 +0,0 @@
-function Jd(e,t){for(var n=0;nr[l]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const o of l)if(o.type==="childList")for(const i of o.addedNodes)i.tagName==="LINK"&&i.rel==="modulepreload"&&r(i)}).observe(document,{childList:!0,subtree:!0});function n(l){const o={};return l.integrity&&(o.integrity=l.integrity),l.referrerPolicy&&(o.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?o.credentials="include":l.crossOrigin==="anonymous"?o.credentials="omit":o.credentials="same-origin",o}function r(l){if(l.ep)return;l.ep=!0;const o=n(l);fetch(l.href,o)}})();function Zd(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var ec={exports:{}},lo={},tc={exports:{}},$={};/**
- * @license React
- * react.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */var jr=Symbol.for("react.element"),ep=Symbol.for("react.portal"),tp=Symbol.for("react.fragment"),np=Symbol.for("react.strict_mode"),rp=Symbol.for("react.profiler"),lp=Symbol.for("react.provider"),op=Symbol.for("react.context"),ip=Symbol.for("react.forward_ref"),up=Symbol.for("react.suspense"),sp=Symbol.for("react.memo"),ap=Symbol.for("react.lazy"),Ls=Symbol.iterator;function cp(e){return e===null||typeof e!="object"?null:(e=Ls&&e[Ls]||e["@@iterator"],typeof e=="function"?e:null)}var nc={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},rc=Object.assign,lc={};function Hn(e,t,n){this.props=e,this.context=t,this.refs=lc,this.updater=n||nc}Hn.prototype.isReactComponent={};Hn.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};Hn.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function oc(){}oc.prototype=Hn.prototype;function ku(e,t,n){this.props=e,this.context=t,this.refs=lc,this.updater=n||nc}var xu=ku.prototype=new oc;xu.constructor=ku;rc(xu,Hn.prototype);xu.isPureReactComponent=!0;var _s=Array.isArray,ic=Object.prototype.hasOwnProperty,Cu={current:null},uc={key:!0,ref:!0,__self:!0,__source:!0};function sc(e,t,n){var r,l={},o=null,i=null;if(t!=null)for(r in t.ref!==void 0&&(i=t.ref),t.key!==void 0&&(o=""+t.key),t)ic.call(t,r)&&!uc.hasOwnProperty(r)&&(l[r]=t[r]);var u=arguments.length-2;if(u===1)l.children=n;else if(1>>1,ee=N[j];if(0>>1;jl(b,A))tel(G,b)?(N[j]=G,N[te]=A,j=te):(N[j]=b,N[st]=A,j=st);else if(tel(G,A))N[j]=G,N[te]=A,j=te;else break e}}return F}function l(N,F){var A=N.sortIndex-F.sortIndex;return A!==0?A:N.id-F.id}if(typeof performance=="object"&&typeof performance.now=="function"){var o=performance;e.unstable_now=function(){return o.now()}}else{var i=Date,u=i.now();e.unstable_now=function(){return i.now()-u}}var s=[],a=[],c=1,d=null,h=3,y=!1,m=!1,v=!1,P=typeof setTimeout=="function"?setTimeout:null,p=typeof clearTimeout=="function"?clearTimeout:null,f=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function g(N){for(var F=n(a);F!==null;){if(F.callback===null)r(a);else if(F.startTime<=N)r(a),F.sortIndex=F.expirationTime,t(s,F);else break;F=n(a)}}function E(N){if(v=!1,g(N),!m)if(n(s)!==null)m=!0,ut(C);else{var F=n(a);F!==null&&bt(E,F.startTime-N)}}function C(N,F){m=!1,v&&(v=!1,p(_),_=-1),y=!0;var A=h;try{for(g(F),d=n(s);d!==null&&(!(d.expirationTime>F)||N&&!V());){var j=d.callback;if(typeof j=="function"){d.callback=null,h=d.priorityLevel;var ee=j(d.expirationTime<=F);F=e.unstable_now(),typeof ee=="function"?d.callback=ee:d===n(s)&&r(s),g(F)}else r(s);d=n(s)}if(d!==null)var Kt=!0;else{var st=n(a);st!==null&&bt(E,st.startTime-F),Kt=!1}return Kt}finally{d=null,h=A,y=!1}}var L=!1,T=null,_=-1,U=5,O=-1;function V(){return!(e.unstable_now()-ON||125j?(N.sortIndex=A,t(a,N),n(s)===null&&N===n(a)&&(v?(p(_),_=-1):v=!0,bt(E,A-j))):(N.sortIndex=ee,t(s,N),m||y||(m=!0,ut(C))),N},e.unstable_shouldYield=V,e.unstable_wrapCallback=function(N){var F=h;return function(){var A=h;h=F;try{return N.apply(this,arguments)}finally{h=A}}}})(dc);fc.exports=dc;var Ep=fc.exports;/**
- * @license React
- * react-dom.production.min.js
- *
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
[… minified production JavaScript bundle elided: React DOM production runtime, the axios v1.4.0 HTTP client, and Headless UI focus/DOM utilities, all part of this deleted build artifact …]
Focus.Last")})(),a=t&32?{preventScroll:!0}:{},c=0,d=i.length,h;do{if(c>=d||c+d<=0)return 0;let y=s+c;if(t&16)y=(y+d)%d;else{if(y<0)return 3;if(y>=d)return 1}h=i[y],h==null||h.focus(a),c+=u}while(h!==o.activeElement);return t&6&&q0(h)&&h.select(),2}function si(e,t,n){let r=vt(t);w.useEffect(()=>{function l(o){r.current(o)}return document.addEventListener(e,l,n),()=>document.removeEventListener(e,l,n)},[e,n])}function X0(e,t,n=!0){let r=w.useRef(!1);w.useEffect(()=>{requestAnimationFrame(()=>{r.current=n})},[n]);function l(i,u){if(!r.current||i.defaultPrevented)return;let s=function c(d){return typeof d=="function"?c(d()):Array.isArray(d)||d instanceof Set?d:[d]}(e),a=u(i);if(a!==null&&a.getRootNode().contains(a)){for(let c of s){if(c===null)continue;let d=c instanceof HTMLElement?c:c.current;if(d!=null&&d.contains(a)||i.composed&&i.composedPath().includes(d))return}return!b0(a,zd.Loose)&&a.tabIndex!==-1&&i.preventDefault(),t(i,a)}}let o=w.useRef(null);si("mousedown",i=>{var u,s;r.current&&(o.current=((s=(u=i.composedPath)==null?void 0:u.call(i))==null?void 0:s[0])||i.target)},!0),si("click",i=>{o.current&&(l(i,()=>o.current),o.current=null)},!0),si("blur",i=>l(i,()=>window.document.activeElement instanceof HTMLIFrameElement?window.document.activeElement:null),!0)}let Md=Symbol();function J0(e,t=!0){return Object.assign(e,{[Md]:t})}function Xe(...e){let t=w.useRef(e);w.useEffect(()=>{t.current=e},[e]);let n=ue(r=>{for(let l of t.current)l!=null&&(typeof l=="function"?l(r):l.current=r)});return e.every(r=>r==null||(r==null?void 0:r[Md]))?void 0:n}function gu(...e){return e.filter(Boolean).join(" ")}var to=(e=>(e[e.None=0]="None",e[e.RenderStrategy=1]="RenderStrategy",e[e.Static=2]="Static",e))(to||{}),pt=(e=>(e[e.Unmount=0]="Unmount",e[e.Hidden=1]="Hidden",e))(pt||{});function Be({ourProps:e,theirProps:t,slot:n,defaultTag:r,features:l,visible:o=!0,name:i}){let u=jd(t,e);if(o)return dl(u,n,r,i);let s=l??0;if(s&2){let{static:a=!1,...c}=u;if(a)return dl(c,n,r,i)}if(s&1){let{unmount:a=!0,...c}=u;return he(a?0:1,{[0](){return null},[1](){return dl({...c,hidden:!0,style:{display:"none"}},n,r,i)}})}return dl(u,n,r,i)}function dl(e,t={},n,r){let{as:l=n,children:o,refName:i="ref",...u}=ai(e,["unmount","static"]),s=e.ref!==void 0?{[i]:e.ref}:{},a=typeof o=="function"?o(t):o;"className"in u&&u.className&&typeof u.className=="function"&&(u.className=u.className(t));let c={};if(t){let d=!1,h=[];for(let[y,m]of Object.entries(t))typeof m=="boolean"&&(d=!0),m===!0&&h.push(y);d&&(c["data-headlessui-state"]=h.join(" "))}if(l===w.Fragment&&Object.keys(Ga(u)).length>0){if(!w.isValidElement(a)||Array.isArray(a)&&a.length>1)throw new Error(['Passing props on "Fragment"!',"",`The current component <${r} /> is rendering a "Fragment".`,"However we need to passthrough the following props:",Object.keys(u).map(m=>` - ${m}`).join(`
-`),"","You can apply a few solutions:",['Add an `as="..."` prop, to ensure that we render an actual element instead of a "Fragment".',"Render a single element as the child so that we can forward the props onto that element."].map(m=>` - ${m}`).join(`
-`)].join(`
-`));let d=a.props,h=typeof(d==null?void 0:d.className)=="function"?(...m)=>gu(d==null?void 0:d.className(...m),u.className):gu(d==null?void 0:d.className,u.className),y=h?{className:h}:{};return w.cloneElement(a,Object.assign({},jd(a.props,Ga(ai(u,["ref"]))),c,s,Z0(a.ref,s.ref),y))}return w.createElement(l,Object.assign({},ai(u,["ref"]),l!==w.Fragment&&s,l!==w.Fragment&&c),a)}function Z0(...e){return{ref:e.every(t=>t==null)?void 0:t=>{for(let n of e)n!=null&&(typeof n=="function"?n(t):n.current=t)}}}function jd(...e){if(e.length===0)return{};if(e.length===1)return e[0];let t={},n={};for(let r of e)for(let l in r)l.startsWith("on")&&typeof r[l]=="function"?(n[l]!=null||(n[l]=[]),n[l].push(r[l])):t[l]=r[l];if(t.disabled||t["aria-disabled"])return Object.assign(t,Object.fromEntries(Object.keys(n).map(r=>[r,void 0])));for(let r in n)Object.assign(t,{[r](l,...o){let i=n[r];for(let u of i){if((l instanceof Event||(l==null?void 0:l.nativeEvent)instanceof Event)&&l.defaultPrevented)return;u(l,...o)}}});return t}function Ae(e){var t;return Object.assign(w.forwardRef(e),{displayName:(t=e.displayName)!=null?t:e.name})}function Ga(e){let t=Object.assign({},e);for(let n in t)t[n]===void 0&&delete t[n];return t}function ai(e,t=[]){let n=Object.assign({},e);for(let r of t)r in n&&delete n[r];return n}function e1(e){let t=e.parentElement,n=null;for(;t&&!(t instanceof HTMLFieldSetElement);)t instanceof HTMLLegendElement&&(n=t),t=t.parentElement;let r=(t==null?void 0:t.getAttribute("disabled"))==="";return r&&t1(n)?!1:r}function t1(e){if(!e)return!1;let t=e.previousElementSibling;for(;t!==null;){if(t instanceof HTMLLegendElement)return!1;t=t.previousElementSibling}return!0}let n1="div";var no=(e=>(e[e.None=1]="None",e[e.Focusable=2]="Focusable",e[e.Hidden=4]="Hidden",e))(no||{});function r1(e,t){let{features:n=1,...r}=e,l={ref:t,"aria-hidden":(n&2)===2?!0:void 0,style:{position:"fixed",top:1,left:1,width:1,height:0,padding:0,margin:-1,overflow:"hidden",clip:"rect(0, 0, 0, 0)",whiteSpace:"nowrap",borderWidth:"0",...(n&4)===4&&(n&2)!==2&&{display:"none"}}};return Be({ourProps:l,theirProps:r,slot:{},defaultTag:n1,name:"Hidden"})}let vu=Ae(r1),xs=w.createContext(null);xs.displayName="OpenClosedContext";var Pe=(e=>(e[e.Open=1]="Open",e[e.Closed=2]="Closed",e[e.Closing=4]="Closing",e[e.Opening=8]="Opening",e))(Pe||{});function Cs(){return w.useContext(xs)}function l1({value:e,children:t}){return D.createElement(xs.Provider,{value:e},t)}var Ud=(e=>(e.Space=" ",e.Enter="Enter",e.Escape="Escape",e.Backspace="Backspace",e.Delete="Delete",e.ArrowLeft="ArrowLeft",e.ArrowUp="ArrowUp",e.ArrowRight="ArrowRight",e.ArrowDown="ArrowDown",e.Home="Home",e.End="End",e.PageUp="PageUp",e.PageDown="PageDown",e.Tab="Tab",e))(Ud||{});function Ts(e,t){let n=w.useRef([]),r=ue(e);w.useEffect(()=>{let l=[...n.current];for(let[o,i]of t.entries())if(n.current[o]!==i){let u=r(t,l);return n.current=t,u}},[r,...t])}function o1(){return/iPhone/gi.test(window.navigator.platform)||/Mac/gi.test(window.navigator.platform)&&window.navigator.maxTouchPoints>0}function i1(e,t,n){let r=vt(t);w.useEffect(()=>{function l(o){r.current(o)}return window.addEventListener(e,l,n),()=>window.removeEventListener(e,l,n)},[e,n])}var cr=(e=>(e[e.Forwards=0]="Forwards",e[e.Backwards=1]="Backwards",e))(cr||{});function u1(){let e=w.useRef(0);return i1("keydown",t=>{t.key==="Tab"&&(e.current=t.shiftKey?1:0)},!0),e}function br(){let e=w.useRef(!1);return ot(()=>(e.current=!0,()=>{e.current=!1}),[]),e}function Po(...e){return w.useMemo(()=>$d(...e),[...e])}function 
Id(e,t,n,r){let l=vt(n);w.useEffect(()=>{e=e??window;function o(i){l.current(i)}return e.addEventListener(t,o,r),()=>e.removeEventListener(t,o,r)},[e,t,r])}function s1(e){function t(){document.readyState!=="loading"&&(e(),document.removeEventListener("DOMContentLoaded",t))}typeof window<"u"&&typeof document<"u"&&(document.addEventListener("DOMContentLoaded",t),t())}function Bd(e){if(!e)return new Set;if(typeof e=="function")return new Set(e());let t=new Set;for(let n of e.current)n.current instanceof HTMLElement&&t.add(n.current);return t}let a1="div";var Hd=(e=>(e[e.None=1]="None",e[e.InitialFocus=2]="InitialFocus",e[e.TabLock=4]="TabLock",e[e.FocusLock=8]="FocusLock",e[e.RestoreFocus=16]="RestoreFocus",e[e.All=30]="All",e))(Hd||{});function c1(e,t){let n=w.useRef(null),r=Xe(n,t),{initialFocus:l,containers:o,features:i=30,...u}=e;Kn()||(i=1);let s=Po(n);p1({ownerDocument:s},!!(i&16));let a=m1({ownerDocument:s,container:n,initialFocus:l},!!(i&2));h1({ownerDocument:s,container:n,containers:o,previousActiveElement:a},!!(i&8));let c=u1(),d=ue(v=>{let P=n.current;P&&(p=>p())(()=>{he(c.current,{[cr.Forwards]:()=>{_l(P,Xt.First,{skipElements:[v.relatedTarget]})},[cr.Backwards]:()=>{_l(P,Xt.Last,{skipElements:[v.relatedTarget]})}})})}),h=ks(),y=w.useRef(!1),m={ref:r,onKeyDown(v){v.key=="Tab"&&(y.current=!0,h.requestAnimationFrame(()=>{y.current=!1}))},onBlur(v){let P=Bd(o);n.current instanceof HTMLElement&&P.add(n.current);let p=v.relatedTarget;p instanceof HTMLElement&&p.dataset.headlessuiFocusGuard!=="true"&&(Vd(P,p)||(y.current?_l(n.current,he(c.current,{[cr.Forwards]:()=>Xt.Next,[cr.Backwards]:()=>Xt.Previous})|Xt.WrapAround,{relativeTo:v.target}):v.target instanceof HTMLElement&&ln(v.target)))}};return D.createElement(D.Fragment,null,!!(i&4)&&D.createElement(vu,{as:"button",type:"button","data-headlessui-focus-guard":!0,onFocus:d,features:no.Focusable}),Be({ourProps:m,theirProps:u,defaultTag:a1,name:"FocusTrap"}),!!(i&4)&&D.createElement(vu,{as:"button",type:"button","data-headlessui-focus-guard":!0,onFocus:d,features:no.Focusable}))}let f1=Ae(c1),lr=Object.assign(f1,{features:Hd}),_t=[];s1(()=>{function e(t){t.target instanceof HTMLElement&&t.target!==document.body&&_t[0]!==t.target&&(_t.unshift(t.target),_t=_t.filter(n=>n!=null&&n.isConnected),_t.splice(10))}window.addEventListener("click",e,{capture:!0}),window.addEventListener("mousedown",e,{capture:!0}),window.addEventListener("focus",e,{capture:!0}),document.body.addEventListener("click",e,{capture:!0}),document.body.addEventListener("mousedown",e,{capture:!0}),document.body.addEventListener("focus",e,{capture:!0})});function d1(e=!0){let t=w.useRef(_t.slice());return Ts(([n],[r])=>{r===!0&&n===!1&&Qr(()=>{t.current.splice(0)}),r===!1&&n===!0&&(t.current=_t.slice())},[e,_t,t]),ue(()=>{var n;return(n=t.current.find(r=>r!=null&&r.isConnected))!=null?n:null})}function p1({ownerDocument:e},t){let n=d1(t);Ts(()=>{t||(e==null?void 0:e.activeElement)===(e==null?void 0:e.body)&&ln(n())},[t]);let r=w.useRef(!1);w.useEffect(()=>(r.current=!1,()=>{r.current=!0,Qr(()=>{r.current&&ln(n())})}),[])}function m1({ownerDocument:e,container:t,initialFocus:n},r){let l=w.useRef(null),o=br();return Ts(()=>{if(!r)return;let i=t.current;i&&Qr(()=>{if(!o.current)return;let u=e==null?void 0:e.activeElement;if(n!=null&&n.current){if((n==null?void 0:n.current)===u){l.current=u;return}}else if(i.contains(u)){l.current=u;return}n!=null&&n.current?ln(n.current):_l(i,Xt.First)===Dd.Error&&console.warn("There are no focusable elements inside the 
"),l.current=e==null?void 0:e.activeElement})},[r]),l}function h1({ownerDocument:e,container:t,containers:n,previousActiveElement:r},l){let o=br();Id(e==null?void 0:e.defaultView,"focus",i=>{if(!l||!o.current)return;let u=Bd(n);t.current instanceof HTMLElement&&u.add(t.current);let s=r.current;if(!s)return;let a=i.target;a&&a instanceof HTMLElement?Vd(u,a)?(r.current=a,ln(a)):(i.preventDefault(),i.stopPropagation(),ln(s)):ln(r.current)},!0)}function Vd(e,t){for(let n of e)if(n.contains(t))return!0;return!1}let Wd=w.createContext(!1);function g1(){return w.useContext(Wd)}function yu(e){return D.createElement(Wd.Provider,{value:e.force},e.children)}function v1(e){let t=g1(),n=w.useContext(Qd),r=Po(e),[l,o]=w.useState(()=>{if(!t&&n!==null||gt.isServer)return null;let i=r==null?void 0:r.getElementById("headlessui-portal-root");if(i)return i;if(r===null)return null;let u=r.createElement("div");return u.setAttribute("id","headlessui-portal-root"),r.body.appendChild(u)});return w.useEffect(()=>{l!==null&&(r!=null&&r.body.contains(l)||r==null||r.body.appendChild(l))},[l,r]),w.useEffect(()=>{t||n!==null&&o(n.current)},[n,o,t]),l}let y1=w.Fragment;function w1(e,t){let n=e,r=w.useRef(null),l=Xe(J0(c=>{r.current=c}),t),o=Po(r),i=v1(r),[u]=w.useState(()=>{var c;return gt.isServer?null:(c=o==null?void 0:o.createElement("div"))!=null?c:null}),s=Kn(),a=w.useRef(!1);return ot(()=>{if(a.current=!1,!(!i||!u))return i.contains(u)||(u.setAttribute("data-headlessui-portal",""),i.appendChild(u)),()=>{a.current=!0,Qr(()=>{var c;a.current&&(!i||!u||(u instanceof Node&&i.contains(u)&&i.removeChild(u),i.childNodes.length<=0&&((c=i.parentElement)==null||c.removeChild(i))))})}},[i,u]),s?!i||!u?null:md.createPortal(Be({ourProps:{ref:l},theirProps:n,defaultTag:y1,name:"Portal"}),u):null}let S1=w.Fragment,Qd=w.createContext(null);function E1(e,t){let{target:n,...r}=e,l={ref:Xe(t)};return D.createElement(Qd.Provider,{value:n},Be({ourProps:l,theirProps:r,defaultTag:S1,name:"Popover.Group"}))}let k1=Ae(w1),x1=Ae(E1),wu=Object.assign(k1,{Group:x1}),bd=w.createContext(null);function Kd(){let e=w.useContext(bd);if(e===null){let t=new Error("You used a component, but it is not inside a relevant parent.");throw Error.captureStackTrace&&Error.captureStackTrace(t,Kd),t}return e}function C1(){let[e,t]=w.useState([]);return[e.length>0?e.join(" "):void 0,w.useMemo(()=>function(n){let r=ue(o=>(t(i=>[...i,o]),()=>t(i=>{let u=i.slice(),s=u.indexOf(o);return s!==-1&&u.splice(s,1),u}))),l=w.useMemo(()=>({register:r,slot:n.slot,name:n.name,props:n.props}),[r,n.slot,n.name,n.props]);return D.createElement(bd.Provider,{value:l},n.children)},[t])]}let T1="p";function N1(e,t){let n=Gn(),{id:r=`headlessui-description-${n}`,...l}=e,o=Kd(),i=Xe(t);ot(()=>o.register(r),[r,o.register]);let u={ref:i,...o.props,id:r};return Be({ourProps:u,theirProps:l,slot:o.slot||{},defaultTag:T1,name:o.name||"Description"})}let P1=Ae(N1),L1=Object.assign(P1,{}),Ns=w.createContext(()=>{});Ns.displayName="StackContext";var Su=(e=>(e[e.Add=0]="Add",e[e.Remove=1]="Remove",e))(Su||{});function _1(){return w.useContext(Ns)}function R1({children:e,onUpdate:t,type:n,element:r,enabled:l}){let o=_1(),i=ue((...u)=>{t==null||t(...u),o(...u)});return ot(()=>{let u=l===void 0||l===!0;return u&&i(0,n,r),()=>{u&&i(1,n,r)}},[i,n,r,l]),D.createElement(Ns.Provider,{value:i},e)}function O1(e,t){return e===t&&(e!==0||1/e===1/t)||e!==e&&t!==t}const F1=typeof Object.is=="function"?Object.is:O1,{useState:A1,useEffect:$1,useLayoutEffect:D1,useDebugValue:z1}=vi;function M1(e,t,n){const 
r=t(),[{inst:l},o]=A1({inst:{value:r,getSnapshot:t}});return D1(()=>{l.value=r,l.getSnapshot=t,ci(l)&&o({inst:l})},[e,r,t]),$1(()=>(ci(l)&&o({inst:l}),e(()=>{ci(l)&&o({inst:l})})),[e]),z1(r),r}function ci(e){const t=e.getSnapshot,n=e.value;try{const r=t();return!F1(n,r)}catch{return!0}}function j1(e,t,n){return t()}const U1=typeof window<"u"&&typeof window.document<"u"&&typeof window.document.createElement<"u",I1=!U1,B1=I1?j1:M1,H1="useSyncExternalStore"in vi?(e=>e.useSyncExternalStore)(vi):B1;function V1(e){return H1(e.subscribe,e.getSnapshot,e.getSnapshot)}function W1(e,t){let n=e(),r=new Set;return{getSnapshot(){return n},subscribe(l){return r.add(l),()=>r.delete(l)},dispatch(l,...o){let i=t[l].call(n,...o);i&&(n=i,r.forEach(u=>u()))}}}function Q1(){let e;return{before({doc:t}){var n;let r=t.documentElement;e=((n=t.defaultView)!=null?n:window).innerWidth-r.clientWidth},after({doc:t,d:n}){let r=t.documentElement,l=r.clientWidth-r.offsetWidth,o=e-l;n.style(r,"paddingRight",`${o}px`)}}}function b1(){if(!o1())return{};let e;return{before(){e=window.pageYOffset},after({doc:t,d:n,meta:r}){function l(i){return r.containers.flatMap(u=>u()).some(u=>u.contains(i))}n.style(t.body,"marginTop",`-${e}px`),window.scrollTo(0,0);let o=null;n.addEventListener(t,"click",i=>{if(i.target instanceof HTMLElement)try{let u=i.target.closest("a");if(!u)return;let{hash:s}=new URL(u.href),a=t.querySelector(s);a&&!l(a)&&(o=a)}catch{}},!0),n.addEventListener(t,"touchmove",i=>{i.target instanceof HTMLElement&&!l(i.target)&&i.preventDefault()},{passive:!1}),n.add(()=>{window.scrollTo(0,window.pageYOffset+e),o&&o.isConnected&&(o.scrollIntoView({block:"nearest"}),o=null)})}}}function K1(){return{before({doc:e,d:t}){t.style(e.documentElement,"overflow","hidden")}}}function G1(e){let t={};for(let n of e)Object.assign(t,n(t));return t}let tn=W1(()=>new Map,{PUSH(e,t){var n;let r=(n=this.get(e))!=null?n:{doc:e,count:0,d:bn(),meta:new Set};return r.count++,r.meta.add(t),this.set(e,r),this},POP(e,t){let n=this.get(e);return n&&(n.count--,n.meta.delete(t)),this},SCROLL_PREVENT({doc:e,d:t,meta:n}){let r={doc:e,d:t,meta:G1(n)},l=[b1(),Q1(),K1()];l.forEach(({before:o})=>o==null?void 0:o(r)),l.forEach(({after:o})=>o==null?void 0:o(r))},SCROLL_ALLOW({d:e}){e.dispose()},TEARDOWN({doc:e}){this.delete(e)}});tn.subscribe(()=>{let e=tn.getSnapshot(),t=new Map;for(let[n]of e)t.set(n,n.documentElement.style.overflow);for(let n of e.values()){let r=t.get(n.doc)==="hidden",l=n.count!==0;(l&&!r||!l&&r)&&tn.dispatch(n.count>0?"SCROLL_PREVENT":"SCROLL_ALLOW",n),n.count===0&&tn.dispatch("TEARDOWN",n)}});function q1(e,t,n){let r=V1(tn),l=e?r.get(e):void 0,o=l?l.count>0:!1;return ot(()=>{if(!(!e||!t))return tn.dispatch("PUSH",e,n),()=>tn.dispatch("POP",e,n)},[t,e]),o}let fi=new Map,or=new Map;function qa(e,t=!0){ot(()=>{var n;if(!t)return;let r=typeof e=="function"?e():e.current;if(!r)return;function l(){var i;if(!r)return;let u=(i=or.get(r))!=null?i:1;if(u===1?or.delete(r):or.set(r,u-1),u!==1)return;let s=fi.get(r);s&&(s["aria-hidden"]===null?r.removeAttribute("aria-hidden"):r.setAttribute("aria-hidden",s["aria-hidden"]),r.inert=s.inert,fi.delete(r))}let o=(n=or.get(r))!=null?n:0;return or.set(r,o+1),o!==0||(fi.set(r,{"aria-hidden":r.getAttribute("aria-hidden"),inert:r.inert}),r.setAttribute("aria-hidden","true"),r.inert=!0),l},[e,t])}var Y1=(e=>(e[e.Open=0]="Open",e[e.Closed=1]="Closed",e))(Y1||{}),X1=(e=>(e[e.SetTitleId=0]="SetTitleId",e))(X1||{});let J1={[0](e,t){return 
e.titleId===t.id?e:{...e,titleId:t.id}}},ro=w.createContext(null);ro.displayName="DialogContext";function Kr(e){let t=w.useContext(ro);if(t===null){let n=new Error(`<${e} /> is missing a parent component.`);throw Error.captureStackTrace&&Error.captureStackTrace(n,Kr),n}return t}function Z1(e,t,n=()=>[document.body]){q1(e,t,r=>{var l;return{containers:[...(l=r.containers)!=null?l:[],n]}})}function eg(e,t){return he(t.type,J1,e,t)}let tg="div",ng=to.RenderStrategy|to.Static;function rg(e,t){let n=Gn(),{id:r=`headlessui-dialog-${n}`,open:l,onClose:o,initialFocus:i,__demoMode:u=!1,...s}=e,[a,c]=w.useState(0),d=Cs();l===void 0&&d!==null&&(l=(d&Pe.Open)===Pe.Open);let h=w.useRef(null),y=Xe(h,t),m=w.useRef(null),v=Po(h),P=e.hasOwnProperty("open")||d!==null,p=e.hasOwnProperty("onClose");if(!P&&!p)throw new Error("You have to provide an `open` and an `onClose` prop to the `Dialog` component.");if(!P)throw new Error("You provided an `onClose` prop to the `Dialog`, but forgot an `open` prop.");if(!p)throw new Error("You provided an `open` prop to the `Dialog`, but forgot an `onClose` prop.");if(typeof l!="boolean")throw new Error(`You provided an \`open\` prop to the \`Dialog\`, but the value is not a boolean. Received: ${l}`);if(typeof o!="function")throw new Error(`You provided an \`onClose\` prop to the \`Dialog\`, but the value is not a function. Received: ${o}`);let f=l?0:1,[g,E]=w.useReducer(eg,{titleId:null,descriptionId:null,panelRef:w.createRef()}),C=ue(()=>o(!1)),L=ue(b=>E({type:0,id:b})),T=Kn()?u?!1:f===0:!1,_=a>1,U=w.useContext(ro)!==null,O=_?"parent":"leaf",V=d!==null?(d&Pe.Closing)===Pe.Closing:!1,He=(()=>U||V?!1:T)(),Ve=w.useCallback(()=>{var b,te;return(te=Array.from((b=v==null?void 0:v.querySelectorAll("body > *"))!=null?b:[]).find(G=>G.id==="headlessui-portal-root"?!1:G.contains(m.current)&&G instanceof HTMLElement))!=null?te:null},[m]);qa(Ve,He);let mn=(()=>_?!0:T)(),it=w.useCallback(()=>{var b,te;return(te=Array.from((b=v==null?void 0:v.querySelectorAll("[data-headlessui-portal]"))!=null?b:[]).find(G=>G.contains(m.current)&&G instanceof HTMLElement))!=null?te:null},[m]);qa(it,mn);let ut=ue(()=>{var b,te;return[...Array.from((b=v==null?void 0:v.querySelectorAll("html > *, body > *, [data-headlessui-portal]"))!=null?b:[]).filter(G=>!(G===document.body||G===document.head||!(G instanceof HTMLElement)||G.contains(m.current)||g.panelRef.current&&G.contains(g.panelRef.current))),(te=g.panelRef.current)!=null?te:h.current]}),bt=(()=>!(!T||_))();X0(()=>ut(),C,bt);let N=(()=>!(_||f!==0))();Id(v==null?void 0:v.defaultView,"keydown",b=>{N&&(b.defaultPrevented||b.key===Ud.Escape&&(b.preventDefault(),b.stopPropagation(),C()))});let F=(()=>!(V||f!==0||U))();Z1(v,F,ut),w.useEffect(()=>{if(f!==0||!h.current)return;let b=new ResizeObserver(te=>{for(let G of te){let Gr=G.target.getBoundingClientRect();Gr.x===0&&Gr.y===0&&Gr.width===0&&Gr.height===0&&C()}});return b.observe(h.current),()=>b.disconnect()},[f,h,C]);let[A,j]=C1(),ee=w.useMemo(()=>[{dialogState:f,close:C,setTitleId:L},g],[f,g,C,L]),Kt=w.useMemo(()=>({open:f===0}),[f]),st={ref:y,id:r,role:"dialog","aria-modal":f===0?!0:void 0,"aria-labelledby":g.titleId,"aria-describedby":A};return 
D.createElement(R1,{type:"Dialog",enabled:f===0,element:h,onUpdate:ue((b,te)=>{te==="Dialog"&&he(b,{[Su.Add]:()=>c(G=>G+1),[Su.Remove]:()=>c(G=>G-1)})})},D.createElement(yu,{force:!0},D.createElement(wu,null,D.createElement(ro.Provider,{value:ee},D.createElement(wu.Group,{target:h},D.createElement(yu,{force:!1},D.createElement(j,{slot:Kt,name:"Dialog.Description"},D.createElement(lr,{initialFocus:i,containers:ut,features:T?he(O,{parent:lr.features.RestoreFocus,leaf:lr.features.All&~lr.features.FocusLock}):lr.features.None},Be({ourProps:st,theirProps:s,slot:Kt,defaultTag:tg,features:ng,visible:f===0,name:"Dialog"})))))))),D.createElement(vu,{features:no.Hidden,ref:m}))}let lg="div";function og(e,t){let n=Gn(),{id:r=`headlessui-dialog-overlay-${n}`,...l}=e,[{dialogState:o,close:i}]=Kr("Dialog.Overlay"),u=Xe(t),s=ue(c=>{if(c.target===c.currentTarget){if(e1(c.currentTarget))return c.preventDefault();c.preventDefault(),c.stopPropagation(),i()}}),a=w.useMemo(()=>({open:o===0}),[o]);return Be({ourProps:{ref:u,id:r,"aria-hidden":!0,onClick:s},theirProps:l,slot:a,defaultTag:lg,name:"Dialog.Overlay"})}let ig="div";function ug(e,t){let n=Gn(),{id:r=`headlessui-dialog-backdrop-${n}`,...l}=e,[{dialogState:o},i]=Kr("Dialog.Backdrop"),u=Xe(t);w.useEffect(()=>{if(i.panelRef.current===null)throw new Error("A component is being used, but a component is missing.")},[i.panelRef]);let s=w.useMemo(()=>({open:o===0}),[o]);return D.createElement(yu,{force:!0},D.createElement(wu,null,Be({ourProps:{ref:u,id:r,"aria-hidden":!0},theirProps:l,slot:s,defaultTag:ig,name:"Dialog.Backdrop"})))}let sg="div";function ag(e,t){let n=Gn(),{id:r=`headlessui-dialog-panel-${n}`,...l}=e,[{dialogState:o},i]=Kr("Dialog.Panel"),u=Xe(t,i.panelRef),s=w.useMemo(()=>({open:o===0}),[o]),a=ue(c=>{c.stopPropagation()});return Be({ourProps:{ref:u,id:r,onClick:a},theirProps:l,slot:s,defaultTag:sg,name:"Dialog.Panel"})}let cg="h2";function fg(e,t){let n=Gn(),{id:r=`headlessui-dialog-title-${n}`,...l}=e,[{dialogState:o,setTitleId:i}]=Kr("Dialog.Title"),u=Xe(t);w.useEffect(()=>(i(r),()=>i(null)),[r,i]);let s=w.useMemo(()=>({open:o===0}),[o]);return Be({ourProps:{ref:u,id:r},theirProps:l,slot:s,defaultTag:cg,name:"Dialog.Title"})}let dg=Ae(rg),pg=Ae(ug),mg=Ae(ag),hg=Ae(og),gg=Ae(fg),di=Object.assign(dg,{Backdrop:pg,Panel:mg,Overlay:hg,Title:gg,Description:L1});function vg(e=0){let[t,n]=w.useState(e),r=br(),l=w.useCallback(s=>{r.current&&n(a=>a|s)},[t,r]),o=w.useCallback(s=>!!(t&s),[t]),i=w.useCallback(s=>{r.current&&n(a=>a&~s)},[n,r]),u=w.useCallback(s=>{r.current&&n(a=>a^s)},[n]);return{flags:t,addFlag:l,hasFlag:o,removeFlag:i,toggleFlag:u}}function yg(e){let t={called:!1};return(...n)=>{if(!t.called)return t.called=!0,e(...n)}}function pi(e,...t){e&&t.length>0&&e.classList.add(...t)}function mi(e,...t){e&&t.length>0&&e.classList.remove(...t)}function wg(e,t){let n=bn();if(!e)return n.dispose;let{transitionDuration:r,transitionDelay:l}=getComputedStyle(e),[o,i]=[r,l].map(s=>{let[a=0]=s.split(",").filter(Boolean).map(c=>c.includes("ms")?parseFloat(c):parseFloat(c)*1e3).sort((c,d)=>d-c);return a}),u=o+i;if(u!==0){n.group(a=>{a.setTimeout(()=>{t(),a.dispose()},u),a.addEventListener(e,"transitionrun",c=>{c.target===c.currentTarget&&a.dispose()})});let s=n.addEventListener(e,"transitionend",a=>{a.target===a.currentTarget&&(t(),s())})}else t();return n.add(()=>t()),n.dispose}function Sg(e,t,n,r){let l=n?"enter":"leave",o=bn(),i=r!==void 0?yg(r):()=>{};l==="enter"&&(e.removeAttribute("hidden"),e.style.display="");let 
u=he(l,{enter:()=>t.enter,leave:()=>t.leave}),s=he(l,{enter:()=>t.enterTo,leave:()=>t.leaveTo}),a=he(l,{enter:()=>t.enterFrom,leave:()=>t.leaveFrom});return mi(e,...t.enter,...t.enterTo,...t.enterFrom,...t.leave,...t.leaveFrom,...t.leaveTo,...t.entered),pi(e,...u,...a),o.nextFrame(()=>{mi(e,...a),pi(e,...s),wg(e,()=>(mi(e,...u),pi(e,...t.entered),i()))}),o.dispose}function Eg({container:e,direction:t,classes:n,onStart:r,onStop:l}){let o=br(),i=ks(),u=vt(t);ot(()=>{let s=bn();i.add(s.dispose);let a=e.current;if(a&&u.current!=="idle"&&o.current)return s.dispose(),r.current(u.current),s.add(Sg(a,n.current,u.current==="enter",()=>{s.dispose(),l.current(u.current)})),s.dispose},[t])}function Gt(e=""){return e.split(" ").filter(t=>t.trim().length>1)}let Lo=w.createContext(null);Lo.displayName="TransitionContext";var kg=(e=>(e.Visible="visible",e.Hidden="hidden",e))(kg||{});function xg(){let e=w.useContext(Lo);if(e===null)throw new Error("A is used but it is missing a parent or .");return e}function Cg(){let e=w.useContext(_o);if(e===null)throw new Error("A is used but it is missing a parent or .");return e}let _o=w.createContext(null);_o.displayName="NestingContext";function Ro(e){return"children"in e?Ro(e.children):e.current.filter(({el:t})=>t.current!==null).filter(({state:t})=>t==="visible").length>0}function Gd(e,t){let n=vt(e),r=w.useRef([]),l=br(),o=ks(),i=ue((y,m=pt.Hidden)=>{let v=r.current.findIndex(({el:P})=>P===y);v!==-1&&(he(m,{[pt.Unmount](){r.current.splice(v,1)},[pt.Hidden](){r.current[v].state="hidden"}}),o.microTask(()=>{var P;!Ro(r)&&l.current&&((P=n.current)==null||P.call(n))}))}),u=ue(y=>{let m=r.current.find(({el:v})=>v===y);return m?m.state!=="visible"&&(m.state="visible"):r.current.push({el:y,state:"visible"}),()=>i(y,pt.Unmount)}),s=w.useRef([]),a=w.useRef(Promise.resolve()),c=w.useRef({enter:[],leave:[],idle:[]}),d=ue((y,m,v)=>{s.current.splice(0),t&&(t.chains.current[m]=t.chains.current[m].filter(([P])=>P!==y)),t==null||t.chains.current[m].push([y,new Promise(P=>{s.current.push(P)})]),t==null||t.chains.current[m].push([y,new Promise(P=>{Promise.all(c.current[m].map(([p,f])=>f)).then(()=>P())})]),m==="enter"?a.current=a.current.then(()=>t==null?void 0:t.wait.current).then(()=>v(m)):v(m)}),h=ue((y,m,v)=>{Promise.all(c.current[m].splice(0).map(([P,p])=>p)).then(()=>{var P;(P=s.current.shift())==null||P()}).then(()=>v(m))});return w.useMemo(()=>({children:r,register:u,unregister:i,onStart:d,onStop:h,wait:a,chains:c}),[u,i,r,d,h,c,a])}function Tg(){}let Ng=["beforeEnter","afterEnter","beforeLeave","afterLeave"];function Ya(e){var t;let n={};for(let r of Ng)n[r]=(t=e[r])!=null?t:Tg;return n}function Pg(e){let t=w.useRef(Ya(e));return w.useEffect(()=>{t.current=Ya(e)},[e]),t}let Lg="div",qd=to.RenderStrategy;function _g(e,t){let{beforeEnter:n,afterEnter:r,beforeLeave:l,afterLeave:o,enter:i,enterFrom:u,enterTo:s,entered:a,leave:c,leaveFrom:d,leaveTo:h,...y}=e,m=w.useRef(null),v=Xe(m,t),P=y.unmount?pt.Unmount:pt.Hidden,{show:p,appear:f,initial:g}=xg(),[E,C]=w.useState(p?"visible":"hidden"),L=Cg(),{register:T,unregister:_}=L,U=w.useRef(null);w.useEffect(()=>T(m),[T,m]),w.useEffect(()=>{if(P===pt.Hidden&&m.current){if(p&&E!=="visible"){C("visible");return}return he(E,{hidden:()=>_(m),visible:()=>T(m)})}},[E,m,T,_,p,P]);let O=vt({enter:Gt(i),enterFrom:Gt(u),enterTo:Gt(s),entered:Gt(a),leave:Gt(c),leaveFrom:Gt(d),leaveTo:Gt(h)}),V=Pg({beforeEnter:n,afterEnter:r,beforeLeave:l,afterLeave:o}),He=Kn();w.useEffect(()=>{if(He&&E==="visible"&&m.current===null)throw new Error("Did you 
forget to passthrough the `ref` to the actual DOM node?")},[m,E,He]);let Ve=g&&!f,mn=(()=>!He||Ve||U.current===p?"idle":p?"enter":"leave")(),it=vg(0),ut=ue(j=>he(j,{enter:()=>{it.addFlag(Pe.Opening),V.current.beforeEnter()},leave:()=>{it.addFlag(Pe.Closing),V.current.beforeLeave()},idle:()=>{}})),bt=ue(j=>he(j,{enter:()=>{it.removeFlag(Pe.Opening),V.current.afterEnter()},leave:()=>{it.removeFlag(Pe.Closing),V.current.afterLeave()},idle:()=>{}})),N=Gd(()=>{C("hidden"),_(m)},L);Eg({container:m,classes:O,direction:mn,onStart:vt(j=>{N.onStart(m,j,ut)}),onStop:vt(j=>{N.onStop(m,j,bt),j==="leave"&&!Ro(N)&&(C("hidden"),_(m))})}),w.useEffect(()=>{Ve&&(P===pt.Hidden?U.current=null:U.current=p)},[p,Ve,E]);let F=y,A={ref:v};return f&&p&&(F={...F,className:gu(y.className,...O.current.enter,...O.current.enterFrom)}),D.createElement(_o.Provider,{value:N},D.createElement(l1,{value:he(E,{visible:Pe.Open,hidden:Pe.Closed})|it.flags},Be({ourProps:A,theirProps:F,defaultTag:Lg,features:qd,visible:E==="visible",name:"Transition.Child"})))}function Rg(e,t){let{show:n,appear:r=!1,unmount:l,...o}=e,i=w.useRef(null),u=Xe(i,t);Kn();let s=Cs();if(n===void 0&&s!==null&&(n=(s&Pe.Open)===Pe.Open),![!0,!1].includes(n))throw new Error("A is used but it is missing a `show={true | false}` prop.");let[a,c]=w.useState(n?"visible":"hidden"),d=Gd(()=>{c("hidden")}),[h,y]=w.useState(!0),m=w.useRef([n]);ot(()=>{h!==!1&&m.current[m.current.length-1]!==n&&(m.current.push(n),y(!1))},[m,n]);let v=w.useMemo(()=>({show:n,appear:r,initial:h}),[n,r,h]);w.useEffect(()=>{if(n)c("visible");else if(!Ro(d))c("hidden");else{let p=i.current;if(!p)return;let f=p.getBoundingClientRect();f.x===0&&f.y===0&&f.width===0&&f.height===0&&c("hidden")}},[n,d]);let P={unmount:l};return D.createElement(_o.Provider,{value:d},D.createElement(Lo.Provider,{value:v},Be({ourProps:{...P,as:w.Fragment,children:D.createElement(Yd,{ref:u,...P,...o})},theirProps:{},defaultTag:w.Fragment,features:qd,visible:a==="visible",name:"Transition"})))}function Og(e,t){let n=w.useContext(Lo)!==null,r=Cs()!==null;return D.createElement(D.Fragment,null,!n&&r?D.createElement(Eu,{ref:t,...e}):D.createElement(Yd,{ref:t,...e}))}let Eu=Ae(Rg),Yd=Ae(_g),Fg=Ae(Og),hi=Object.assign(Eu,{Child:Fg,Root:Eu});function Xd({show:e,onClose:t,onSubmit:n,title:r,content:l,submitText:o}){return x(hi,{appear:!0,show:e,as:w.Fragment,children:B(di,{as:"div",className:"relative z-10",onClose:t,children:[x(hi.Child,{as:w.Fragment,enter:"ease-out duration-300",enterFrom:"opacity-0",enterTo:"opacity-100",leave:"ease-in duration-200",leaveFrom:"opacity-100",leaveTo:"opacity-0",children:x("div",{className:"fixed inset-0 bg-black bg-opacity-25"})}),x("div",{className:"fixed inset-0 overflow-y-auto",children:x("div",{className:"flex min-h-full items-center justify-center p-4 text-center",children:x(hi.Child,{as:w.Fragment,enter:"ease-out duration-300",enterFrom:"opacity-0 scale-95",enterTo:"opacity-100 scale-100",leave:"ease-in duration-200",leaveFrom:"opacity-100 scale-100",leaveTo:"opacity-0 scale-95",children:B(di.Panel,{className:"w-full max-w-md transform overflow-hidden rounded-2xl bg-white p-6 text-left align-middle shadow-xl transition-all",children:[x(di.Title,{as:"h3",className:"text-lg font-medium leading-6 text-gray-900",children:r}),x("div",{className:"mt-3 text-sm text-gray-500",children:l}),B("div",{className:"mt-4 flex flex-row-reverse",children:[o&&x("button",{type:"button",className:"inline-flex ml-4 justify-center rounded-md border border-transparent bg-indigo-600 px-4 py-2 text-sm 
font-medium text-indigo-100 hover:bg-indigo-500 focus:outline-none focus-visible:ring-2 focus-visible:ring-indigo-500 focus-visible:ring-offset-2 transition-all duration-300",onClick:n,children:o}),x("button",{type:"button",className:"inline-flex justify-center rounded-md border border-transparent bg-indigo-100 px-4 py-2 text-sm font-medium text-indigo-900 hover:bg-indigo-200 focus:outline-none focus-visible:ring-2 focus-visible:ring-indigo-500 focus-visible:ring-offset-2 transition-all duration-300",onClick:t,children:"Close"})]})]})})})})]})})}function Ag(e){return x("div",{children:x("input",{...e,type:"url",className:"my-2 bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500",placeholder:"www.example.com",required:!0})})}function $g(e){const t=w.useRef(null),n=w.useRef(null);return w.useEffect(()=>{t.current&&n.current&&(n.current.src=e.audioUrl,t.current.load())},[e.audioUrl]),x("div",{className:"flex relative z-10 p-4 w-full",children:x("audio",{ref:t,controls:!0,className:"w-full h-14 rounded-lg bg-white shadow-xl shadow-black/5 ring-1 ring-slate-700/10",children:x("source",{ref:n,type:"audio/wav"})})})}function Dg(e){const{isModelLoading:t,isTranscribing:n,onClick:r,...l}=e;return x("button",{...l,onClick:o=>{r&&!n&&!t&&r(o)},disabled:n,className:"text-white bg-blue-700 hover:bg-blue-800 focus:ring-4 focus:ring-blue-300 font-medium rounded-lg text-sm px-5 py-2.5 text-center mr-2 dark:bg-blue-600 dark:hover:bg-blue-700 dark:focus:ring-blue-800 inline-flex items-center",children:t?x(Xa,{text:"Loading model..."}):n?x(Xa,{text:"Transcribing..."}):"Transcribe Audio"})}function Xa(e){return B("div",{role:"status",children:[B("svg",{"aria-hidden":"true",role:"status",className:"inline w-4 h-4 mr-3 text-white animate-spin",viewBox:"0 0 100 101",fill:"none",xmlns:"http://www.w3.org/2000/svg",children:[x("path",{d:"M100 50.5908C100 78.2051 77.6142 100.591 50 100.591C22.3858 100.591 0 78.2051 0 50.5908C0 22.9766 22.3858 0.59082 50 0.59082C77.6142 0.59082 100 22.9766 100 50.5908ZM9.08144 50.5908C9.08144 73.1895 27.4013 91.5094 50 91.5094C72.5987 91.5094 90.9186 73.1895 90.9186 50.5908C90.9186 27.9921 72.5987 9.67226 50 9.67226C27.4013 9.67226 9.08144 27.9921 9.08144 50.5908Z",fill:"#E5E7EB"}),x("path",{d:"M93.9676 39.0409C96.393 38.4038 97.8624 35.9116 97.0079 33.5539C95.2932 28.8227 92.871 24.3692 89.8167 20.348C85.8452 15.1192 80.8826 10.7238 75.2124 7.41289C69.5422 4.10194 63.2754 1.94025 56.7698 1.05124C51.7666 0.367541 46.6976 0.446843 41.7345 1.27873C39.2613 1.69328 37.813 4.19778 38.4501 6.62326C39.0873 9.04874 41.5694 10.4717 44.0505 10.1071C47.8511 9.54855 51.7191 9.52689 55.5402 10.0491C60.8642 10.7766 65.9928 12.5457 70.6331 15.2552C75.2735 17.9648 79.3347 21.5619 82.5849 25.841C84.9175 28.9121 86.7997 32.2913 88.1811 35.8758C89.083 38.2158 91.5421 39.6781 93.9676 39.0409Z",fill:"currentColor"})]}),e.text]})}function zg(){let e=!1;return function(t){(/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino|android|ipad|playbook|silk/i.test(t)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a 
wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(t.substr(0,4)))&&(e=!0)}(navigator.userAgent||navigator.vendor||("opera"in window&&typeof window.opera=="string"?window.opera:"")),e}const Ja=zg(),Rt={SAMPLING_RATE:16e3,DEFAULT_AUDIO_URL:`https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/${Ja?"jfk":"ted_60"}.wav`,DEFAULT_MODEL:"tiny",DEFAULT_SUBTASK:"transcribe",DEFAULT_LANGUAGE:"auto",DEFAULT_QUANTIZED:Ja,DEFAULT_MULTILINGUAL:!1};function Mg({text:e,percentage:t}){return t=t??0,x("div",{className:"mt-0.5 w-full relative text-sm text-white background-bg-cyan-400 bg-gray-200 border-1 border-gray-400 rounded-lg text-left overflow-hidden",children:B("div",{className:"top-0 h-full bg-blue-500 whitespace-nowrap px-2",style:{width:`${t}%`},children:[e," (",`${t.toFixed(2)}%`,")"]})})}function jg(e){return e=e.toLowerCase(),(e.match(/\w+.?/g)||[]).map(t=>t.charAt(0).toUpperCase()+t.slice(1)).join("")}const 
Za={en:"english",zh:"chinese",de:"german",es:"spanish/castilian",ru:"russian",ko:"korean",fr:"french",ja:"japanese",pt:"portuguese",tr:"turkish",pl:"polish",ca:"catalan/valencian",nl:"dutch/flemish",ar:"arabic",sv:"swedish",it:"italian",id:"indonesian",hi:"hindi",fi:"finnish",vi:"vietnamese",he:"hebrew",uk:"ukrainian",el:"greek",ms:"malay",cs:"czech",ro:"romanian/moldavian/moldovan",da:"danish",hu:"hungarian",ta:"tamil",no:"norwegian",th:"thai",ur:"urdu",hr:"croatian",bg:"bulgarian",lt:"lithuanian",la:"latin",mi:"maori",ml:"malayalam",cy:"welsh",sk:"slovak",te:"telugu",fa:"persian",lv:"latvian",bn:"bengali",sr:"serbian",az:"azerbaijani",sl:"slovenian",kn:"kannada",et:"estonian",mk:"macedonian",br:"breton",eu:"basque",is:"icelandic",hy:"armenian",ne:"nepali",mn:"mongolian",bs:"bosnian",kk:"kazakh",sq:"albanian",sw:"swahili",gl:"galician",mr:"marathi",pa:"punjabi/panjabi",si:"sinhala/sinhalese",km:"khmer",sn:"shona",yo:"yoruba",so:"somali",af:"afrikaans",oc:"occitan",ka:"georgian",be:"belarusian",tg:"tajik",sd:"sindhi",gu:"gujarati",am:"amharic",yi:"yiddish",lo:"lao",uz:"uzbek",fo:"faroese",ht:"haitian creole/haitian",ps:"pashto/pushto",tk:"turkmen",nn:"nynorsk",mt:"maltese",sa:"sanskrit",lb:"luxembourgish/letzeburgesch",my:"myanmar/burmese",bo:"tibetan",tl:"tagalog",mg:"malagasy",as:"assamese",tt:"tatar",haw:"hawaiian",ln:"lingala",ha:"hausa",ba:"bashkir",jw:"javanese",su:"sundanese"};function Ug(e){const[t,n]=w.useState(void 0),[r,l]=w.useState(void 0),[o,i]=w.useState(void 0),u=t!==void 0,s=async c=>{const d=new AudioContext({sampleRate:Rt.SAMPLING_RATE}),h=URL.createObjectURL(new Blob([c],{type:"audio/*"})),y=await d.decodeAudioData(c);l({buffer:y,url:h,source:"URL"})},a=async c=>{if(o)try{l(void 0),n(0);const{data:d}=await I0.get(o,{signal:c.signal,responseType:"arraybuffer",onDownloadProgress(h){n(h.progress||0)}});s(d)}catch(d){console.log("Request failed or aborted",d)}finally{n(void 0)}};return w.useEffect(()=>{if(o){const c=new AbortController;return a(c),()=>{c.abort()}}},[o]),B(on,{children:[B("div",{className:"flex flex-col justify-center items-center rounded-lg bg-white shadow-xl shadow-black/5 ring-1 ring-slate-700/10",children:[B("div",{className:"flex flex-row space-x-2 py-2 w-full px-2",children:[x(Qg,{icon:x(Gg,{}),text:"From URL",onUrlUpdate:c=>{e.transcriber.onInputChange(),i(c)}}),x(Hg,{}),x(Kg,{icon:x(qg,{}),text:"From file",onFileUpdate:(c,d)=>{e.transcriber.onInputChange(),l({buffer:c,url:d,source:"FILE"})}})]}),x(Vg,{progress:u?t:+!!r})]}),r&&B(on,{children:[x($g,{audioUrl:r.url}),B("div",{className:"relative w-full flex justify-center items-center",children:[x(Dg,{onClick:()=>{e.transcriber.start(r.buffer)},isModelLoading:e.transcriber.isModelLoading,isTranscribing:e.transcriber.isBusy}),x(Ig,{className:"absolute right-4",transcriber:e.transcriber,icon:x(Yg,{})})]}),e.transcriber.progressItems.length>0&&B("div",{className:"relative z-10 p-4 w-full",children:[x("label",{children:"Loading model files... 
(only run once)"}),e.transcriber.progressItems.map(c=>x("div",{children:x(Mg,{text:c.file,percentage:c.progress})},c.file))]})]})]})}function Ig(e){const[t,n]=w.useState(!1),r=()=>{n(!0)},l=()=>{n(!1)},o=i=>{l()};return B("div",{className:e.className,children:[x(Ps,{icon:e.icon,onClick:r}),x(Bg,{show:t,onSubmit:o,onClose:l,transcriber:e.transcriber})]})}function Bg(e){const t=Object.values(Za).map(jg),n={tiny:[61,231],base:[103,398],small:[290],medium:[833]};return x(Xd,{show:e.show,title:"Settings",content:B(on,{children:[x("label",{children:"Select the model to use."}),x("select",{className:"mt-1 mb-1 bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500",defaultValue:e.transcriber.model,onChange:r=>{e.transcriber.setModel(r.target.value)},children:Object.keys(n).filter(r=>e.transcriber.quantized||n[r].length==2).map(r=>x("option",{value:r,children:`whisper-${r}${e.transcriber.multilingual?"":".en"} (${n[r][e.transcriber.quantized?0:1]}MB)`},r))}),B("div",{className:"flex justify-between items-center mb-3 px-1",children:[B("div",{className:"flex",children:[x("input",{id:"multilingual",type:"checkbox",checked:e.transcriber.multilingual,onChange:r=>{e.transcriber.setMultilingual(r.target.checked)}}),x("label",{htmlFor:"multilingual",className:"ms-1",children:"Multilingual"})]}),B("div",{className:"flex",children:[x("input",{id:"quantize",type:"checkbox",checked:e.transcriber.quantized,onChange:r=>{e.transcriber.setQuantized(r.target.checked)}}),x("label",{htmlFor:"quantize",className:"ms-1",children:"Quantized"})]})]}),e.transcriber.multilingual&&B(on,{children:[x("label",{children:"Select the source language."}),B("select",{className:"mt-1 mb-3 bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500",defaultValue:e.transcriber.language,onChange:r=>{e.transcriber.setLanguage(r.target.value)},children:[x("option",{value:"auto",children:"Auto-detect"},-1),Object.keys(Za).map((r,l)=>x("option",{value:r,children:t[l]},r))]}),x("label",{children:"Select the task to perform."}),B("select",{className:"mt-1 mb-3 bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500",defaultValue:e.transcriber.subtask,onChange:r=>{e.transcriber.setSubtask(r.target.value)},children:[x("option",{value:"transcribe",children:"Transcribe"}),x("option",{value:"translate",children:"Translate (to English)"})]})]})]}),onClose:e.onClose,onSubmit:()=>{}})}function Hg(){return x("div",{className:"w-[1px] bg-slate-200"})}function Vg(e){return x(Wg,{progress:`${Math.round(e.progress*100)}%`})}function Wg(e){return x("div",{className:"w-full bg-gray-200 rounded-full h-1 dark:bg-gray-700",children:x("div",{className:"bg-blue-600 h-1 rounded-full transition-all duration-100",style:{width:e.progress}})})}function Qg(e){const[t,n]=w.useState(!1),r=()=>{n(!0)},l=()=>{n(!1)},o=i=>{e.onUrlUpdate(i),l()};return B(on,{children:[x(Ps,{icon:e.icon,text:e.text,onClick:r}),x(bg,{show:t,onSubmit:o,onClose:l})]})}function 
bg(e){const[t,n]=w.useState(Rt.DEFAULT_AUDIO_URL),r=o=>{n(o.target.value)},l=()=>{e.onSubmit(t)};return x(Xd,{show:e.show,title:"From URL",content:B(on,{children:["Enter the URL of the audio file you want to load.",x(Ag,{onChange:r,value:t})]}),onClose:e.onClose,submitText:"Load",onSubmit:l})}function Kg(e){let t=document.createElement("input");return t.type="file",t.oninput=n=>{let r=n.target.files;if(!r)return;const l=URL.createObjectURL(r[0]),o=new FileReader;o.addEventListener("load",async i=>{var c;const u=(c=i.target)==null?void 0:c.result;if(!u)return;const a=await new AudioContext({sampleRate:Rt.SAMPLING_RATE}).decodeAudioData(u);e.onFileUpdate(a,l)}),o.readAsArrayBuffer(r[0]),t.value=""},x(on,{children:x(Ps,{icon:e.icon,text:e.text,onClick:()=>t.click()})})}function Ps(e){return B("button",{onClick:e.onClick,className:"flex items-center justify-center rounded-lg p-2 bg-blue text-slate-500 hover:text-indigo-600 hover:bg-indigo-50 transition-all duration-200",children:[x("div",{className:"w-7 h-7",children:e.icon}),e.text&&x("div",{className:"ml-2 break-text text-center text-md w-30",children:e.text})]})}function Gg(){return x("svg",{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",strokeWidth:"1.5",stroke:"currentColor",children:x("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M13.19 8.688a4.5 4.5 0 011.242 7.244l-4.5 4.5a4.5 4.5 0 01-6.364-6.364l1.757-1.757m13.35-.622l1.757-1.757a4.5 4.5 0 00-6.364-6.364l-4.5 4.5a4.5 4.5 0 001.242 7.244"})})}function qg(){return x("svg",{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",strokeWidth:"1.5",stroke:"currentColor",children:x("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M3.75 9.776c.112-.017.227-.026.344-.026h15.812c.117 0 .232.009.344.026m-16.5 0a2.25 2.25 0 00-1.883 2.542l.857 6a2.25 2.25 0 002.227 1.932H19.05a2.25 2.25 0 002.227-1.932l.857-6a2.25 2.25 0 00-1.883-2.542m-16.5 0V6A2.25 2.25 0 016 3.75h3.879a1.5 1.5 0 011.06.44l2.122 2.12a1.5 1.5 0 001.06.44H18A2.25 2.25 0 0120.25 9v.776"})})}function Yg(){return B("svg",{xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",strokeWidth:"1.25",stroke:"currentColor",children:[x("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M9.594 3.94c.09-.542.56-.94 1.11-.94h2.593c.55 0 1.02.398 1.11.94l.213 1.281c.063.374.313.686.645.87.074.04.147.083.22.127.324.196.72.257 1.075.124l1.217-.456a1.125 1.125 0 011.37.49l1.296 2.247a1.125 1.125 0 01-.26 1.431l-1.003.827c-.293.24-.438.613-.431.992a6.759 6.759 0 010 .255c-.007.378.138.75.43.99l1.005.828c.424.35.534.954.26 1.43l-1.298 2.247a1.125 1.125 0 01-1.369.491l-1.217-.456c-.355-.133-.75-.072-1.076.124a6.57 6.57 0 01-.22.128c-.331.183-.581.495-.644.869l-.213 1.28c-.09.543-.56.941-1.11.941h-2.594c-.55 0-1.02-.398-1.11-.94l-.213-1.281c-.062-.374-.312-.686-.644-.87a6.52 6.52 0 01-.22-.127c-.325-.196-.72-.257-1.076-.124l-1.217.456a1.125 1.125 0 01-1.369-.49l-1.297-2.247a1.125 1.125 0 01.26-1.431l1.004-.827c.292-.24.437-.613.43-.992a6.932 6.932 0 010-.255c.007-.378-.138-.75-.43-.99l-1.004-.828a1.125 1.125 0 01-.26-1.43l1.297-2.247a1.125 1.125 0 011.37-.491l1.216.456c.356.133.751.072 1.076-.124.072-.044.146-.087.22-.128.332-.183.582-.495.644-.869l.214-1.281z"}),x("path",{strokeLinecap:"round",strokeLinejoin:"round",d:"M15 12a3 3 0 11-6 0 3 3 0 016 0z"})]})}function gi(e){return String(e).padStart(2,"0")}function Xg(e){const t=e/3600|0;e-=t*(60*60);const n=e/60|0;e-=n*60;const r=e|0;return`${t?gi(t)+":":""}${gi(n)}:${gi(r)}`}function Jg({transcribedData:e}){const 
t=w.useRef(null),n=()=>{let r=JSON.stringify((e==null?void 0:e.chunks)??{},null,2);const l=/( "timestamp": )\[\s+(\S+)\s+(\S+)\s+\]/gm;r=r.replace(l,"$1[$2 $3]");const o=new Blob([r],{type:"application/json"}),i=URL.createObjectURL(o),u=document.createElement("a");u.href=i,u.download="transcript.json",u.click(),URL.revokeObjectURL(i)};return w.useEffect(()=>{t.current&&Math.abs(t.current.offsetHeight+t.current.scrollTop-t.current.scrollHeight)<=64&&(t.current.scrollTop=t.current.scrollHeight)}),B("div",{ref:t,className:"w-full flex flex-col my-2 p-4 max-h-[20rem] overflow-y-auto",children:[e&&e.chunks.map((r,l)=>B("div",{className:"w-full flex flex-row mb-2 bg-white rounded-lg p-4 shadow-xl shadow-black/5 ring-1 ring-slate-700/10",children:[x("div",{className:"mr-5",children:Xg(r.timestamp[0])}),r.text]},`${l}-${r.text}`)),e&&!e.isBusy&&x("div",{className:"w-full text-right",children:x("button",{onClick:n,className:"text-white bg-green-500 hover:bg-green-600 focus:ring-4 focus:ring-green-300 font-medium rounded-lg text-sm px-4 py-2 text-center mr-2 dark:bg-green-600 dark:hover:bg-green-700 dark:focus:ring-green-800 inline-flex items-center",children:"Export JSON"})})]})}function Zg(e){const[t]=w.useState(()=>ev(e));return t}function ev(e){const t=new Worker(new URL("/assets/worker-c82d7cb9.js",self.location),{type:"module"});return t.addEventListener("message",e),t}function tv(){const[e,t]=w.useState(void 0),[n,r]=w.useState(!1),[l,o]=w.useState(!1),[i,u]=w.useState([]),s=Zg(L=>{const T=L.data;switch(T.status){case"progress":u(O=>O.map(V=>V.file===T.file?{...V,progress:T.progress}:V));break;case"update":const _=T;t({isBusy:!0,text:_.data[0],chunks:_.data[1].chunks});break;case"complete":const U=T;t({isBusy:!1,text:U.data.text,chunks:U.data.chunks}),r(!1);break;case"initiate":o(!0),u(O=>[...O,T]);break;case"ready":o(!1);break;case"error":r(!1),alert(`${T.data.message} This is most likely because you are using Safari on an M1/M2 Mac. Please try again from Chrome, Firefox, or Edge.
-
-If this is not the case, please file a bug report.`);break;case"done":u(O=>O.filter(V=>V.file!==T.file));break}}),[a,c]=w.useState(Rt.DEFAULT_MODEL),[d,h]=w.useState(Rt.DEFAULT_SUBTASK),[y,m]=w.useState(Rt.DEFAULT_QUANTIZED),[v,P]=w.useState(Rt.DEFAULT_MULTILINGUAL),[p,f]=w.useState(Rt.DEFAULT_LANGUAGE),g=w.useCallback(()=>{t(void 0)},[]),E=w.useCallback(async L=>{L&&(t(void 0),r(!0),s.postMessage({audio:L.getChannelData(0),model:a,multilingual:v,quantized:y,subtask:v?d:null,language:v&&p!=="auto"?p:null}))},[s,a,v,y,d,p]);return w.useMemo(()=>({onInputChange:g,isBusy:n,isModelLoading:l,progressItems:i,start:E,output:e,model:a,setModel:c,multilingual:v,setMultilingual:P,quantized:y,setQuantized:m,subtask:d,setSubtask:h,language:p,setLanguage:f}),[n,l,i,E,e,a,v,y,d,p])}function nv(){const e=tv();return x("div",{className:"flex justify-center items-center min-h-screen",children:B("div",{className:"container flex flex-col justify-center items-center",children:[x("h1",{className:"text-5xl font-extrabold tracking-tight text-slate-900 sm:text-7xl text-center",children:"Whisper Web"}),x("h2",{className:"mt-3 mb-5 px-4 text-center text-1xl font-semibold tracking-tight text-slate-900 sm:text-2xl",children:"ML-powered speech recognition directly in your browser"}),x(Ug,{transcriber:e}),x(Jg,{transcribedData:e.output})]})})}yi.createRoot(document.getElementById("root")).render(x(D.StrictMode,{children:x(nv,{})}));
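The deleted bundle above is the compiled front-end of a "Whisper Web" demo: a React UI built on Headless UI dialogs and transitions, an axios-based audio downloader, and a Web Worker that runs Whisper via transformers.js with user-selectable model size, quantization, multilingual mode, task and language. As a rough orientation only, here is a server-side Python sketch of the same transcription settings using the Hugging Face transformers pipeline; the model id, audio file name and arguments are illustrative assumptions, not taken from the deleted file.

    # Illustrative sketch only -- not part of the deleted Space. Assumes the
    # `transformers` library is installed; the model id and audio path are made up.
    from transformers import pipeline

    asr = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-tiny",   # the app's default model size is "tiny"
        chunk_length_s=30,             # process long audio in chunks
    )
    result = asr(
        "example.wav",                 # the app resamples input audio to 16 kHz
        return_timestamps=True,        # the app renders per-chunk timestamps
        generate_kwargs={"task": "transcribe"},
    )
    print(result["text"])
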
diff --git a/spaces/XzJosh/Echo-Bert-VITS2/text/japanese.py b/spaces/XzJosh/Echo-Bert-VITS2/text/japanese.py
deleted file mode 100644
index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Echo-Bert-VITS2/text/japanese.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py
-import re
-import sys
-
-import pyopenjtalk
-
-from text import symbols
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(
- r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(
- r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (symbol, Japanese) pairs for marks:
-_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('%', 'パーセント')
-]]
-
-
-# List of (consonant, sokuon) pairs:
-_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
- (r'Q([↑↓]*[kg])', r'k#\1'),
- (r'Q([↑↓]*[tdjʧ])', r't#\1'),
- (r'Q([↑↓]*[sʃ])', r's\1'),
- (r'Q([↑↓]*[pb])', r'p#\1')
-]]
-
-# List of (consonant, hatsuon) pairs:
-_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
- (r'N([↑↓]*[pbm])', r'm\1'),
- (r'N([↑↓]*[ʧʥj])', r'n^\1'),
- (r'N([↑↓]*[tdn])', r'n\1'),
- (r'N([↑↓]*[kg])', r'ŋ\1')
-]]
-
-
-
-def post_replace_ph(ph):
- rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- 'v': "V"
- }
- if ph in rep_map.keys():
- ph = rep_map[ph]
- if ph in symbols:
- return ph
- if ph not in symbols:
- ph = 'UNK'
- return ph
-
-def symbols_to_japanese(text):
- for regex, replacement in _symbols_to_japanese:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def preprocess_jap(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- text = symbols_to_japanese(text)
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = []
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- p = pyopenjtalk.g2p(sentence)
- text += p.split(" ")
-
- if i < len(marks):
- text += [marks[i].replace(' ', '')]
- return text
-
-def text_normalize(text):
- # todo: jap text normalize
- return text
-
-def g2p(norm_text):
- phones = preprocess_jap(norm_text)
- phones = [post_replace_ph(i) for i in phones]
- # todo: implement tones and word2ph
- tones = [0 for i in phones]
- word2ph = [1 for i in phones]
- return phones, tones, word2ph
-
-
-if __name__ == '__main__':
- for line in open("../../../Downloads/transcript_utf8.txt").readlines():
- text = line.split(":")[1]
- phones, tones, word2ph = g2p(text)
- for p in phones:
- if p == "z":
- print(text, phones)
- sys.exit(0)
diff --git a/spaces/XzJosh/Nana7mi-Bert-VITS2/monotonic_align/core.py b/spaces/XzJosh/Nana7mi-Bert-VITS2/monotonic_align/core.py
deleted file mode 100644
index dddc688d76172b880054e544b7a217acd013f14f..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Nana7mi-Bert-VITS2/monotonic_align/core.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import numba
-
-
-@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True)
-def maximum_path_jit(paths, values, t_ys, t_xs):
- b = paths.shape[0]
- max_neg_val=-1e9
- for i in range(int(b)):
- path = paths[i]
- value = values[i]
- t_y = t_ys[i]
- t_x = t_xs[i]
-
- v_prev = v_cur = 0.0
- index = t_x - 1
-
- for y in range(t_y):
- for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
- if x == y:
- v_cur = max_neg_val
- else:
- v_cur = value[y-1, x]
- if x == 0:
- if y == 0:
- v_prev = 0.
- else:
- v_prev = max_neg_val
- else:
- v_prev = value[y-1, x-1]
- value[y, x] += max(v_prev, v_cur)
-
- for y in range(t_y - 1, -1, -1):
- path[y, index] = 1
- if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
- index = index - 1
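-
-
-def _maximum_path_demo():
-    # Illustrative usage sketch (not part of the original module): the jitted
-    # routine fills `paths` in place with a monotonic alignment. Array dtypes and
-    # C-contiguous layout must match the numba signature above; the toy shapes
-    # and per-item lengths below are made up for exposition.
-    import numpy as np
-    b, t_y, t_x = 2, 6, 4
-    paths = np.zeros((b, t_y, t_x), dtype=np.int32)
-    values = np.random.randn(b, t_y, t_x).astype(np.float32)  # e.g. log-likelihood map
-    t_ys = np.array([6, 5], dtype=np.int32)  # per-item output (mel) lengths
-    t_xs = np.array([4, 3], dtype=np.int32)  # per-item input (text) lengths
-    maximum_path_jit(paths, values, t_ys, t_xs)
-    return paths  # 0/1 matrices, one monotonic path per batch item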
diff --git a/spaces/XzJosh/Taffy-Bert-VITS2/text/chinese_bert.py b/spaces/XzJosh/Taffy-Bert-VITS2/text/chinese_bert.py
deleted file mode 100644
index cb84ce0b426cd0a1c7954ddcdf41322c10ed14fa..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Taffy-Bert-VITS2/text/chinese_bert.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import torch
-from transformers import AutoTokenizer, AutoModelForMaskedLM
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-tokenizer = AutoTokenizer.from_pretrained("./bert/chinese-roberta-wwm-ext-large")
-model = AutoModelForMaskedLM.from_pretrained("./bert/chinese-roberta-wwm-ext-large").to(device)
-
-def get_bert_feature(text, word2ph):
- with torch.no_grad():
- inputs = tokenizer(text, return_tensors='pt')
- for i in inputs:
- inputs[i] = inputs[i].to(device)
- res = model(**inputs, output_hidden_states=True)
- res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu()
-
- assert len(word2ph) == len(text)+2
- word2phone = word2ph
- phone_level_feature = []
- for i in range(len(word2phone)):
- repeat_feature = res[i].repeat(word2phone[i], 1)
- phone_level_feature.append(repeat_feature)
-
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
-
-
- return phone_level_feature.T
-
-if __name__ == '__main__':
- # feature = get_bert_feature('你好,我是说的道理。')
- import torch
-
-    word_level_feature = torch.rand(38, 1024)  # 38 tokens, each with a 1024-dim feature
- word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1]
-
-    # compute the total number of frames
- total_frames = sum(word2phone)
- print(word_level_feature.shape)
- print(word2phone)
- phone_level_feature = []
- for i in range(len(word2phone)):
- print(word_level_feature[i].shape)
-
-        # repeat each token's feature word2phone[i] times
- repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
- phone_level_feature.append(repeat_feature)
-
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
-    print(phone_level_feature.shape)  # (total_frames, 1024)
-
diff --git a/spaces/YUANAI/DiffspeechResearch/modules/commons/nar_tts_modules.py b/spaces/YUANAI/DiffspeechResearch/modules/commons/nar_tts_modules.py
deleted file mode 100644
index fe9a3325554ccf6059316d0848be1281c14ecffe..0000000000000000000000000000000000000000
--- a/spaces/YUANAI/DiffspeechResearch/modules/commons/nar_tts_modules.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import torch
-from torch import nn
-
-from modules.commons.layers import LayerNorm
-import torch.nn.functional as F
-
-
-class DurationPredictor(torch.nn.Module):
- def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0):
- super(DurationPredictor, self).__init__()
- self.offset = offset
- self.conv = torch.nn.ModuleList()
- self.kernel_size = kernel_size
- for idx in range(n_layers):
- in_chans = idim if idx == 0 else n_chans
- self.conv += [torch.nn.Sequential(
- torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=kernel_size // 2),
- torch.nn.ReLU(),
- LayerNorm(n_chans, dim=1),
- torch.nn.Dropout(dropout_rate)
- )]
- self.linear = nn.Sequential(torch.nn.Linear(n_chans, 1), nn.Softplus())
-
- def forward(self, x, x_padding=None):
- x = x.transpose(1, -1) # (B, idim, Tmax)
- for f in self.conv:
- x = f(x) # (B, C, Tmax)
- if x_padding is not None:
- x = x * (1 - x_padding.float())[:, None, :]
-
- x = self.linear(x.transpose(1, -1)) # [B, T, C]
-        if x_padding is not None:
-            x = x * (1 - x_padding.float())[:, :, None]  # (B, T, C)
- x = x[..., 0] # (B, Tmax)
- return x
-
-
-class LengthRegulator(torch.nn.Module):
- def __init__(self, pad_value=0.0):
- super(LengthRegulator, self).__init__()
- self.pad_value = pad_value
-
- def forward(self, dur, dur_padding=None, alpha=1.0):
- """
- Example (no batch dim version):
- 1. dur = [2,2,3]
- 2. token_idx = [[1],[2],[3]], dur_cumsum = [2,4,7], dur_cumsum_prev = [0,2,4]
- 3. token_mask = [[1,1,0,0,0,0,0],
- [0,0,1,1,0,0,0],
- [0,0,0,0,1,1,1]]
- 4. token_idx * token_mask = [[1,1,0,0,0,0,0],
- [0,0,2,2,0,0,0],
- [0,0,0,0,3,3,3]]
- 5. (token_idx * token_mask).sum(0) = [1,1,2,2,3,3,3]
-
- :param dur: Batch of durations of each frame (B, T_txt)
- :param dur_padding: Batch of padding of each frame (B, T_txt)
-        :param alpha: duration rescale coefficient (must be > 0)
-        :return:
-            mel2ph (B, T_speech)
-        """
- dur = torch.round(dur.float() * alpha).long()
- if dur_padding is not None:
- dur = dur * (1 - dur_padding.long())
- token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None].to(dur.device)
- dur_cumsum = torch.cumsum(dur, 1)
- dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0)
-
- pos_idx = torch.arange(dur.sum(-1).max())[None, None].to(dur.device)
- token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None])
- mel2token = (token_idx * token_mask.long()).sum(1)
- return mel2token
-
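-
-def _length_regulator_demo():
-    # Illustrative usage sketch (not part of the original module): expands the
-    # docstring example dur = [2, 2, 3] into mel2token = [1, 1, 2, 2, 3, 3, 3].
-    lr = LengthRegulator()
-    dur = torch.LongTensor([[2, 2, 3]])  # (B=1, T_txt=3)
-    return lr(dur)  # tensor([[1, 1, 2, 2, 3, 3, 3]])
-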
-
-class PitchPredictor(torch.nn.Module):
- def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5, dropout_rate=0.1):
- super(PitchPredictor, self).__init__()
- self.conv = torch.nn.ModuleList()
- self.kernel_size = kernel_size
- for idx in range(n_layers):
- in_chans = idim if idx == 0 else n_chans
- self.conv += [torch.nn.Sequential(
- torch.nn.Conv1d(in_chans, n_chans, kernel_size, padding=kernel_size // 2),
- torch.nn.ReLU(),
- LayerNorm(n_chans, dim=1),
- torch.nn.Dropout(dropout_rate)
- )]
- self.linear = torch.nn.Linear(n_chans, odim)
-
- def forward(self, x):
- """
-
- :param x: [B, T, H]
- :return: [B, T, H]
- """
- x = x.transpose(1, -1) # (B, idim, Tmax)
- for f in self.conv:
- x = f(x) # (B, C, Tmax)
- x = self.linear(x.transpose(1, -1)) # (B, Tmax, H)
- return x
-
-
-class EnergyPredictor(PitchPredictor):
- pass
diff --git a/spaces/Yan233th/so-vits-svc-models/utils.py b/spaces/Yan233th/so-vits-svc-models/utils.py
deleted file mode 100644
index 229ac28ca48940370f63f2a7691ee6561910e2a6..0000000000000000000000000000000000000000
--- a/spaces/Yan233th/so-vits-svc-models/utils.py
+++ /dev/null
@@ -1,502 +0,0 @@
-import os
-import glob
-import re
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import random
-
-import librosa
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-from torch.nn import functional as F
-from modules.commons import sequence_mask
-from hubert import hubert_model
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-f0_bin = 256
-f0_max = 1100.0
-f0_min = 50.0
-f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
-
-# def normalize_f0(f0, random_scale=True):
-# f0_norm = f0.clone() # create a copy of the input Tensor
-# batch_size, _, frame_length = f0_norm.shape
-# for i in range(batch_size):
-# means = torch.mean(f0_norm[i, 0, :])
-# if random_scale:
-# factor = random.uniform(0.8, 1.2)
-# else:
-# factor = 1
-# f0_norm[i, 0, :] = (f0_norm[i, 0, :] - means) * factor
-# return f0_norm
-# def normalize_f0(f0, random_scale=True):
-# means = torch.mean(f0[:, 0, :], dim=1, keepdim=True)
-# if random_scale:
-# factor = torch.Tensor(f0.shape[0],1).uniform_(0.8, 1.2).to(f0.device)
-# else:
-# factor = torch.ones(f0.shape[0], 1, 1).to(f0.device)
-# f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
-# return f0_norm
-def normalize_f0(f0, x_mask, uv, random_scale=True):
- # calculate means based on x_mask
- uv_sum = torch.sum(uv, dim=1, keepdim=True)
- uv_sum[uv_sum == 0] = 9999
- means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / uv_sum
-
- if random_scale:
- factor = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
- else:
- factor = torch.ones(f0.shape[0], 1).to(f0.device)
- # normalize f0 based on means and factor
- f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
- if torch.isnan(f0_norm).any():
- exit(0)
- return f0_norm * x_mask
-
-
-def plot_data_to_numpy(x, y):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10, 2))
- plt.plot(x)
- plt.plot(y)
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-
-def interpolate_f0(f0):
- '''
-    Interpolate the F0 contour over unvoiced frames and return it together with a voiced/unvoiced flag vector.
- '''
-
- data = np.reshape(f0, (f0.size, 1))
-
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
- vuv_vector[data > 0.0] = 1.0
- vuv_vector[data <= 0.0] = 0.0
-
- ip_data = data
-
- frame_number = data.size
- last_value = 0.0
- for i in range(frame_number):
- if data[i] <= 0.0:
- j = i + 1
- for j in range(i + 1, frame_number):
- if data[j] > 0.0:
- break
- if j < frame_number - 1:
- if last_value > 0.0:
- step = (data[j] - data[i - 1]) / float(j - i)
- for k in range(i, j):
- ip_data[k] = data[i - 1] + step * (k - i + 1)
- else:
- for k in range(i, j):
- ip_data[k] = data[j]
- else:
- for k in range(i, frame_number):
- ip_data[k] = last_value
- else:
- ip_data[i] = data[i]
- last_value = data[i]
-
- return ip_data[:,0], vuv_vector[:,0]
-
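-
-def _interpolate_f0_demo():
-    # Illustrative sketch (not part of the original file): unvoiced frames (0 Hz)
-    # in a toy contour get filled in, and a voiced/unvoiced mask is returned.
-    f0 = np.array([0.0, 100.0, 0.0, 0.0, 120.0, 0.0], dtype=np.float32)
-    ip_f0, vuv = interpolate_f0(f0)
-    return ip_f0, vuv  # ip_f0 ~ [100, 100, 110, 120, 120, 120], vuv = [0, 1, 0, 0, 1, 0]
-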
-
-def compute_f0_parselmouth(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
- import parselmouth
- x = wav_numpy
- if p_len is None:
- p_len = x.shape[0]//hop_length
- else:
- assert abs(p_len-x.shape[0]//hop_length) < 4, "pad length error"
- time_step = hop_length / sampling_rate * 1000
- f0_min = 50
- f0_max = 1100
- f0 = parselmouth.Sound(x, sampling_rate).to_pitch_ac(
- time_step=time_step / 1000, voicing_threshold=0.6,
- pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
-
- pad_size=(p_len - len(f0) + 1) // 2
- if(pad_size>0 or p_len - len(f0) - pad_size>0):
- f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
- return f0
-
-def resize_f0(x, target_len):
- source = np.array(x)
- source[source<0.001] = np.nan
- target = np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source)
- res = np.nan_to_num(target)
- return res
-
-def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
- import pyworld
- if p_len is None:
- p_len = wav_numpy.shape[0]//hop_length
- f0, t = pyworld.dio(
- wav_numpy.astype(np.double),
- fs=sampling_rate,
- f0_ceil=800,
- frame_period=1000 * hop_length / sampling_rate,
- )
- f0 = pyworld.stonemask(wav_numpy.astype(np.double), f0, t, sampling_rate)
- for index, pitch in enumerate(f0):
- f0[index] = round(pitch, 1)
- return resize_f0(f0, p_len)
-
-def f0_to_coarse(f0):
- is_torch = isinstance(f0, torch.Tensor)
- f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
-
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
-    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int)  # np.int was removed in newer NumPy
- assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
- return f0_coarse
-
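-
-def _f0_to_coarse_demo():
-    # Illustrative sketch (not part of the original file): quantizes a toy F0
-    # contour into the coarse bins defined by f0_bin / f0_mel_min / f0_mel_max
-    # above. Unvoiced frames (0 Hz) map to bin 1; voiced frames land in [1, 255].
-    f0 = np.array([0.0, 110.0, 220.0, 440.0, 880.0])
-    return f0_to_coarse(f0)
-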
-
-def get_hubert_model():
- vec_path = "hubert/checkpoint_best_legacy_500.pt"
- print("load model(s) from {}".format(vec_path))
- from fairseq import checkpoint_utils
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
- [vec_path],
- suffix="",
- )
- model = models[0]
- model.eval()
- return model
-
-def get_hubert_content(hmodel, wav_16k_tensor):
- feats = wav_16k_tensor
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).fill_(False)
- inputs = {
- "source": feats.to(wav_16k_tensor.device),
- "padding_mask": padding_mask.to(wav_16k_tensor.device),
- "output_layer": 9, # layer 9
- }
- with torch.no_grad():
- logits = hmodel.extract_features(**inputs)
- feats = hmodel.final_proj(logits[0])
- return feats.transpose(1, 2)
-
-
-def get_content(cmodel, y):
- with torch.no_grad():
- c = cmodel.extract_features(y.squeeze(1))[0]
- c = c.transpose(1, 2)
- return c
-
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict = {}
- for k, v in state_dict.items():
- try:
- # assert "dec" in k or "disc" in k
- # print("load", k)
- new_state_dict[k] = saved_state_dict[k]
- assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
-        except Exception:  # missing key or shape mismatch; keep the model's current weights
- print("error, %s is not in the checkpoint" % k)
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- print("load ")
- logger.info("Loaded checkpoint '{}' (iteration {})".format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
- iteration, checkpoint_path))
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- torch.save({'model': state_dict,
- 'iteration': iteration,
- 'optimizer': optimizer.state_dict(),
- 'learning_rate': learning_rate}, checkpoint_path)
-
-def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
- """Freeing up space by deleting saved ckpts
-
- Arguments:
- path_to_models -- Path to the model directory
- n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
- sort_by_time -- True -> chronologically delete ckpts
- False -> lexicographically delete ckpts
- """
- ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]
-    name_key = (lambda _f: int(re.compile(r'._(\d+)\.pth').match(_f).group(1)))
- time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
- sort_key = time_key if sort_by_time else name_key
- x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], key=sort_key)
- to_del = [os.path.join(path_to_models, fn) for fn in
- (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
- del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
- del_routine = lambda x: [os.remove(x), del_info(x)]
- rs = [del_routine(fn) for fn in to_del]
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
- for k, v in scalars.items():
- writer.add_scalar(k, v, global_step)
- for k, v in histograms.items():
- writer.add_histogram(k, v, global_step)
- for k, v in images.items():
- writer.add_image(k, v, global_step, dataformats='HWC')
- for k, v in audios.items():
- writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
- x = f_list[-1]
- print(x)
- return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-def repeat_expand_2d(content, target_len):
- # content : [h, t]
-
- src_len = content.shape[-1]
- target = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(content.device)
- temp = torch.arange(src_len+1) * target_len / src_len
- current_pos = 0
- for i in range(target_len):
- if i < temp[current_pos+1]:
- target[:, i] = content[:, current_pos]
- else:
- current_pos += 1
- target[:, i] = content[:, current_pos]
-
- return target
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
-
diff --git a/spaces/YazawaSunrise/so-vits-svc-LoveLive/hubert/hubert_model.py b/spaces/YazawaSunrise/so-vits-svc-LoveLive/hubert/hubert_model.py
deleted file mode 100644
index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000
--- a/spaces/YazawaSunrise/so-vits-svc-LoveLive/hubert/hubert_model.py
+++ /dev/null
@@ -1,222 +0,0 @@
-import copy
-import random
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as t_func
-from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
-
-class Hubert(nn.Module):
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
- super().__init__()
- self._mask = mask
- self.feature_extractor = FeatureExtractor()
- self.feature_projection = FeatureProjection()
- self.positional_embedding = PositionalConvEmbedding()
- self.norm = nn.LayerNorm(768)
- self.dropout = nn.Dropout(0.1)
- self.encoder = TransformerEncoder(
- nn.TransformerEncoderLayer(
- 768, 12, 3072, activation="gelu", batch_first=True
- ),
- 12,
- )
- self.proj = nn.Linear(768, 256)
-
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- mask = None
- if self.training and self._mask:
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
- x[mask] = self.masked_spec_embed.to(x.dtype)
- return x, mask
-
- def encode(
- self, x: torch.Tensor, layer: Optional[int] = None
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- x = self.feature_extractor(x)
- x = self.feature_projection(x.transpose(1, 2))
- x, mask = self.mask(x)
- x = x + self.positional_embedding(x)
- x = self.dropout(self.norm(x))
- x = self.encoder(x, output_layer=layer)
- return x, mask
-
- def logits(self, x: torch.Tensor) -> torch.Tensor:
- logits = torch.cosine_similarity(
- x.unsqueeze(2),
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
- dim=-1,
- )
- return logits / 0.1
-
- def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- x, mask = self.encode(x)
- x = self.proj(x)
- logits = self.logits(x)
- return logits, mask
-
-
-class HubertSoft(Hubert):
- def __init__(self):
- super().__init__()
-
- @torch.inference_mode()
- def units(self, wav: torch.Tensor) -> torch.Tensor:
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
- x, _ = self.encode(wav)
- return self.proj(x)
-
-
-class FeatureExtractor(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
- self.norm0 = nn.GroupNorm(512, 512)
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = t_func.gelu(self.norm0(self.conv0(x)))
- x = t_func.gelu(self.conv1(x))
- x = t_func.gelu(self.conv2(x))
- x = t_func.gelu(self.conv3(x))
- x = t_func.gelu(self.conv4(x))
- x = t_func.gelu(self.conv5(x))
- x = t_func.gelu(self.conv6(x))
- return x
-
-
-class FeatureProjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.norm = nn.LayerNorm(512)
- self.projection = nn.Linear(512, 768)
- self.dropout = nn.Dropout(0.1)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.norm(x)
- x = self.projection(x)
- x = self.dropout(x)
- return x
-
-
-class PositionalConvEmbedding(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv = nn.Conv1d(
- 768,
- 768,
- kernel_size=128,
- padding=128 // 2,
- groups=16,
- )
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.conv(x.transpose(1, 2))
- x = t_func.gelu(x[:, :, :-1])
- return x.transpose(1, 2)
-
-
-class TransformerEncoder(nn.Module):
- def __init__(
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
- ) -> None:
- super(TransformerEncoder, self).__init__()
- self.layers = nn.ModuleList(
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
- )
- self.num_layers = num_layers
-
- def forward(
- self,
- src: torch.Tensor,
- mask: torch.Tensor = None,
- src_key_padding_mask: torch.Tensor = None,
- output_layer: Optional[int] = None,
- ) -> torch.Tensor:
- output = src
- for layer in self.layers[:output_layer]:
- output = layer(
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
- )
- return output
-
-
-def _compute_mask(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- device: torch.device,
- min_masks: int = 0,
-) -> torch.Tensor:
- batch_size, sequence_length = shape
-
- if mask_length < 1:
- raise ValueError("`mask_length` has to be bigger than 0.")
-
- if mask_length > sequence_length:
- raise ValueError(
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
- )
-
- # compute number of masked spans in batch
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
- num_masked_spans = max(num_masked_spans, min_masks)
-
- # make sure num masked indices <= sequence_length
- if num_masked_spans * mask_length > sequence_length:
- num_masked_spans = sequence_length // mask_length
-
- # SpecAugment mask to fill
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
- uniform_dist = torch.ones(
- (batch_size, sequence_length - (mask_length - 1)), device=device
- )
-
- # get random indices to mask
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
- # expand masked indices to masked spans
- mask_indices = (
- mask_indices.unsqueeze(dim=-1)
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- offsets = (
- torch.arange(mask_length, device=device)[None, None, :]
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- mask_idxs = mask_indices + offsets
-
- # scatter indices to mask
- mask = mask.scatter(1, mask_idxs, True)
-
- return mask
-
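-
-def _compute_mask_demo():
-    # Illustrative sketch (not part of the original file): draws a SpecAugment-style
-    # boolean mask for a batch of 2 sequences of 50 frames, using the same
-    # mask_prob / mask_length / min_masks values that Hubert.mask() passes above.
-    return _compute_mask((2, 50), 0.8, 10, torch.device("cpu"), 2)
-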
-
-def hubert_soft(
- path: str,
-) -> HubertSoft:
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
- Args:
- path (str): path of a pretrained model
- """
- hubert = HubertSoft()
- checkpoint = torch.load(path)
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
- hubert.load_state_dict(checkpoint)
- hubert.eval()
- return hubert
diff --git a/spaces/YotamNitzan/domain-expansion/style_mixing.py b/spaces/YotamNitzan/domain-expansion/style_mixing.py
deleted file mode 100644
index c47bebbc44c0126b6fd00a55b8b487dc7b159653..0000000000000000000000000000000000000000
--- a/spaces/YotamNitzan/domain-expansion/style_mixing.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Generate style mixing image matrix using pretrained network pickle."""
-
-import os
-import re
-from typing import List
-
-import click
-import dnnlib
-import numpy as np
-import PIL.Image
-import torch
-
-import legacy
-
-#----------------------------------------------------------------------------
-
-def num_range(s: str) -> List[int]:
- '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
-
- range_re = re.compile(r'^(\d+)-(\d+)$')
- m = range_re.match(s)
- if m:
- return list(range(int(m.group(1)), int(m.group(2))+1))
- vals = s.split(',')
- return [int(x) for x in vals]
-
-#----------------------------------------------------------------------------
-
-@click.command()
-@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
-@click.option('--rows', 'row_seeds', type=num_range, help='Random seeds to use for image rows', required=True)
-@click.option('--cols', 'col_seeds', type=num_range, help='Random seeds to use for image columns', required=True)
-@click.option('--styles', 'col_styles', type=num_range, help='Style layer range', default='0-6', show_default=True)
-@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
-@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
-@click.option('--outdir', type=str, required=True)
-def generate_style_mix(
- network_pkl: str,
- row_seeds: List[int],
- col_seeds: List[int],
- col_styles: List[int],
- truncation_psi: float,
- noise_mode: str,
- outdir: str
-):
- """Generate images using pretrained network pickle.
-
- Examples:
-
- \b
- python style_mixing.py --outdir=out --rows=85,100,75,458,1500 --cols=55,821,1789,293 \\
- --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
- """
- print('Loading networks from "%s"...' % network_pkl)
- device = torch.device('cuda')
- with dnnlib.util.open_url(network_pkl) as f:
- G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
-
- os.makedirs(outdir, exist_ok=True)
-
- print('Generating W vectors...')
- all_seeds = list(set(row_seeds + col_seeds))
- all_z = np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])
- all_w = G.mapping(torch.from_numpy(all_z).to(device), None)
- w_avg = G.mapping.w_avg
- all_w = w_avg + (all_w - w_avg) * truncation_psi
- w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))}
-
- print('Generating images...')
- all_images = G.synthesis(all_w, noise_mode=noise_mode)
- all_images = (all_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
- image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}
-
- print('Generating style-mixed images...')
- for row_seed in row_seeds:
- for col_seed in col_seeds:
- w = w_dict[row_seed].clone()
- w[col_styles] = w_dict[col_seed][col_styles]
- image = G.synthesis(w[np.newaxis], noise_mode=noise_mode)
- image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
- image_dict[(row_seed, col_seed)] = image[0].cpu().numpy()
-
- print('Saving images...')
- os.makedirs(outdir, exist_ok=True)
- for (row_seed, col_seed), image in image_dict.items():
- PIL.Image.fromarray(image, 'RGB').save(f'{outdir}/{row_seed}-{col_seed}.png')
-
- print('Saving image grid...')
- W = G.img_resolution
- H = G.img_resolution
- canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black')
- for row_idx, row_seed in enumerate([0] + row_seeds):
- for col_idx, col_seed in enumerate([0] + col_seeds):
- if row_idx == 0 and col_idx == 0:
- continue
- key = (row_seed, col_seed)
- if row_idx == 0:
- key = (col_seed, col_seed)
- if col_idx == 0:
- key = (row_seed, row_seed)
- canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))
- canvas.save(f'{outdir}/grid.png')
-
-
-#----------------------------------------------------------------------------
-
-if __name__ == "__main__":
- generate_style_mix() # pylint: disable=no-value-for-parameter
-
-#----------------------------------------------------------------------------
diff --git a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py b/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py
deleted file mode 100644
index 2753b3ddee43c7a9fe28d1824db5d786e7e1ad59..0000000000000000000000000000000000000000
--- a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from timm.models.layers import DropPath
-
-
-class FeatureResizer(nn.Module):
- """
-    This class takes as input a set of embeddings of dimension C1 and outputs a set of
-    embeddings of dimension C2, after a linear transformation, layer normalization (LN) and dropout.
- """
-
- def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
- super().__init__()
- self.do_ln = do_ln
- # Object feature encoding
- self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
- self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
- self.dropout = nn.Dropout(dropout)
-
- def forward(self, encoder_features):
- x = self.fc(encoder_features)
- if self.do_ln:
- x = self.layer_norm(x)
- output = self.dropout(x)
- return output
-
-
-def l1norm(X, dim, eps=1e-8):
- """L1-normalize columns of X"""
- norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps
- X = torch.div(X, norm)
- return X
-
-
-def l2norm(X, dim, eps=1e-8):
- """L2-normalize columns of X"""
- norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
- X = torch.div(X, norm)
- return X
-
-
-def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8):
- """
- query: (n_context, queryL, d)
- context: (n_context, sourceL, d)
- """
- batch_size_q, queryL = query.size(0), query.size(1)
- batch_size, sourceL = context.size(0), context.size(1)
-
- # Get attention
- # --> (batch, d, queryL)
- queryT = torch.transpose(query, 1, 2)
-
- # (batch, sourceL, d)(batch, d, queryL)
- # --> (batch, sourceL, queryL)
- attn = torch.bmm(context, queryT)
- if raw_feature_norm == "softmax":
- # --> (batch*sourceL, queryL)
- attn = attn.view(batch_size * sourceL, queryL)
- attn = nn.Softmax()(attn)
- # --> (batch, sourceL, queryL)
- attn = attn.view(batch_size, sourceL, queryL)
- elif raw_feature_norm == "l2norm":
- attn = l2norm(attn, 2)
- elif raw_feature_norm == "clipped_l2norm":
- attn = nn.LeakyReLU(0.1)(attn)
- attn = l2norm(attn, 2)
- else:
- raise ValueError("unknown first norm type:", raw_feature_norm)
- # --> (batch, queryL, sourceL)
- attn = torch.transpose(attn, 1, 2).contiguous()
- # --> (batch*queryL, sourceL)
- attn = attn.view(batch_size * queryL, sourceL)
- attn = nn.Softmax()(attn * smooth)
- # --> (batch, queryL, sourceL)
- attn = attn.view(batch_size, queryL, sourceL)
- # --> (batch, sourceL, queryL)
- attnT = torch.transpose(attn, 1, 2).contiguous()
-
- # --> (batch, d, sourceL)
- contextT = torch.transpose(context, 1, 2)
- # (batch x d x sourceL)(batch x sourceL x queryL)
- # --> (batch, d, queryL)
- weightedContext = torch.bmm(contextT, attnT)
- # --> (batch, queryL, d)
- weightedContext = torch.transpose(weightedContext, 1, 2)
-
- return weightedContext, attnT
-
-
-class BiMultiHeadAttention(nn.Module):
- def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None):
- super(BiMultiHeadAttention, self).__init__()
-
- self.embed_dim = embed_dim
- self.num_heads = num_heads
- self.head_dim = embed_dim // num_heads
- self.v_dim = v_dim
- self.l_dim = l_dim
-
- assert (
- self.head_dim * self.num_heads == self.embed_dim
- ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
- self.scale = self.head_dim ** (-0.5)
- self.dropout = dropout
-
- self.v_proj = nn.Linear(self.v_dim, self.embed_dim)
- self.l_proj = nn.Linear(self.l_dim, self.embed_dim)
- self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim)
- self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim)
-
- self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim)
- self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim)
-
- self.stable_softmax_2d = True
- self.clamp_min_for_underflow = True
- self.clamp_max_for_overflow = True
-
- self._reset_parameters()
-
- def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
- return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
-
- def _reset_parameters(self):
- nn.init.xavier_uniform_(self.v_proj.weight)
- self.v_proj.bias.data.fill_(0)
- nn.init.xavier_uniform_(self.l_proj.weight)
- self.l_proj.bias.data.fill_(0)
- nn.init.xavier_uniform_(self.values_v_proj.weight)
- self.values_v_proj.bias.data.fill_(0)
- nn.init.xavier_uniform_(self.values_l_proj.weight)
- self.values_l_proj.bias.data.fill_(0)
- nn.init.xavier_uniform_(self.out_v_proj.weight)
- self.out_v_proj.bias.data.fill_(0)
- nn.init.xavier_uniform_(self.out_l_proj.weight)
- self.out_l_proj.bias.data.fill_(0)
-
- def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
- """_summary_
-
- Args:
- v (_type_): bs, n_img, dim
- l (_type_): bs, n_text, dim
- attention_mask_v (_type_, optional): _description_. bs, n_img
- attention_mask_l (_type_, optional): _description_. bs, n_text
-
- Returns:
- _type_: _description_
- """
- # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
- # import ipdb; ipdb.set_trace()
- bsz, tgt_len, _ = v.size()
-
- query_states = self.v_proj(v) * self.scale
- key_states = self._shape(self.l_proj(l), -1, bsz)
- value_v_states = self._shape(self.values_v_proj(v), -1, bsz)
- value_l_states = self._shape(self.values_l_proj(l), -1, bsz)
-
- proj_shape = (bsz * self.num_heads, -1, self.head_dim)
- query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
- key_states = key_states.view(*proj_shape)
- value_v_states = value_v_states.view(*proj_shape)
- value_l_states = value_l_states.view(*proj_shape)
-
- src_len = key_states.size(1)
- attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt
-
- if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
- raise ValueError(
- f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
- )
-
- if self.stable_softmax_2d:
- attn_weights = attn_weights - attn_weights.max()
-
- if self.clamp_min_for_underflow:
- attn_weights = torch.clamp(
- attn_weights, min=-50000
- ) # Do not increase -50000, data type half has quite limited range
- if self.clamp_max_for_overflow:
- attn_weights = torch.clamp(
- attn_weights, max=50000
- ) # Do not increase 50000, data type half has quite limited range
-
- attn_weights_T = attn_weights.transpose(1, 2)
- attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0]
- if self.clamp_min_for_underflow:
- attn_weights_l = torch.clamp(
- attn_weights_l, min=-50000
- ) # Do not increase -50000, data type half has quite limited range
- if self.clamp_max_for_overflow:
- attn_weights_l = torch.clamp(
- attn_weights_l, max=50000
- ) # Do not increase 50000, data type half has quite limited range
-
-        # mask vision for language
- if attention_mask_v is not None:
- attention_mask_v = (
- attention_mask_v[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
- )
- attn_weights_l.masked_fill_(attention_mask_v, float("-inf"))
-
- attn_weights_l = attn_weights_l.softmax(dim=-1)
-
- # mask language for vision
- if attention_mask_l is not None:
- attention_mask_l = (
- attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
- )
- attn_weights.masked_fill_(attention_mask_l, float("-inf"))
- attn_weights_v = attn_weights.softmax(dim=-1)
-
- attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training)
- attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training)
-
- attn_output_v = torch.bmm(attn_probs_v, value_l_states)
- attn_output_l = torch.bmm(attn_probs_l, value_v_states)
-
- if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
- raise ValueError(
- f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}"
- )
-
- if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim):
- raise ValueError(
- f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}"
- )
-
- attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim)
- attn_output_v = attn_output_v.transpose(1, 2)
- attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim)
-
- attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim)
- attn_output_l = attn_output_l.transpose(1, 2)
- attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim)
-
- attn_output_v = self.out_v_proj(attn_output_v)
- attn_output_l = self.out_l_proj(attn_output_l)
-
- return attn_output_v, attn_output_l
-
-
-# Bi-Direction MHA (text->image, image->text)
-class BiAttentionBlock(nn.Module):
- def __init__(
- self,
- v_dim,
- l_dim,
- embed_dim,
- num_heads,
- dropout=0.1,
- drop_path=0.0,
- init_values=1e-4,
- cfg=None,
- ):
- """
- Inputs:
- embed_dim - Dimensionality of input and attention feature vectors
- hidden_dim - Dimensionality of hidden layer in feed-forward network
- (usually 2-4x larger than embed_dim)
- num_heads - Number of heads to use in the Multi-Head Attention block
- dropout - Amount of dropout to apply in the feed-forward network
- """
- super(BiAttentionBlock, self).__init__()
-
- # pre layer norm
- self.layer_norm_v = nn.LayerNorm(v_dim)
- self.layer_norm_l = nn.LayerNorm(l_dim)
- self.attn = BiMultiHeadAttention(
- v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout
- )
-
- # add layer scale for training stability
- self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
- self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True)
- self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True)
-
- def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
- v = self.layer_norm_v(v)
- l = self.layer_norm_l(l)
- delta_v, delta_l = self.attn(
- v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l
- )
- # v, l = v + delta_v, l + delta_l
- v = v + self.drop_path(self.gamma_v * delta_v)
- l = l + self.drop_path(self.gamma_l * delta_l)
- return v, l
-
- # def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None)
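-
-
-def _bi_attention_demo():
-    # Illustrative usage sketch (hypothetical shapes, not part of the original file):
-    # fuses a batch of image tokens (bs, n_img, v_dim) with text tokens
-    # (bs, n_text, l_dim) through a single BiAttentionBlock.
-    fuser = BiAttentionBlock(v_dim=256, l_dim=768, embed_dim=256, num_heads=8)
-    v = torch.randn(2, 100, 256)  # vision features
-    l = torch.randn(2, 16, 768)   # language features
-    return fuser(v, l)  # updated (v, l), same shapes as the inputs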
diff --git a/spaces/ZJunTvT/ZJunChat/run_Windows.bat b/spaces/ZJunTvT/ZJunChat/run_Windows.bat
deleted file mode 100644
index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000
--- a/spaces/ZJunTvT/ZJunChat/run_Windows.bat
+++ /dev/null
@@ -1,5 +0,0 @@
-@echo off
-echo Opening ChuanhuChatGPT...
-
-REM Open powershell via bat
-start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
diff --git a/spaces/a-v-bely/spanish-task-generator/utilities_language_general/esp_constants.py b/spaces/a-v-bely/spanish-task-generator/utilities_language_general/esp_constants.py
deleted file mode 100644
index 034492b0f865f5755f3c35b4afa40ae9f6057447..0000000000000000000000000000000000000000
--- a/spaces/a-v-bely/spanish-task-generator/utilities_language_general/esp_constants.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import json
-import spacy
-import gensim
-import streamlit as st
-from transformers import pipeline
-
-
-@st.cache_resource
-def load_w2v(model_path):
- with st.spinner('Загружаю языковую модель'):
- _w2v_model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=True)
- return _w2v_model
-
-
-@st.cache_resource
-def load_spacy():
- with st.spinner('Загружаю морфо-синтаксический парсер'):
- _nlp = spacy.load('es_core_news_lg')
- return _nlp
-
-
-@st.cache_resource
-def load_bert():
- with st.spinner('Загружаю языковую модель'):
- _pipeline = pipeline(task="fill-mask", model="a-v-white/bert-base-spanish-wwm-cased-finetuned-literature-pro")
- return _pipeline
-
-
-nlp = load_spacy()
-w2v_model_1_path = r'model1.gz'
-w2v_model_2_path = r'model2.gz'
-
-# Upload minimums
-a1_path, a1_target_set = r'lexical_minimums/A1_MINIMUM.txt', set()
-a2_path, a2_target_set = r'lexical_minimums/A2_MINIMUM.txt', set()
-b1_path, b1_target_set = r'lexical_minimums/B1_MINIMUM.txt', set()
-b2_path, b2_target_set = r'lexical_minimums/B2_MINIMUM.txt', set()
-c1_path, c1_target_set = r'lexical_minimums/C1_MINIMUM.txt', set()
-c2_path, c2_target_set = r'lexical_minimums/C2_MINIMUM.txt', set()
-minimums_paths = (a1_path, a2_path, b1_path, b2_path)
-minimums_sets = (a1_target_set, a2_target_set, b1_target_set, b2_target_set, c1_target_set, c2_target_set)
-for i in range(len(minimums_paths)):
- with open(minimums_paths[i], 'r', encoding='utf-8') as read_file:
- for line in read_file:
- minimums_sets[i].add(line.strip())
-
-a1_distractor_set = a1_target_set
-a2_distractor_set = a2_target_set.union(a1_target_set)
-b1_distractor_set = b1_target_set.union(a2_target_set)
-b2_distractor_set = b2_target_set.union(b1_target_set)
-c1_distractor_set = c1_target_set.union(b2_target_set)
-c2_distractor_set = c2_target_set.union(c1_target_set)
-
-with open('language_data/phrases.json', 'r', encoding='utf-8') as f:
- PHRASES = set(json.load(f)['PHRASES'])
-
-with open('language_data/fix_irregular_lemma.json', 'r', encoding='utf-8') as f:
- FIX_LEMMA = json.load(f)
-
-SIMILARITY_VALUES = {'A1': 1.0, 'A2': 1.0, 'B1': 1.0, 'B2': 1.0, 'C1': 1.0, 'C2': 1.0, 'Без уровня': 1.0}
-SIMILARITY_VALUES_bert = {'A1': 1.0, 'A2': 1.0, 'B1': 1.0, 'B2': 1.0, 'C1': 1.0, 'C2': 1.0, 'Без уровня': 1.0}
-
-BAD_USER_TARGET_WORDS = []
diff --git a/spaces/aadnk/whisper-webui/src/prompts/prependPromptStrategy.py b/spaces/aadnk/whisper-webui/src/prompts/prependPromptStrategy.py
deleted file mode 100644
index 6f8b6eba5b98310f57a656db73b5e415de3af958..0000000000000000000000000000000000000000
--- a/spaces/aadnk/whisper-webui/src/prompts/prependPromptStrategy.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from src.config import VadInitialPromptMode
-from src.prompts.abstractPromptStrategy import AbstractPromptStrategy
-
-class PrependPromptStrategy(AbstractPromptStrategy):
- """
- A simple prompt strategy that prepends a single prompt to all segments of audio, or prepends the prompt to the first segment of audio.
- """
- def __init__(self, initial_prompt: str, initial_prompt_mode: VadInitialPromptMode):
- """
- Parameters
- ----------
- initial_prompt: str
- The initial prompt to use for the transcription.
- initial_prompt_mode: VadInitialPromptMode
- The mode to use for the initial prompt. If set to PREPEND_FIRST_SEGMENT, the initial prompt will be prepended to the first segment of audio.
- If set to PREPEND_ALL_SEGMENTS, the initial prompt will be prepended to all segments of audio.
- """
- self.initial_prompt = initial_prompt
- self.initial_prompt_mode = initial_prompt_mode
-
- # This is a simple prompt strategy, so we only support these two modes
- if initial_prompt_mode not in [VadInitialPromptMode.PREPEND_ALL_SEGMENTS, VadInitialPromptMode.PREPREND_FIRST_SEGMENT]:
- raise ValueError(f"Unsupported initial prompt mode {initial_prompt_mode}")
-
- def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str:
- if (self.initial_prompt_mode == VadInitialPromptMode.PREPEND_ALL_SEGMENTS):
- return self._concat_prompt(self.initial_prompt, whisper_prompt)
- elif (self.initial_prompt_mode == VadInitialPromptMode.PREPREND_FIRST_SEGMENT):
- return self._concat_prompt(self.initial_prompt, whisper_prompt) if segment_index == 0 else whisper_prompt
- else:
- raise ValueError(f"Unknown initial prompt mode {self.initial_prompt_mode}")
\ No newline at end of file
diff --git a/spaces/abdvl/datahub_qa_bot/docs/advanced/mcp-mcl.md b/spaces/abdvl/datahub_qa_bot/docs/advanced/mcp-mcl.md
deleted file mode 100644
index 5a9052c19155b6e3d4f9531364d367579d21a833..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/advanced/mcp-mcl.md
+++ /dev/null
@@ -1,159 +0,0 @@
-# MetadataChangeProposal & MetadataChangeLog Events
-
-## Overview & Vision
-
-As of release v0.8.7, two new important event streams have been introduced: MetadataChangeProposal & MetadataChangeLog. These topics serve as more generic (and more appropriately named) versions of the classic MetadataChangeEvent and MetadataAuditEvent events, used for a) proposing and b) logging changes to the DataHub Metadata Graph.
-
-With these events, we move towards a more generic world, in which Metadata models are not strongly-typed parts of the event schemas themselves. This provides flexibility, allowing for the core models comprising the Metadata Graph to be added and changed dynamically, without requiring structural updates to Kafka or REST API schemas used for ingesting and serving Metadata.
-
-Moreover, we've focused on the "aspect" as the atomic unit of write in DataHub. MetadataChangeProposal & MetadataChangeLog will carry only a single aspect in their payload, as opposed to the list of aspects carried by today's MCE & MAE. This more accurately reflects the atomicity contract of the metadata model, hopefully lessening confusion about transactional guarantees for multi-aspect writes in addition to making it simpler to tune into the metadata changes a consumer cares about.
-
-Making these events more generic does not come for free; we give up some safety in the form of Rest.li and Kafka-native schema validation and defer this responsibility to DataHub itself, which is the sole enforcer of the graph model contracts. Additionally, unbundling the actual metadata now requires a double deserialization: once for the event / response body itself and once for the nested Metadata aspect.
-
-To mitigate these downsides, we are committed to providing cross-language client libraries capable of doing the hard work for you. We intend to publish these as strongly-typed artifacts generated from the "default" model set DataHub ships with. This stands in addition to an initiative to introduce an OpenAPI layer in DataHub's backend (gms) which would provide a strongly typed model.
-
-Ultimately, we intend to realize a state in which the Entities and Aspect schemas can be altered without requiring generated code and without maintaining a single mega-model schema (looking at you, Snapshot.pdl). The intention is that changes to the metadata model become even easier than they are today.
-
-## Modeling
-
-A Metadata Change Proposal is defined (in PDL) as follows
-
-```protobuf
-record MetadataChangeProposal {
-
- /**
- * Kafka audit header. See go/kafkaauditheader for more info.
- */
- auditHeader: optional KafkaAuditHeader
-
- /**
- * Type of the entity being written to
- */
- entityType: string
-
- /**
- * Urn of the entity being written
- **/
- entityUrn: optional Urn,
-
- /**
- * Key aspect of the entity being written
- */
- entityKeyAspect: optional GenericAspect
-
- /**
- * Type of change being proposed
- */
- changeType: ChangeType
-
- /**
- * Aspect of the entity being written to
- * Not filling this out implies that the writer wants to affect the entire entity
- * Note: This is only valid for CREATE and DELETE operations.
- **/
- aspectName: optional string
-
- aspect: optional GenericAspect
-
- /**
- * A string->string map of custom properties that one might want to attach to an event
- **/
- systemMetadata: optional SystemMetadata
-
-}
-```
-
-Each proposal comprises the following:
-
-1. entityType
-
-   Refers to the type of the entity, e.g. dataset or chart.
-
-2. entityUrn
-
- Urn of the entity being updated. Note, **exactly one** of entityUrn or entityKeyAspect must be filled out to correctly identify an entity.
-
-3. entityKeyAspect
-
- Key aspect of the entity. Instead of having a string URN, we will support identifying entities by their key aspect structs. Note, this is not supported as of now.
-
-4. changeType
-
- Type of change you are proposing: one of
-
- - UPSERT: Insert if not exists, update otherwise
- - CREATE: Insert if not exists, fail otherwise
- - UPDATE: Update if exists, fail otherwise
- - DELETE: Delete
- - PATCH: Patch the aspect instead of doing a full replace
-
- Only UPSERT is supported as of now.
-
-5. aspectName
-
- Name of the aspect. Must match the name in the "@Aspect" annotation.
-
-6. aspect
-
- To support strongly typed aspects, without having to keep track of a union of all existing aspects, we introduced a new object called GenericAspect.
-
-   ```protobuf
- record GenericAspect {
- value: bytes
- contentType: string
- }
- ```
-
-   It contains the serialization format and the serialized value. Note that "application/json" is currently the only supported contentType, but more forms of serialization will be added in the future. Validation of the serialized object happens in GMS against the schema matching the aspectName. A short sketch of how an aspect is packed into this envelope is shown right after this list.
-
-7. systemMetadata
-
- Extra metadata about the proposal like run_id or updated timestamp.
-
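-To make the envelope concrete, here is a minimal, illustrative Python sketch that packs an aspect into a GenericAspect and wraps it in a proposal. The field names mirror the PDL record above; the aspect contents and the URN are made-up example values, and the emitter used to actually send the proposal is omitted because it depends on your client library.
-
-```python
-import json
-
-# Hypothetical payload for a "datasetProfile" aspect (illustrative values only).
-dataset_profile = {"rowCount": 4500, "columnCount": 18}
-
-# GenericAspect: the serialized value plus its content type.
-# "application/json" is the only supported contentType today.
-generic_aspect = {
-    "value": json.dumps(dataset_profile).encode("utf-8"),
-    "contentType": "application/json",
-}
-
-# MetadataChangeProposal envelope, using the field names from the PDL above.
-# Exactly one of entityUrn / entityKeyAspect identifies the entity, and only
-# UPSERT is supported as a changeType today.
-mcp = {
-    "entityType": "dataset",
-    "entityUrn": "urn:li:dataset:(urn:li:dataPlatform:hive,SampleTable,PROD)",
-    "changeType": "UPSERT",
-    "aspectName": "datasetProfile",
-    "aspect": generic_aspect,
-}
-```
-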
-GMS processes the proposal and produces the Metadata Change Log, which looks like this.
-
-```protobuf
-record MetadataChangeLog includes MetadataChangeProposal {
-
- previousAspectValue: optional GenericAspect
-
- previousSystemMetadata: optional SystemMetadata
-
-}
-```
-
-It includes all fields in the proposal, but also has the previous version of the aspect value and system metadata. This allows the MCL processor to know the previous value before deciding to update all indices.
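-
-As a rough illustration of why the previous value is carried along, a downstream MCL consumer could diff the two aspect versions before touching any index. The sketch below is a minimal example assuming the event has already been deserialized into a Python dict whose keys mirror the PDL records above; the helper name and field access are illustrative, not part of any DataHub client API.
-
-```python
-import json
-
-def changed_fields(mcl_event: dict) -> set:
-    """Return the top-level keys whose values differ between the previous
-    and new aspect payloads of an MCL event (illustrative helper only)."""
-    new_aspect = json.loads(mcl_event["aspect"]["value"])
-    prev = mcl_event.get("previousAspectValue")
-    prev_aspect = json.loads(prev["value"]) if prev else {}
-    keys = set(new_aspect) | set(prev_aspect)
-    return {k for k in keys if new_aspect.get(k) != prev_aspect.get(k)}
-```
-
-A consumer that finds no changed fields could, for instance, skip re-indexing that aspect entirely.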
-
-## Topics
-
-Following the change in our event models, we introduced 4 new topics. The old topics will get deprecated as we fully migrate to this model.
-
-1. **MetadataChangeProposal_v1, FailedMetadataChangeProposal_v1**
-
-   Analogous to the MCE topic, proposals produced into the MetadataChangeProposal_v1 topic will get ingested into GMS asynchronously, and any failed ingestion will produce a failed MCP in the FailedMetadataChangeProposal_v1 topic.
-
-
-2. **MetadataChangeLog_Versioned_v1**
-
- Analogous to the MAE topic, MCLs for versioned aspects will get produced into this topic. Since versioned aspects have a source of truth that can be separately backed up, the retention of this topic is short (by default 7 days). Note both this and the next topic are consumed by the same MCL processor.
-
-
-3. **MetadataChangeLog_Timeseries_v1**
-
-   Analogous to the MAE topic, MCLs for timeseries aspects will get produced into this topic. Since timeseries aspects do not have a separate source of truth, but rather get ingested straight into Elasticsearch, we set the retention of this topic to be longer (90 days). You can back up timeseries aspects by replaying this topic.
-
-## Configuration
-
-With MetadataChangeProposal and MetadataChangeLog, we will introduce a new mechanism for configuring the association between Metadata Entities & Aspects. Specifically, the Snapshot.pdl model will no longer encode this information by way of [Rest.li](http://rest.li) union. Instead, a more explicit yaml file will provide these links. This file will be leveraged at runtime to construct the in-memory Entity Registry which contains the global Metadata schema along with some additional metadata.
-
-Below is an example of the configuration file that will be used for MCP & MCL; it defines a "dataset" entity associated with two aspects: "datasetKey" and "datasetProfile".
-
-```
-# entity-registry.yml
-
-entities:
- - name: dataset
- keyAspect: datasetKey
- aspects:
- - datasetProfile
-```
\ No newline at end of file
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py
deleted file mode 100644
index 330b778ebad8d48d55d09ddd42baa70ec10ae463..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from mmcv.cnn import ConvModule
-
-from mmdet.models.builder import HEADS
-from .fcn_mask_head import FCNMaskHead
-
-
-@HEADS.register_module()
-class HTCMaskHead(FCNMaskHead):
-
- def __init__(self, with_conv_res=True, *args, **kwargs):
- super(HTCMaskHead, self).__init__(*args, **kwargs)
- self.with_conv_res = with_conv_res
- if self.with_conv_res:
- self.conv_res = ConvModule(
- self.conv_out_channels,
- self.conv_out_channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg)
-
- def init_weights(self):
- super(HTCMaskHead, self).init_weights()
- if self.with_conv_res:
- self.conv_res.init_weights()
-
- def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
- if res_feat is not None:
- assert self.with_conv_res
- res_feat = self.conv_res(res_feat)
- x = x + res_feat
- for conv in self.convs:
- x = conv(x)
- res_feat = x
- outs = []
- if return_logits:
- x = self.upsample(x)
- if self.upsample_method == 'deconv':
- x = self.relu(x)
- mask_pred = self.conv_logits(x)
- outs.append(mask_pred)
- if return_feat:
- outs.append(res_feat)
- return outs if len(outs) > 1 else outs[0]
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/datasets/stare.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/datasets/stare.py
deleted file mode 100644
index 15fe527680755815b0f06dfed32f35ee5af02e63..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/datasets/stare.py
+++ /dev/null
@@ -1,39 +0,0 @@
-'''
- * Copyright (c) 2023 Salesforce, Inc.
- * All rights reserved.
- * SPDX-License-Identifier: Apache License 2.0
- * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
- * By Can Qin
- * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
- * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
- * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv
- * Copyright (c) OpenMMLab. All rights reserved.
-'''
-
-import os.path as osp
-
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class STAREDataset(CustomDataset):
- """STARE dataset.
-
- In segmentation map annotation for STARE, 0 stands for background, which is
- included in 2 categories. ``reduce_zero_label`` is fixed to False. The
- ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
- '.ah.png'.
- """
-
- CLASSES = ('background', 'vessel')
-
- PALETTE = [[120, 120, 120], [6, 230, 230]]
-
- def __init__(self, **kwargs):
- super(STAREDataset, self).__init__(
- img_suffix='.png',
- seg_map_suffix='.ah.png',
- reduce_zero_label=False,
- **kwargs)
- assert osp.exists(self.img_dir)
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/renderer.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/renderer.py
deleted file mode 100644
index 5ae14c5cdb1785226a52ae6b71b08f01de069962..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/pyrender/renderer.py
+++ /dev/null
@@ -1,1339 +0,0 @@
-"""PBR renderer for Python.
-
-Author: Matthew Matl
-"""
-import sys
-
-import numpy as np
-import PIL
-
-from .constants import (RenderFlags, TextAlign, GLTF, BufFlags, TexFlags,
- ProgramFlags, DEFAULT_Z_FAR, DEFAULT_Z_NEAR,
- SHADOW_TEX_SZ, MAX_N_LIGHTS)
-from .shader_program import ShaderProgramCache
-from .material import MetallicRoughnessMaterial, SpecularGlossinessMaterial
-from .light import PointLight, SpotLight, DirectionalLight
-from .font import FontCache
-from .utils import format_color_vector
-
-from OpenGL.GL import *
-
-
-class Renderer(object):
- """Class for handling all rendering operations on a scene.
-
- Note
- ----
- This renderer relies on the existence of an OpenGL context and
- does not create one on its own.
-
- Parameters
- ----------
- viewport_width : int
- Width of the viewport in pixels.
- viewport_height : int
-        Height of the viewport in pixels.
- point_size : float, optional
- Size of points in pixels. Defaults to 1.0.
- """
-
- def __init__(self, viewport_width, viewport_height, point_size=1.0):
- self.dpscale = 1
- # Scaling needed on retina displays
- if sys.platform == 'darwin':
- self.dpscale = 2
-
- self.viewport_width = viewport_width
- self.viewport_height = viewport_height
- self.point_size = point_size
-
- # Optional framebuffer for offscreen renders
- self._main_fb = None
- self._main_cb = None
- self._main_db = None
- self._main_fb_ms = None
- self._main_cb_ms = None
- self._main_db_ms = None
- self._main_fb_dims = (None, None)
- self._shadow_fb = None
- self._latest_znear = DEFAULT_Z_NEAR
- self._latest_zfar = DEFAULT_Z_FAR
-
- # Shader Program Cache
- self._program_cache = ShaderProgramCache()
- self._font_cache = FontCache()
- self._meshes = set()
- self._mesh_textures = set()
- self._shadow_textures = set()
- self._texture_alloc_idx = 0
-
- @property
- def viewport_width(self):
- """int : The width of the main viewport, in pixels.
- """
- return self._viewport_width
-
- @viewport_width.setter
- def viewport_width(self, value):
- self._viewport_width = self.dpscale * value
-
- @property
- def viewport_height(self):
- """int : The height of the main viewport, in pixels.
- """
- return self._viewport_height
-
- @viewport_height.setter
- def viewport_height(self, value):
- self._viewport_height = self.dpscale * value
-
- @property
- def point_size(self):
- """float : The size of screen-space points, in pixels.
- """
- return self._point_size
-
- @point_size.setter
- def point_size(self, value):
- self._point_size = float(value)
-
- def render(self, scene, flags, seg_node_map=None):
- """Render a scene with the given set of flags.
-
- Parameters
- ----------
- scene : :class:`Scene`
- A scene to render.
- flags : int
- A specification from :class:`.RenderFlags`.
- seg_node_map : dict
- A map from :class:`.Node` objects to (3,) colors for each.
- If specified along with flags set to :attr:`.RenderFlags.SEG`,
- the color image will be a segmentation image.
-
- Returns
- -------
- color_im : (h, w, 3) uint8 or (h, w, 4) uint8
- If :attr:`RenderFlags.OFFSCREEN` is set, the color buffer. This is
- normally an RGB buffer, but if :attr:`.RenderFlags.RGBA` is set,
- the buffer will be a full RGBA buffer.
- depth_im : (h, w) float32
- If :attr:`RenderFlags.OFFSCREEN` is set, the depth buffer
- in linear units.
- """
- # Update context with meshes and textures
- self._update_context(scene, flags)
-
- # Render necessary shadow maps
- if not bool(flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG):
- for ln in scene.light_nodes:
- take_pass = False
- if (isinstance(ln.light, DirectionalLight) and
- bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)):
- take_pass = True
- elif (isinstance(ln.light, SpotLight) and
- bool(flags & RenderFlags.SHADOWS_SPOT)):
- take_pass = True
- elif (isinstance(ln.light, PointLight) and
- bool(flags & RenderFlags.SHADOWS_POINT)):
- take_pass = True
- if take_pass:
- self._shadow_mapping_pass(scene, ln, flags)
-
- # Make forward pass
- retval = self._forward_pass(scene, flags, seg_node_map=seg_node_map)
-
- # If necessary, make normals pass
- if flags & (RenderFlags.VERTEX_NORMALS | RenderFlags.FACE_NORMALS):
- self._normals_pass(scene, flags)
-
- # Update camera settings for retrieving depth buffers
- self._latest_znear = scene.main_camera_node.camera.znear
- self._latest_zfar = scene.main_camera_node.camera.zfar
-
- return retval
-
- def render_text(self, text, x, y, font_name='OpenSans-Regular',
- font_pt=40, color=None, scale=1.0,
- align=TextAlign.BOTTOM_LEFT):
- """Render text into the current viewport.
-
- Note
- ----
- This cannot be done into an offscreen buffer.
-
- Parameters
- ----------
- text : str
- The text to render.
- x : int
- Horizontal pixel location of text.
- y : int
- Vertical pixel location of text.
- font_name : str
- Name of font, from the ``pyrender/fonts`` folder, or
- a path to a ``.ttf`` file.
- font_pt : int
- Height of the text, in font points.
- color : (4,) float
- The color of the text. Default is black.
- scale : int
- Scaling factor for text.
- align : int
- One of the :class:`TextAlign` options which specifies where the
- ``x`` and ``y`` parameters lie on the text. For example,
- :attr:`TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate
- the position of the bottom-left corner of the textbox.
- """
- x *= self.dpscale
- y *= self.dpscale
- font_pt *= self.dpscale
-
- if color is None:
- color = np.array([0.0, 0.0, 0.0, 1.0])
- else:
- color = format_color_vector(color, 4)
-
- # Set up viewport for render
- self._configure_forward_pass_viewport(0)
-
- # Load font
- font = self._font_cache.get_font(font_name, font_pt)
- if not font._in_context():
- font._add_to_context()
-
- # Load program
- program = self._get_text_program()
- program._bind()
-
- # Set uniforms
- p = np.eye(4)
- p[0,0] = 2.0 / self.viewport_width
- p[0,3] = -1.0
- p[1,1] = 2.0 / self.viewport_height
- p[1,3] = -1.0
- program.set_uniform('projection', p)
- program.set_uniform('text_color', color)
-
- # Draw text
- font.render_string(text, x, y, scale, align)
-
- def read_color_buf(self):
- """Read and return the current viewport's color buffer.
-
- Alpha cannot be computed for an on-screen buffer.
-
- Returns
- -------
- color_im : (h, w, 3) uint8
- The color buffer in RGB byte format.
- """
- # Extract color image from frame buffer
- width, height = self.viewport_width, self.viewport_height
- glBindFramebuffer(GL_READ_FRAMEBUFFER, 0)
- glReadBuffer(GL_FRONT)
- color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)
-
- # Re-format them into numpy arrays
- color_im = np.frombuffer(color_buf, dtype=np.uint8)
- color_im = color_im.reshape((height, width, 3))
- color_im = np.flip(color_im, axis=0)
-
- # Resize for macos if needed
- if sys.platform == 'darwin':
- color_im = self._resize_image(color_im, True)
-
- return color_im
-
- def read_depth_buf(self):
-        """Read and return the current viewport's depth buffer.
-
- Returns
- -------
- depth_im : (h, w) float32
- The depth buffer in linear units.
- """
- width, height = self.viewport_width, self.viewport_height
- glBindFramebuffer(GL_READ_FRAMEBUFFER, 0)
- glReadBuffer(GL_FRONT)
- depth_buf = glReadPixels(
- 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT
- )
-
- depth_im = np.frombuffer(depth_buf, dtype=np.float32)
- depth_im = depth_im.reshape((height, width))
- depth_im = np.flip(depth_im, axis=0)
-
- inf_inds = (depth_im == 1.0)
- depth_im = 2.0 * depth_im - 1.0
- z_near, z_far = self._latest_znear, self._latest_zfar
- noninf = np.logical_not(inf_inds)
- if z_far is None:
- depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf])
- else:
- depth_im[noninf] = ((2.0 * z_near * z_far) /
- (z_far + z_near - depth_im[noninf] *
- (z_far - z_near)))
- depth_im[inf_inds] = 0.0
-
- # Resize for macos if needed
- if sys.platform == 'darwin':
- depth_im = self._resize_image(depth_im)
-
- return depth_im
-
- def delete(self):
- """Free all allocated OpenGL resources.
- """
- # Free shaders
- self._program_cache.clear()
-
- # Free fonts
- self._font_cache.clear()
-
- # Free meshes
- for mesh in self._meshes:
- for p in mesh.primitives:
- p.delete()
-
- # Free textures
- for mesh_texture in self._mesh_textures:
- mesh_texture.delete()
-
- for shadow_texture in self._shadow_textures:
- shadow_texture.delete()
-
- self._meshes = set()
- self._mesh_textures = set()
- self._shadow_textures = set()
- self._texture_alloc_idx = 0
-
- self._delete_main_framebuffer()
- self._delete_shadow_framebuffer()
-
- def __del__(self):
- try:
- self.delete()
- except Exception:
- pass
-
- ###########################################################################
- # Rendering passes
- ###########################################################################
-
- def _forward_pass(self, scene, flags, seg_node_map=None):
- # Set up viewport for render
- self._configure_forward_pass_viewport(flags)
-
- # Clear it
- if bool(flags & RenderFlags.SEG):
- glClearColor(0.0, 0.0, 0.0, 1.0)
- if seg_node_map is None:
- seg_node_map = {}
- else:
- glClearColor(*scene.bg_color)
-
- glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
-
- if not bool(flags & RenderFlags.SEG):
- glEnable(GL_MULTISAMPLE)
- else:
- glDisable(GL_MULTISAMPLE)
-
- # Set up camera matrices
- V, P = self._get_camera_matrices(scene)
-
- program = None
- # Now, render each object in sorted order
- for node in self._sorted_mesh_nodes(scene):
- mesh = node.mesh
-
- # Skip the mesh if it's not visible
- if not mesh.is_visible:
- continue
-
- # If SEG, set color
- if bool(flags & RenderFlags.SEG):
- if node not in seg_node_map:
- continue
- color = seg_node_map[node]
- if not isinstance(color, (list, tuple, np.ndarray)):
- color = np.repeat(color, 3)
- else:
- color = np.asanyarray(color)
- color = color / 255.0
-
- for primitive in mesh.primitives:
-
- # First, get and bind the appropriate program
- program = self._get_primitive_program(
- primitive, flags, ProgramFlags.USE_MATERIAL
- )
- program._bind()
-
- # Set the camera uniforms
- program.set_uniform('V', V)
- program.set_uniform('P', P)
- program.set_uniform(
- 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3]
- )
- if bool(flags & RenderFlags.SEG):
- program.set_uniform('color', color)
-
- # Next, bind the lighting
- if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.FLAT or
- flags & RenderFlags.SEG):
- self._bind_lighting(scene, program, node, flags)
-
- # Finally, bind and draw the primitive
- self._bind_and_draw_primitive(
- primitive=primitive,
- pose=scene.get_pose(node),
- program=program,
- flags=flags
- )
- self._reset_active_textures()
-
- # Unbind the shader and flush the output
- if program is not None:
- program._unbind()
- glFlush()
-
- # If doing offscreen render, copy result from framebuffer and return
- if flags & RenderFlags.OFFSCREEN:
- return self._read_main_framebuffer(scene, flags)
- else:
- return
-
- def _shadow_mapping_pass(self, scene, light_node, flags):
- light = light_node.light
-
- # Set up viewport for render
- self._configure_shadow_mapping_viewport(light, flags)
-
- # Set up camera matrices
- V, P = self._get_light_cam_matrices(scene, light_node, flags)
-
- # Now, render each object in sorted order
- for node in self._sorted_mesh_nodes(scene):
- mesh = node.mesh
-
- # Skip the mesh if it's not visible
- if not mesh.is_visible:
- continue
-
- for primitive in mesh.primitives:
-
- # First, get and bind the appropriate program
- program = self._get_primitive_program(
- primitive, flags, ProgramFlags.NONE
- )
- program._bind()
-
- # Set the camera uniforms
- program.set_uniform('V', V)
- program.set_uniform('P', P)
- program.set_uniform(
- 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3]
- )
-
- # Finally, bind and draw the primitive
- self._bind_and_draw_primitive(
- primitive=primitive,
- pose=scene.get_pose(node),
- program=program,
- flags=RenderFlags.DEPTH_ONLY
- )
- self._reset_active_textures()
-
- # Unbind the shader and flush the output
- if program is not None:
- program._unbind()
- glFlush()
-
- def _normals_pass(self, scene, flags):
- # Set up viewport for render
- self._configure_forward_pass_viewport(flags)
- program = None
-
- # Set up camera matrices
- V, P = self._get_camera_matrices(scene)
-
- # Now, render each object in sorted order
- for node in self._sorted_mesh_nodes(scene):
- mesh = node.mesh
-
- # Skip the mesh if it's not visible
- if not mesh.is_visible:
- continue
-
- for primitive in mesh.primitives:
-
- # Skip objects that don't have normals
- if not primitive.buf_flags & BufFlags.NORMAL:
- continue
-
- # First, get and bind the appropriate program
- pf = ProgramFlags.NONE
- if flags & RenderFlags.VERTEX_NORMALS:
- pf = pf | ProgramFlags.VERTEX_NORMALS
- if flags & RenderFlags.FACE_NORMALS:
- pf = pf | ProgramFlags.FACE_NORMALS
- program = self._get_primitive_program(primitive, flags, pf)
- program._bind()
-
- # Set the camera uniforms
- program.set_uniform('V', V)
- program.set_uniform('P', P)
- program.set_uniform('normal_magnitude', 0.05 * primitive.scale)
- program.set_uniform(
- 'normal_color', np.array([0.1, 0.1, 1.0, 1.0])
- )
-
- # Finally, bind and draw the primitive
- self._bind_and_draw_primitive(
- primitive=primitive,
- pose=scene.get_pose(node),
- program=program,
- flags=RenderFlags.DEPTH_ONLY
- )
- self._reset_active_textures()
-
- # Unbind the shader and flush the output
- if program is not None:
- program._unbind()
- glFlush()
-
- ###########################################################################
- # Handlers for binding uniforms and drawing primitives
- ###########################################################################
-
- def _bind_and_draw_primitive(self, primitive, pose, program, flags):
- # Set model pose matrix
- program.set_uniform('M', pose)
-
- # Bind mesh buffers
- primitive._bind()
-
- # Bind mesh material
- if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG):
- material = primitive.material
-
- # Bind textures
- tf = material.tex_flags
- if tf & TexFlags.NORMAL:
- self._bind_texture(material.normalTexture,
- 'material.normal_texture', program)
- if tf & TexFlags.OCCLUSION:
- self._bind_texture(material.occlusionTexture,
- 'material.occlusion_texture', program)
- if tf & TexFlags.EMISSIVE:
- self._bind_texture(material.emissiveTexture,
- 'material.emissive_texture', program)
- if tf & TexFlags.BASE_COLOR:
- self._bind_texture(material.baseColorTexture,
- 'material.base_color_texture', program)
- if tf & TexFlags.METALLIC_ROUGHNESS:
- self._bind_texture(material.metallicRoughnessTexture,
- 'material.metallic_roughness_texture',
- program)
- if tf & TexFlags.DIFFUSE:
- self._bind_texture(material.diffuseTexture,
- 'material.diffuse_texture', program)
- if tf & TexFlags.SPECULAR_GLOSSINESS:
- self._bind_texture(material.specularGlossinessTexture,
- 'material.specular_glossiness_texture',
- program)
-
- # Bind other uniforms
- b = 'material.{}'
- program.set_uniform(b.format('emissive_factor'),
- material.emissiveFactor)
- if isinstance(material, MetallicRoughnessMaterial):
- program.set_uniform(b.format('base_color_factor'),
- material.baseColorFactor)
- program.set_uniform(b.format('metallic_factor'),
- material.metallicFactor)
- program.set_uniform(b.format('roughness_factor'),
- material.roughnessFactor)
- elif isinstance(material, SpecularGlossinessMaterial):
- program.set_uniform(b.format('diffuse_factor'),
- material.diffuseFactor)
- program.set_uniform(b.format('specular_factor'),
- material.specularFactor)
- program.set_uniform(b.format('glossiness_factor'),
- material.glossinessFactor)
-
- # Set blending options
- if material.alphaMode == 'BLEND':
- glEnable(GL_BLEND)
- glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
- else:
- glEnable(GL_BLEND)
- glBlendFunc(GL_ONE, GL_ZERO)
-
- # Set wireframe mode
- wf = material.wireframe
- if flags & RenderFlags.FLIP_WIREFRAME:
- wf = not wf
- if (flags & RenderFlags.ALL_WIREFRAME) or wf:
- glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
- else:
- glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
-
- # Set culling mode
- if material.doubleSided or flags & RenderFlags.SKIP_CULL_FACES:
- glDisable(GL_CULL_FACE)
- else:
- glEnable(GL_CULL_FACE)
- glCullFace(GL_BACK)
- else:
- glEnable(GL_CULL_FACE)
- glEnable(GL_BLEND)
- glCullFace(GL_BACK)
- glBlendFunc(GL_ONE, GL_ZERO)
- glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
-
- # Set point size if needed
- glDisable(GL_PROGRAM_POINT_SIZE)
- if primitive.mode == GLTF.POINTS:
- glEnable(GL_PROGRAM_POINT_SIZE)
- glPointSize(self.point_size)
-
- # Render mesh
- n_instances = 1
- if primitive.poses is not None:
- n_instances = len(primitive.poses)
-
- if primitive.indices is not None:
- glDrawElementsInstanced(
- primitive.mode, primitive.indices.size, GL_UNSIGNED_INT,
- ctypes.c_void_p(0), n_instances
- )
- else:
- glDrawArraysInstanced(
- primitive.mode, 0, len(primitive.positions), n_instances
- )
-
- # Unbind mesh buffers
- primitive._unbind()
-
- def _bind_lighting(self, scene, program, node, flags):
- """Bind all lighting uniform values for a scene.
- """
- max_n_lights = self._compute_max_n_lights(flags)
-
- n_d = min(len(scene.directional_light_nodes), max_n_lights[0])
- n_s = min(len(scene.spot_light_nodes), max_n_lights[1])
- n_p = min(len(scene.point_light_nodes), max_n_lights[2])
- program.set_uniform('ambient_light', scene.ambient_light)
- program.set_uniform('n_directional_lights', n_d)
- program.set_uniform('n_spot_lights', n_s)
- program.set_uniform('n_point_lights', n_p)
- plc = 0
- slc = 0
- dlc = 0
-
- light_nodes = scene.light_nodes
- if (len(scene.directional_light_nodes) > max_n_lights[0] or
- len(scene.spot_light_nodes) > max_n_lights[1] or
- len(scene.point_light_nodes) > max_n_lights[2]):
- light_nodes = self._sorted_nodes_by_distance(
- scene, scene.light_nodes, node
- )
-
- for n in light_nodes:
- light = n.light
- pose = scene.get_pose(n)
- position = pose[:3,3]
- direction = -pose[:3,2]
-
- if isinstance(light, PointLight):
- if plc == max_n_lights[2]:
- continue
- b = 'point_lights[{}].'.format(plc)
- plc += 1
- shadow = bool(flags & RenderFlags.SHADOWS_POINT)
- program.set_uniform(b + 'position', position)
- elif isinstance(light, SpotLight):
- if slc == max_n_lights[1]:
- continue
- b = 'spot_lights[{}].'.format(slc)
- slc += 1
- shadow = bool(flags & RenderFlags.SHADOWS_SPOT)
- las = 1.0 / max(0.001, np.cos(light.innerConeAngle) -
- np.cos(light.outerConeAngle))
- lao = -np.cos(light.outerConeAngle) * las
- program.set_uniform(b + 'direction', direction)
- program.set_uniform(b + 'position', position)
- program.set_uniform(b + 'light_angle_scale', las)
- program.set_uniform(b + 'light_angle_offset', lao)
- else:
- if dlc == max_n_lights[0]:
- continue
- b = 'directional_lights[{}].'.format(dlc)
- dlc += 1
- shadow = bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)
- program.set_uniform(b + 'direction', direction)
-
- program.set_uniform(b + 'color', light.color)
- program.set_uniform(b + 'intensity', light.intensity)
- # if light.range is not None:
- # program.set_uniform(b + 'range', light.range)
- # else:
- # program.set_uniform(b + 'range', 0)
-
- if shadow:
- self._bind_texture(light.shadow_texture,
- b + 'shadow_map', program)
- if not isinstance(light, PointLight):
- V, P = self._get_light_cam_matrices(scene, n, flags)
- program.set_uniform(b + 'light_matrix', P.dot(V))
- else:
- raise NotImplementedError(
- 'Point light shadows not implemented'
- )
-
- def _sorted_mesh_nodes(self, scene):
- cam_loc = scene.get_pose(scene.main_camera_node)[:3,3]
- solid_nodes = []
- trans_nodes = []
- for node in scene.mesh_nodes:
- mesh = node.mesh
- if mesh.is_transparent:
- trans_nodes.append(node)
- else:
- solid_nodes.append(node)
-
- # TODO BETTER SORTING METHOD
- trans_nodes.sort(
- key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc)
- )
- solid_nodes.sort(
- key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc)
- )
-
- return solid_nodes + trans_nodes
-
- def _sorted_nodes_by_distance(self, scene, nodes, compare_node):
- nodes = list(nodes)
- compare_posn = scene.get_pose(compare_node)[:3,3]
- nodes.sort(key=lambda n: np.linalg.norm(
- scene.get_pose(n)[:3,3] - compare_posn)
- )
- return nodes
-
- ###########################################################################
- # Context Management
- ###########################################################################
-
- def _update_context(self, scene, flags):
-
- # Update meshes
- scene_meshes = scene.meshes
-
- # Add new meshes to context
- for mesh in scene_meshes - self._meshes:
- for p in mesh.primitives:
- p._add_to_context()
-
- # Remove old meshes from context
- for mesh in self._meshes - scene_meshes:
- for p in mesh.primitives:
- p.delete()
-
- self._meshes = scene_meshes.copy()
-
- # Update mesh textures
- mesh_textures = set()
- for m in scene_meshes:
- for p in m.primitives:
- mesh_textures |= p.material.textures
-
- # Add new textures to context
- for texture in mesh_textures - self._mesh_textures:
- texture._add_to_context()
-
- # Remove old textures from context
- for texture in self._mesh_textures - mesh_textures:
- texture.delete()
-
- self._mesh_textures = mesh_textures.copy()
-
- shadow_textures = set()
- for l in scene.lights:
- # Create if needed
- active = False
- if (isinstance(l, DirectionalLight) and
- flags & RenderFlags.SHADOWS_DIRECTIONAL):
- active = True
- elif (isinstance(l, PointLight) and
- flags & RenderFlags.SHADOWS_POINT):
- active = True
- elif isinstance(l, SpotLight) and flags & RenderFlags.SHADOWS_SPOT:
- active = True
-
- if active and l.shadow_texture is None:
- l._generate_shadow_texture()
- if l.shadow_texture is not None:
- shadow_textures.add(l.shadow_texture)
-
- # Add new textures to context
- for texture in shadow_textures - self._shadow_textures:
- texture._add_to_context()
-
- # Remove old textures from context
- for texture in self._shadow_textures - shadow_textures:
- texture.delete()
-
- self._shadow_textures = shadow_textures.copy()
-
- ###########################################################################
- # Texture Management
- ###########################################################################
-
- def _bind_texture(self, texture, uniform_name, program):
- """Bind a texture to an active texture unit and return
- the texture unit index that was used.
- """
- tex_id = self._get_next_active_texture()
- glActiveTexture(GL_TEXTURE0 + tex_id)
- texture._bind()
- program.set_uniform(uniform_name, tex_id)
-
- def _get_next_active_texture(self):
- val = self._texture_alloc_idx
- self._texture_alloc_idx += 1
- return val
-
- def _reset_active_textures(self):
- self._texture_alloc_idx = 0
-
- ###########################################################################
- # Camera Matrix Management
- ###########################################################################
-
- def _get_camera_matrices(self, scene):
- main_camera_node = scene.main_camera_node
- if main_camera_node is None:
- raise ValueError('Cannot render scene without a camera')
- P = main_camera_node.camera.get_projection_matrix(
- width=self.viewport_width, height=self.viewport_height
- )
- pose = scene.get_pose(main_camera_node)
- V = np.linalg.inv(pose) # V maps from world to camera
- return V, P
-
- def _get_light_cam_matrices(self, scene, light_node, flags):
- light = light_node.light
- pose = scene.get_pose(light_node).copy()
- s = scene.scale
- camera = light._get_shadow_camera(s)
- P = camera.get_projection_matrix()
- if isinstance(light, DirectionalLight):
- direction = -pose[:3,2]
- c = scene.centroid
- loc = c - direction * s
- pose[:3,3] = loc
- V = np.linalg.inv(pose) # V maps from world to camera
- return V, P
-
- ###########################################################################
- # Shader Program Management
- ###########################################################################
-
- def _get_text_program(self):
- program = self._program_cache.get_program(
- vertex_shader='text.vert',
- fragment_shader='text.frag'
- )
-
- if not program._in_context():
- program._add_to_context()
-
- return program
-
- def _compute_max_n_lights(self, flags):
- max_n_lights = [MAX_N_LIGHTS, MAX_N_LIGHTS, MAX_N_LIGHTS]
- n_tex_units = glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS)
-
- # Reserved texture units: 6
- # Normal Map
- # Occlusion Map
- # Emissive Map
- # Base Color or Diffuse Map
- # MR or SG Map
- # Environment cubemap
-
- n_reserved_textures = 6
- n_available_textures = n_tex_units - n_reserved_textures
-
- # Distribute textures evenly among lights with shadows, with
- # a preference for directional lights
- n_shadow_types = 0
- if flags & RenderFlags.SHADOWS_DIRECTIONAL:
- n_shadow_types += 1
- if flags & RenderFlags.SHADOWS_SPOT:
- n_shadow_types += 1
- if flags & RenderFlags.SHADOWS_POINT:
- n_shadow_types += 1
-
- if n_shadow_types > 0:
- tex_per_light = n_available_textures // n_shadow_types
-
- if flags & RenderFlags.SHADOWS_DIRECTIONAL:
- max_n_lights[0] = (
- tex_per_light +
- (n_available_textures - tex_per_light * n_shadow_types)
- )
- if flags & RenderFlags.SHADOWS_SPOT:
- max_n_lights[1] = tex_per_light
- if flags & RenderFlags.SHADOWS_POINT:
- max_n_lights[2] = tex_per_light
-
- return max_n_lights
-
- def _get_primitive_program(self, primitive, flags, program_flags):
- vertex_shader = None
- fragment_shader = None
- geometry_shader = None
- defines = {}
-
- if (bool(program_flags & ProgramFlags.USE_MATERIAL) and
- not flags & RenderFlags.DEPTH_ONLY and
- not flags & RenderFlags.FLAT and
- not flags & RenderFlags.SEG):
- vertex_shader = 'mesh.vert'
- fragment_shader = 'mesh.frag'
- elif bool(program_flags & (ProgramFlags.VERTEX_NORMALS |
- ProgramFlags.FACE_NORMALS)):
- vertex_shader = 'vertex_normals.vert'
- if primitive.mode == GLTF.POINTS:
- geometry_shader = 'vertex_normals_pc.geom'
- else:
- geometry_shader = 'vertex_normals.geom'
- fragment_shader = 'vertex_normals.frag'
- elif flags & RenderFlags.FLAT:
- vertex_shader = 'flat.vert'
- fragment_shader = 'flat.frag'
- elif flags & RenderFlags.SEG:
- vertex_shader = 'segmentation.vert'
- fragment_shader = 'segmentation.frag'
- else:
- vertex_shader = 'mesh_depth.vert'
- fragment_shader = 'mesh_depth.frag'
-
- # Set up vertex buffer DEFINES
- bf = primitive.buf_flags
- buf_idx = 1
- if bf & BufFlags.NORMAL:
- defines['NORMAL_LOC'] = buf_idx
- buf_idx += 1
- if bf & BufFlags.TANGENT:
- defines['TANGENT_LOC'] = buf_idx
- buf_idx += 1
- if bf & BufFlags.TEXCOORD_0:
- defines['TEXCOORD_0_LOC'] = buf_idx
- buf_idx += 1
- if bf & BufFlags.TEXCOORD_1:
- defines['TEXCOORD_1_LOC'] = buf_idx
- buf_idx += 1
- if bf & BufFlags.COLOR_0:
- defines['COLOR_0_LOC'] = buf_idx
- buf_idx += 1
- if bf & BufFlags.JOINTS_0:
- defines['JOINTS_0_LOC'] = buf_idx
- buf_idx += 1
- if bf & BufFlags.WEIGHTS_0:
- defines['WEIGHTS_0_LOC'] = buf_idx
- buf_idx += 1
- defines['INST_M_LOC'] = buf_idx
-
- # Set up shadow mapping defines
- if flags & RenderFlags.SHADOWS_DIRECTIONAL:
- defines['DIRECTIONAL_LIGHT_SHADOWS'] = 1
- if flags & RenderFlags.SHADOWS_SPOT:
- defines['SPOT_LIGHT_SHADOWS'] = 1
- if flags & RenderFlags.SHADOWS_POINT:
- defines['POINT_LIGHT_SHADOWS'] = 1
- max_n_lights = self._compute_max_n_lights(flags)
- defines['MAX_DIRECTIONAL_LIGHTS'] = max_n_lights[0]
- defines['MAX_SPOT_LIGHTS'] = max_n_lights[1]
- defines['MAX_POINT_LIGHTS'] = max_n_lights[2]
-
- # Set up vertex normal defines
- if program_flags & ProgramFlags.VERTEX_NORMALS:
- defines['VERTEX_NORMALS'] = 1
- if program_flags & ProgramFlags.FACE_NORMALS:
- defines['FACE_NORMALS'] = 1
-
- # Set up material texture defines
- if bool(program_flags & ProgramFlags.USE_MATERIAL):
- tf = primitive.material.tex_flags
- if tf & TexFlags.NORMAL:
- defines['HAS_NORMAL_TEX'] = 1
- if tf & TexFlags.OCCLUSION:
- defines['HAS_OCCLUSION_TEX'] = 1
- if tf & TexFlags.EMISSIVE:
- defines['HAS_EMISSIVE_TEX'] = 1
- if tf & TexFlags.BASE_COLOR:
- defines['HAS_BASE_COLOR_TEX'] = 1
- if tf & TexFlags.METALLIC_ROUGHNESS:
- defines['HAS_METALLIC_ROUGHNESS_TEX'] = 1
- if tf & TexFlags.DIFFUSE:
- defines['HAS_DIFFUSE_TEX'] = 1
- if tf & TexFlags.SPECULAR_GLOSSINESS:
- defines['HAS_SPECULAR_GLOSSINESS_TEX'] = 1
- if isinstance(primitive.material, MetallicRoughnessMaterial):
- defines['USE_METALLIC_MATERIAL'] = 1
- elif isinstance(primitive.material, SpecularGlossinessMaterial):
- defines['USE_GLOSSY_MATERIAL'] = 1
-
- program = self._program_cache.get_program(
- vertex_shader=vertex_shader,
- fragment_shader=fragment_shader,
- geometry_shader=geometry_shader,
- defines=defines
- )
-
- if not program._in_context():
- program._add_to_context()
-
- return program
-
- ###########################################################################
- # Viewport Management
- ###########################################################################
-
- def _configure_forward_pass_viewport(self, flags):
-
- # If using offscreen render, bind main framebuffer
- if flags & RenderFlags.OFFSCREEN:
- self._configure_main_framebuffer()
- glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms)
- else:
- glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0)
-
- glViewport(0, 0, self.viewport_width, self.viewport_height)
- glEnable(GL_DEPTH_TEST)
- glDepthMask(GL_TRUE)
- glDepthFunc(GL_LESS)
- glDepthRange(0.0, 1.0)
-
- def _configure_shadow_mapping_viewport(self, light, flags):
- self._configure_shadow_framebuffer()
- glBindFramebuffer(GL_FRAMEBUFFER, self._shadow_fb)
- light.shadow_texture._bind()
- light.shadow_texture._bind_as_depth_attachment()
- glActiveTexture(GL_TEXTURE0)
- light.shadow_texture._bind()
- glDrawBuffer(GL_NONE)
- glReadBuffer(GL_NONE)
-
- glClear(GL_DEPTH_BUFFER_BIT)
- glViewport(0, 0, SHADOW_TEX_SZ, SHADOW_TEX_SZ)
- glEnable(GL_DEPTH_TEST)
- glDepthMask(GL_TRUE)
- glDepthFunc(GL_LESS)
- glDepthRange(0.0, 1.0)
- glDisable(GL_CULL_FACE)
- glDisable(GL_BLEND)
-
- ###########################################################################
- # Framebuffer Management
- ###########################################################################
-
- def _configure_shadow_framebuffer(self):
- if self._shadow_fb is None:
- self._shadow_fb = glGenFramebuffers(1)
-
- def _delete_shadow_framebuffer(self):
- if self._shadow_fb is not None:
- glDeleteFramebuffers(1, [self._shadow_fb])
-
- def _configure_main_framebuffer(self):
- # If mismatch with prior framebuffer, delete it
- if (self._main_fb is not None and
- self.viewport_width != self._main_fb_dims[0] or
- self.viewport_height != self._main_fb_dims[1]):
- self._delete_main_framebuffer()
-
- # If framebuffer doesn't exist, create it
- if self._main_fb is None:
- # Generate standard buffer
- self._main_cb, self._main_db = glGenRenderbuffers(2)
-
- glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb)
- glRenderbufferStorage(
- GL_RENDERBUFFER, GL_RGBA,
- self.viewport_width, self.viewport_height
- )
-
- glBindRenderbuffer(GL_RENDERBUFFER, self._main_db)
- glRenderbufferStorage(
- GL_RENDERBUFFER, GL_DEPTH_COMPONENT24,
- self.viewport_width, self.viewport_height
- )
-
- self._main_fb = glGenFramebuffers(1)
- glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb)
- glFramebufferRenderbuffer(
- GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- GL_RENDERBUFFER, self._main_cb
- )
- glFramebufferRenderbuffer(
- GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
- GL_RENDERBUFFER, self._main_db
- )
-
- # Generate multisample buffer
- self._main_cb_ms, self._main_db_ms = glGenRenderbuffers(2)
- glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb_ms)
-            # Use up to 4 samples per pixel, but never more than the
-            # GL_MAX_SAMPLES reported by the driver.
-            num_samples = min(glGetIntegerv(GL_MAX_SAMPLES), 4)
-
-            glRenderbufferStorageMultisample(
-                GL_RENDERBUFFER, num_samples, GL_RGBA,
-                self.viewport_width, self.viewport_height
-            )
-
-            glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms)
-            glRenderbufferStorageMultisample(
-                GL_RENDERBUFFER, num_samples, GL_DEPTH_COMPONENT24,
-                self.viewport_width, self.viewport_height
-            )
-
- self._main_fb_ms = glGenFramebuffers(1)
- glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms)
- glFramebufferRenderbuffer(
- GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- GL_RENDERBUFFER, self._main_cb_ms
- )
- glFramebufferRenderbuffer(
- GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
- GL_RENDERBUFFER, self._main_db_ms
- )
-
- self._main_fb_dims = (self.viewport_width, self.viewport_height)
-
- def _delete_main_framebuffer(self):
- if self._main_fb is not None:
- glDeleteFramebuffers(2, [self._main_fb, self._main_fb_ms])
- if self._main_cb is not None:
- glDeleteRenderbuffers(2, [self._main_cb, self._main_cb_ms])
- if self._main_db is not None:
- glDeleteRenderbuffers(2, [self._main_db, self._main_db_ms])
-
- self._main_fb = None
- self._main_cb = None
- self._main_db = None
- self._main_fb_ms = None
- self._main_cb_ms = None
- self._main_db_ms = None
- self._main_fb_dims = (None, None)
-
- def _read_main_framebuffer(self, scene, flags):
- width, height = self._main_fb_dims[0], self._main_fb_dims[1]
-
- # Bind framebuffer and blit buffers
- glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb_ms)
- glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb)
- glBlitFramebuffer(
- 0, 0, width, height, 0, 0, width, height,
- GL_COLOR_BUFFER_BIT, GL_LINEAR
- )
- glBlitFramebuffer(
- 0, 0, width, height, 0, 0, width, height,
- GL_DEPTH_BUFFER_BIT, GL_NEAREST
- )
- glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb)
-
- # Read depth
- depth_buf = glReadPixels(
- 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT
- )
- depth_im = np.frombuffer(depth_buf, dtype=np.float32)
- depth_im = depth_im.reshape((height, width))
- depth_im = np.flip(depth_im, axis=0)
- inf_inds = (depth_im == 1.0)
- depth_im = 2.0 * depth_im - 1.0
- z_near = scene.main_camera_node.camera.znear
- z_far = scene.main_camera_node.camera.zfar
- noninf = np.logical_not(inf_inds)
- if z_far is None:
- depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf])
- else:
- depth_im[noninf] = ((2.0 * z_near * z_far) /
- (z_far + z_near - depth_im[noninf] *
- (z_far - z_near)))
- depth_im[inf_inds] = 0.0
-
- # Resize for macos if needed
- if sys.platform == 'darwin':
- depth_im = self._resize_image(depth_im)
-
- if flags & RenderFlags.DEPTH_ONLY:
- return depth_im
-
- # Read color
- if flags & RenderFlags.RGBA:
- color_buf = glReadPixels(
- 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE
- )
- color_im = np.frombuffer(color_buf, dtype=np.uint8)
- color_im = color_im.reshape((height, width, 4))
- else:
- color_buf = glReadPixels(
- 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE
- )
- color_im = np.frombuffer(color_buf, dtype=np.uint8)
- color_im = color_im.reshape((height, width, 3))
- color_im = np.flip(color_im, axis=0)
-
- # Resize for macos if needed
- if sys.platform == 'darwin':
- color_im = self._resize_image(color_im, True)
-
- return color_im, depth_im
-
- def _resize_image(self, value, antialias=False):
- """If needed, rescale the render for MacOS."""
- img = PIL.Image.fromarray(value)
- resample = PIL.Image.NEAREST
- if antialias:
- resample = PIL.Image.BILINEAR
- size = (self.viewport_width // self.dpscale,
- self.viewport_height // self.dpscale)
- img = img.resize(size, resample=resample)
- return np.array(img)
-
- ###########################################################################
- # Shadowmap Debugging
- ###########################################################################
-
- def _forward_pass_no_reset(self, scene, flags):
- # Set up camera matrices
- V, P = self._get_camera_matrices(scene)
-
- # Now, render each object in sorted order
- for node in self._sorted_mesh_nodes(scene):
- mesh = node.mesh
-
- # Skip the mesh if it's not visible
- if not mesh.is_visible:
- continue
-
- for primitive in mesh.primitives:
-
- # First, get and bind the appropriate program
- program = self._get_primitive_program(
- primitive, flags, ProgramFlags.USE_MATERIAL
- )
- program._bind()
-
- # Set the camera uniforms
- program.set_uniform('V', V)
- program.set_uniform('P', P)
- program.set_uniform(
- 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3]
- )
-
- # Next, bind the lighting
- if not flags & RenderFlags.DEPTH_ONLY and not flags & RenderFlags.FLAT:
- self._bind_lighting(scene, program, node, flags)
-
- # Finally, bind and draw the primitive
- self._bind_and_draw_primitive(
- primitive=primitive,
- pose=scene.get_pose(node),
- program=program,
- flags=flags
- )
- self._reset_active_textures()
-
- # Unbind the shader and flush the output
- if program is not None:
- program._unbind()
- glFlush()
-
- def _render_light_shadowmaps(self, scene, light_nodes, flags, tile=False):
- glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0)
- glClearColor(*scene.bg_color)
- glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
- glEnable(GL_DEPTH_TEST)
- glDepthMask(GL_TRUE)
- glDepthFunc(GL_LESS)
- glDepthRange(0.0, 1.0)
-
- w = self.viewport_width
- h = self.viewport_height
-
- num_nodes = len(light_nodes)
- viewport_dims = {
- (0, 2): [0, h // 2, w // 2, h],
- (1, 2): [w // 2, h // 2, w, h],
- (0, 3): [0, h // 2, w // 2, h],
- (1, 3): [w // 2, h // 2, w, h],
- (2, 3): [0, 0, w // 2, h // 2],
- (0, 4): [0, h // 2, w // 2, h],
- (1, 4): [w // 2, h // 2, w, h],
- (2, 4): [0, 0, w // 2, h // 2],
- (3, 4): [w // 2, 0, w, h // 2]
- }
-
- if tile:
- for i, ln in enumerate(light_nodes):
- light = ln.light
-
- if light.shadow_texture is None:
- raise ValueError('Light does not have a shadow texture')
-
- glViewport(*viewport_dims[(i, num_nodes + 1)])
-
- program = self._get_debug_quad_program()
- program._bind()
- self._bind_texture(light.shadow_texture, 'depthMap', program)
- self._render_debug_quad()
- self._reset_active_textures()
- glFlush()
- i += 1
- glViewport(*viewport_dims[(i, num_nodes + 1)])
- self._forward_pass_no_reset(scene, flags)
- else:
- for i, ln in enumerate(light_nodes):
- light = ln.light
-
- if light.shadow_texture is None:
- raise ValueError('Light does not have a shadow texture')
-
- glViewport(0, 0, self.viewport_width, self.viewport_height)
-
- program = self._get_debug_quad_program()
- program._bind()
- self._bind_texture(light.shadow_texture, 'depthMap', program)
- self._render_debug_quad()
- self._reset_active_textures()
- glFlush()
- return
-
- def _get_debug_quad_program(self):
- program = self._program_cache.get_program(
- vertex_shader='debug_quad.vert',
- fragment_shader='debug_quad.frag'
- )
- if not program._in_context():
- program._add_to_context()
- return program
-
- def _render_debug_quad(self):
- x = glGenVertexArrays(1)
- glBindVertexArray(x)
- glDrawArrays(GL_TRIANGLES, 0, 6)
- glBindVertexArray(0)
- glDeleteVertexArrays(1, [x])
diff --git a/spaces/afiz/sepia-image/README.md b/spaces/afiz/sepia-image/README.md
deleted file mode 100644
index 5ef9605ee28baa91a5897b950cfe9ff4e806a57b..0000000000000000000000000000000000000000
--- a/spaces/afiz/sepia-image/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Sepia Image
-emoji: 📚
-colorFrom: pink
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.1.5
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/base_callbacks.py b/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/base_callbacks.py
deleted file mode 100644
index ef62dd591f1516aa41e2ba347cc3aaa558854f8d..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Music_Source_Separation/bytesep/callbacks/base_callbacks.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import logging
-import os
-from typing import NoReturn
-
-import pytorch_lightning as pl
-import torch
-import torch.nn as nn
-from pytorch_lightning.utilities import rank_zero_only
-
-
-class SaveCheckpointsCallback(pl.Callback):
- def __init__(
- self,
- model: nn.Module,
- checkpoints_dir: str,
- save_step_frequency: int,
- ):
- r"""Callback to save checkpoints every #save_step_frequency steps.
-
- Args:
- model: nn.Module
- checkpoints_dir: str, directory to save checkpoints
- save_step_frequency: int
- """
- self.model = model
- self.checkpoints_dir = checkpoints_dir
- self.save_step_frequency = save_step_frequency
- os.makedirs(self.checkpoints_dir, exist_ok=True)
-
- @rank_zero_only
- def on_batch_end(self, trainer: pl.Trainer, _) -> NoReturn:
- r"""Save checkpoint."""
- global_step = trainer.global_step
-
- if global_step % self.save_step_frequency == 0:
-
- checkpoint_path = os.path.join(
- self.checkpoints_dir, "step={}.pth".format(global_step)
- )
-
- checkpoint = {'step': global_step, 'model': self.model.state_dict()}
-
- torch.save(checkpoint, checkpoint_path)
- logging.info("Save checkpoint to {}".format(checkpoint_path))
diff --git a/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py b/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py
deleted file mode 100644
index 8bd45a930d3dc84912e58659ee575be08e9038f0..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : test_numeric_batchnorm.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-
-import unittest
-
-import torch
-import torch.nn as nn
-from torch.autograd import Variable
-
-from sync_batchnorm.unittest import TorchTestCase
-
-
-def handy_var(a, unbias=True):
- n = a.size(0)
- asum = a.sum(dim=0)
- as_sum = (a ** 2).sum(dim=0) # a square sum
- sumvar = as_sum - asum * asum / n
- if unbias:
- return sumvar / (n - 1)
- else:
- return sumvar / n
-
-
-class NumericTestCase(TorchTestCase):
- def testNumericBatchNorm(self):
- a = torch.rand(16, 10)
- bn = nn.BatchNorm2d(10, momentum=1, eps=1e-5, affine=False)
- bn.train()
-
- a_var1 = Variable(a, requires_grad=True)
- b_var1 = bn(a_var1)
- loss1 = b_var1.sum()
- loss1.backward()
-
- a_var2 = Variable(a, requires_grad=True)
- a_mean2 = a_var2.mean(dim=0, keepdim=True)
- a_std2 = torch.sqrt(handy_var(a_var2, unbias=False).clamp(min=1e-5))
- # a_std2 = torch.sqrt(a_var2.var(dim=0, keepdim=True, unbiased=False) + 1e-5)
- b_var2 = (a_var2 - a_mean2) / a_std2
- loss2 = b_var2.sum()
- loss2.backward()
-
- self.assertTensorClose(bn.running_mean, a.mean(dim=0))
- self.assertTensorClose(bn.running_var, handy_var(a))
- self.assertTensorClose(a_var1.data, a_var2.data)
- self.assertTensorClose(b_var1.data, b_var2.data)
- self.assertTensorClose(a_var1.grad, a_var2.grad)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/spaces/akhaliq/stylegan3_clip/torch_utils/training_stats.py b/spaces/akhaliq/stylegan3_clip/torch_utils/training_stats.py
deleted file mode 100644
index 64e7835210d51d923e6a45240d27020a20e219de..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/stylegan3_clip/torch_utils/training_stats.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Facilities for reporting and collecting training statistics across
-multiple processes and devices. The interface is designed to minimize
-synchronization overhead as well as the amount of boilerplate in user
-code."""
-
-import re
-import numpy as np
-import torch
-import dnnlib
-
-from . import misc
-
-#----------------------------------------------------------------------------
-
-_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares]
-_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction.
-_counter_dtype = torch.float64 # Data type to use for the internal counters.
-_rank = 0 # Rank of the current process.
-_sync_device = None # Device to use for multiprocess communication. None = single-process.
-_sync_called = False # Has _sync() been called yet?
-_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor
-_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
-
-#----------------------------------------------------------------------------
-
-def init_multiprocessing(rank, sync_device):
- r"""Initializes `torch_utils.training_stats` for collecting statistics
- across multiple processes.
-
- This function must be called after
- `torch.distributed.init_process_group()` and before `Collector.update()`.
- The call is not necessary if multi-process collection is not needed.
-
- Args:
- rank: Rank of the current process.
- sync_device: PyTorch device to use for inter-process
- communication, or None to disable multi-process
- collection. Typically `torch.device('cuda', rank)`.
- """
- global _rank, _sync_device
- assert not _sync_called
- _rank = rank
- _sync_device = sync_device
-
-#----------------------------------------------------------------------------
-
-@misc.profiled_function
-def report(name, value):
- r"""Broadcasts the given set of scalars to all interested instances of
- `Collector`, across device and process boundaries.
-
- This function is expected to be extremely cheap and can be safely
- called from anywhere in the training loop, loss function, or inside a
- `torch.nn.Module`.
-
- Warning: The current implementation expects the set of unique names to
- be consistent across processes. Please make sure that `report()` is
- called at least once for each unique name by each process, and in the
- same order. If a given process has no scalars to broadcast, it can do
- `report(name, [])` (empty list).
-
- Args:
- name: Arbitrary string specifying the name of the statistic.
- Averages are accumulated separately for each unique name.
- value: Arbitrary set of scalars. Can be a list, tuple,
- NumPy array, PyTorch tensor, or Python scalar.
-
- Returns:
- The same `value` that was passed in.
- """
- if name not in _counters:
- _counters[name] = dict()
-
- elems = torch.as_tensor(value)
- if elems.numel() == 0:
- return value
-
- elems = elems.detach().flatten().to(_reduce_dtype)
- moments = torch.stack([
- torch.ones_like(elems).sum(),
- elems.sum(),
- elems.square().sum(),
- ])
- assert moments.ndim == 1 and moments.shape[0] == _num_moments
- moments = moments.to(_counter_dtype)
-
- device = moments.device
- if device not in _counters[name]:
- _counters[name][device] = torch.zeros_like(moments)
- _counters[name][device].add_(moments)
- return value
-
-#----------------------------------------------------------------------------
-
-def report0(name, value):
- r"""Broadcasts the given set of scalars by the first process (`rank = 0`),
- but ignores any scalars provided by the other processes.
- See `report()` for further details.
- """
- report(name, value if _rank == 0 else [])
- return value
-
-#----------------------------------------------------------------------------
-
-class Collector:
- r"""Collects the scalars broadcasted by `report()` and `report0()` and
- computes their long-term averages (mean and standard deviation) over
- user-defined periods of time.
-
- The averages are first collected into internal counters that are not
- directly visible to the user. They are then copied to the user-visible
- state as a result of calling `update()` and can then be queried using
- `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
- internal counters for the next round, so that the user-visible state
- effectively reflects averages collected between the last two calls to
- `update()`.
-
- Args:
- regex: Regular expression defining which statistics to
- collect. The default is to collect everything.
- keep_previous: Whether to retain the previous averages if no
- scalars were collected on a given round
- (default: True).
- """
- def __init__(self, regex='.*', keep_previous=True):
- self._regex = re.compile(regex)
- self._keep_previous = keep_previous
- self._cumulative = dict()
- self._moments = dict()
- self.update()
- self._moments.clear()
-
- def names(self):
- r"""Returns the names of all statistics broadcasted so far that
- match the regular expression specified at construction time.
- """
- return [name for name in _counters if self._regex.fullmatch(name)]
-
- def update(self):
- r"""Copies current values of the internal counters to the
- user-visible state and resets them for the next round.
-
- If `keep_previous=True` was specified at construction time, the
- operation is skipped for statistics that have received no scalars
- since the last update, retaining their previous averages.
-
- This method performs a number of GPU-to-CPU transfers and one
- `torch.distributed.all_reduce()`. It is intended to be called
- periodically in the main training loop, typically once every
- N training steps.
- """
- if not self._keep_previous:
- self._moments.clear()
- for name, cumulative in _sync(self.names()):
- if name not in self._cumulative:
- self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
- delta = cumulative - self._cumulative[name]
- self._cumulative[name].copy_(cumulative)
- if float(delta[0]) != 0:
- self._moments[name] = delta
-
- def _get_delta(self, name):
- r"""Returns the raw moments that were accumulated for the given
- statistic between the last two calls to `update()`, or zero if
- no scalars were collected.
- """
- assert self._regex.fullmatch(name)
- if name not in self._moments:
- self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
- return self._moments[name]
-
- def num(self, name):
- r"""Returns the number of scalars that were accumulated for the given
- statistic between the last two calls to `update()`, or zero if
- no scalars were collected.
- """
- delta = self._get_delta(name)
- return int(delta[0])
-
- def mean(self, name):
- r"""Returns the mean of the scalars that were accumulated for the
- given statistic between the last two calls to `update()`, or NaN if
- no scalars were collected.
- """
- delta = self._get_delta(name)
- if int(delta[0]) == 0:
- return float('nan')
- return float(delta[1] / delta[0])
-
- def std(self, name):
- r"""Returns the standard deviation of the scalars that were
- accumulated for the given statistic between the last two calls to
- `update()`, or NaN if no scalars were collected.
- """
- delta = self._get_delta(name)
- if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
- return float('nan')
- if int(delta[0]) == 1:
- return float(0)
- mean = float(delta[1] / delta[0])
- raw_var = float(delta[2] / delta[0])
- return np.sqrt(max(raw_var - np.square(mean), 0))
-
- def as_dict(self):
- r"""Returns the averages accumulated between the last two calls to
- `update()` as a `dnnlib.EasyDict`. The contents are as follows:
-
- dnnlib.EasyDict(
- NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
- ...
- )
- """
- stats = dnnlib.EasyDict()
- for name in self.names():
- stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
- return stats
-
- def __getitem__(self, name):
- r"""Convenience getter.
- `collector[name]` is a synonym for `collector.mean(name)`.
- """
- return self.mean(name)
-
-#----------------------------------------------------------------------------
-
-def _sync(names):
- r"""Synchronize the global cumulative counters across devices and
- processes. Called internally by `Collector.update()`.
- """
- if len(names) == 0:
- return []
- global _sync_called
- _sync_called = True
-
- # Collect deltas within current rank.
- deltas = []
- device = _sync_device if _sync_device is not None else torch.device('cpu')
- for name in names:
- delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
- for counter in _counters[name].values():
- delta.add_(counter.to(device))
- counter.copy_(torch.zeros_like(counter))
- deltas.append(delta)
- deltas = torch.stack(deltas)
-
- # Sum deltas across ranks.
- if _sync_device is not None:
- torch.distributed.all_reduce(deltas)
-
- # Update cumulative values.
- deltas = deltas.cpu()
- for idx, name in enumerate(names):
- if name not in _cumulative:
- _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
- _cumulative[name].add_(deltas[idx])
-
- # Return name-value pairs.
- return [(name, _cumulative[name]) for name in names]
-
-#----------------------------------------------------------------------------
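For context, a minimal usage sketch of the reporting/collection pattern implemented by the module above. The import name `training_stats` and the loss values are assumptions for illustration; only `report()`, `report0()` and `Collector` come from the deleted file itself.

```python
# Hypothetical usage sketch; assumes the stats module above is importable as `training_stats`.
import torch
import training_stats

collector = training_stats.Collector(regex='Loss/.*')

for step in range(1000):
    loss = torch.rand([8])                         # stand-in for a real training loss
    training_stats.report('Loss/total', loss)      # accumulate moments on this process
    training_stats.report0('Progress/step', step)  # only rank 0 contributes

    if step % 100 == 0:
        collector.update()                         # sync counters across devices/processes
        print(f"step {step}: Loss/total = {collector.mean('Loss/total'):.4f} "
              f"+/- {collector.std('Loss/total'):.4f}")
```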
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py
deleted file mode 100644
index 22c50b356adf906bd6a579749d0c120f9cac8381..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
- Pygments
- ~~~~~~~~
-
- Pygments is a syntax highlighting package written in Python.
-
- It is a generic syntax highlighter for general use in all kinds of software
- such as forum systems, wikis or other applications that need to prettify
- source code. Highlights are:
-
- * a wide range of common languages and markup formats is supported
- * special attention is paid to details, increasing quality by a fair amount
- * support for new languages and formats is added easily
- * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
- formats that PIL supports, and ANSI sequences
- * it is usable as a command-line tool and as a library
- * ... and it highlights even Brainfuck!
-
- The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
-
- .. _Pygments master branch:
- https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
-
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-from io import StringIO, BytesIO
-
-__version__ = '2.11.2'
-__docformat__ = 'restructuredtext'
-
-__all__ = ['lex', 'format', 'highlight']
-
-
-def lex(code, lexer):
- """
- Lex ``code`` with ``lexer`` and return an iterable of tokens.
- """
- try:
- return lexer.get_tokens(code)
- except TypeError as err:
- if (isinstance(err.args[0], str) and
- ('unbound method get_tokens' in err.args[0] or
- 'missing 1 required positional argument' in err.args[0])):
- raise TypeError('lex() argument must be a lexer instance, '
- 'not a class')
- raise
-
-
-def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
- """
- Format a tokenlist ``tokens`` with the formatter ``formatter``.
-
- If ``outfile`` is given and a valid file object (an object
- with a ``write`` method), the result will be written to it, otherwise
- it is returned as a string.
- """
- try:
- if not outfile:
- realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
- formatter.format(tokens, realoutfile)
- return realoutfile.getvalue()
- else:
- formatter.format(tokens, outfile)
- except TypeError as err:
- if (isinstance(err.args[0], str) and
- ('unbound method format' in err.args[0] or
- 'missing 1 required positional argument' in err.args[0])):
- raise TypeError('format() argument must be a formatter instance, '
- 'not a class')
- raise
-
-
-def highlight(code, lexer, formatter, outfile=None):
- """
- Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
-
- If ``outfile`` is given and a valid file object (an object
- with a ``write`` method), the result will be written to it, otherwise
- it is returned as a string.
- """
- return format(lex(code, lexer), formatter, outfile)
-
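As a quick illustration of the `highlight()` entry point above, a minimal sketch using lexer and formatter classes from elsewhere in the Pygments package (not shown in this file):

```python
# Minimal sketch: highlight a Python snippet as HTML with Pygments.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = "print('hello, world')"
# No outfile is given, so highlight() returns the formatted result as a string.
html = highlight(code, PythonLexer(), HtmlFormatter())
print(html)
```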
diff --git a/spaces/algomuffin/jojo_fork/op/conv2d_gradfix.py b/spaces/algomuffin/jojo_fork/op/conv2d_gradfix.py
deleted file mode 100644
index bb2f94bbcb8132299fd4d538972d32bd7ff6e7d6..0000000000000000000000000000000000000000
--- a/spaces/algomuffin/jojo_fork/op/conv2d_gradfix.py
+++ /dev/null
@@ -1,227 +0,0 @@
-import contextlib
-import warnings
-
-import torch
-from torch import autograd
-from torch.nn import functional as F
-
-enabled = True
-weight_gradients_disabled = False
-
-
-@contextlib.contextmanager
-def no_weight_gradients():
- global weight_gradients_disabled
-
- old = weight_gradients_disabled
- weight_gradients_disabled = True
- yield
- weight_gradients_disabled = old
-
-
-def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
- if could_use_op(input):
- return conv2d_gradfix(
- transpose=False,
- weight_shape=weight.shape,
- stride=stride,
- padding=padding,
- output_padding=0,
- dilation=dilation,
- groups=groups,
- ).apply(input, weight, bias)
-
- return F.conv2d(
- input=input,
- weight=weight,
- bias=bias,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups,
- )
-
-
-def conv_transpose2d(
- input,
- weight,
- bias=None,
- stride=1,
- padding=0,
- output_padding=0,
- groups=1,
- dilation=1,
-):
- if could_use_op(input):
- return conv2d_gradfix(
- transpose=True,
- weight_shape=weight.shape,
- stride=stride,
- padding=padding,
- output_padding=output_padding,
- groups=groups,
- dilation=dilation,
- ).apply(input, weight, bias)
-
- return F.conv_transpose2d(
- input=input,
- weight=weight,
- bias=bias,
- stride=stride,
- padding=padding,
- output_padding=output_padding,
- dilation=dilation,
- groups=groups,
- )
-
-
-def could_use_op(input):
- if (not enabled) or (not torch.backends.cudnn.enabled):
- return False
-
- if input.device.type != "cuda":
- return False
-
- if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]):
- return True
-
- warnings.warn(
- f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()."
- )
-
- return False
-
-
-def ensure_tuple(xs, ndim):
- xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
-
- return xs
-
-
-conv2d_gradfix_cache = dict()
-
-
-def conv2d_gradfix(
- transpose, weight_shape, stride, padding, output_padding, dilation, groups
-):
- ndim = 2
- weight_shape = tuple(weight_shape)
- stride = ensure_tuple(stride, ndim)
- padding = ensure_tuple(padding, ndim)
- output_padding = ensure_tuple(output_padding, ndim)
- dilation = ensure_tuple(dilation, ndim)
-
- key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
- if key in conv2d_gradfix_cache:
- return conv2d_gradfix_cache[key]
-
- common_kwargs = dict(
- stride=stride, padding=padding, dilation=dilation, groups=groups
- )
-
- def calc_output_padding(input_shape, output_shape):
- if transpose:
- return [0, 0]
-
- return [
- input_shape[i + 2]
- - (output_shape[i + 2] - 1) * stride[i]
- - (1 - 2 * padding[i])
- - dilation[i] * (weight_shape[i + 2] - 1)
- for i in range(ndim)
- ]
-
- class Conv2d(autograd.Function):
- @staticmethod
- def forward(ctx, input, weight, bias):
- if not transpose:
- out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
-
- else:
- out = F.conv_transpose2d(
- input=input,
- weight=weight,
- bias=bias,
- output_padding=output_padding,
- **common_kwargs,
- )
-
- ctx.save_for_backward(input, weight)
-
- return out
-
- @staticmethod
- def backward(ctx, grad_output):
- input, weight = ctx.saved_tensors
- grad_input, grad_weight, grad_bias = None, None, None
-
- if ctx.needs_input_grad[0]:
- p = calc_output_padding(
- input_shape=input.shape, output_shape=grad_output.shape
- )
- grad_input = conv2d_gradfix(
- transpose=(not transpose),
- weight_shape=weight_shape,
- output_padding=p,
- **common_kwargs,
- ).apply(grad_output, weight, None)
-
- if ctx.needs_input_grad[1] and not weight_gradients_disabled:
- grad_weight = Conv2dGradWeight.apply(grad_output, input)
-
- if ctx.needs_input_grad[2]:
- grad_bias = grad_output.sum((0, 2, 3))
-
- return grad_input, grad_weight, grad_bias
-
- class Conv2dGradWeight(autograd.Function):
- @staticmethod
- def forward(ctx, grad_output, input):
- op = torch._C._jit_get_operation(
- "aten::cudnn_convolution_backward_weight"
- if not transpose
- else "aten::cudnn_convolution_transpose_backward_weight"
- )
- flags = [
- torch.backends.cudnn.benchmark,
- torch.backends.cudnn.deterministic,
- torch.backends.cudnn.allow_tf32,
- ]
- grad_weight = op(
- weight_shape,
- grad_output,
- input,
- padding,
- stride,
- dilation,
- groups,
- *flags,
- )
- ctx.save_for_backward(grad_output, input)
-
- return grad_weight
-
- @staticmethod
- def backward(ctx, grad_grad_weight):
- grad_output, input = ctx.saved_tensors
- grad_grad_output, grad_grad_input = None, None
-
- if ctx.needs_input_grad[0]:
- grad_grad_output = Conv2d.apply(input, grad_grad_weight, None)
-
- if ctx.needs_input_grad[1]:
- p = calc_output_padding(
- input_shape=input.shape, output_shape=grad_output.shape
- )
- grad_grad_input = conv2d_gradfix(
- transpose=(not transpose),
- weight_shape=weight_shape,
- output_padding=p,
- **common_kwargs,
- ).apply(grad_output, grad_grad_weight, None)
-
- return grad_grad_output, grad_grad_input
-
- conv2d_gradfix_cache[key] = Conv2d
-
- return Conv2d
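A short sketch of how this module is typically used as a drop-in replacement for `torch.nn.functional.conv2d`, with weight gradients disabled for a regularization-style backward pass. The import path and tensor shapes are assumptions for illustration; `conv2d()` and `no_weight_gradients()` come from the file above.

```python
# Hypothetical usage sketch for the conv2d_gradfix module deleted above.
import torch
from op import conv2d_gradfix  # assumes the `op` package above is on the path

x = torch.randn(4, 3, 32, 32, requires_grad=True)
w = torch.randn(8, 3, 3, 3, requires_grad=True)

# Drop-in replacement for F.conv2d; falls back to the standard op automatically
# when the custom path cannot be used (e.g. on CPU or unsupported PyTorch versions).
y = conv2d_gradfix.conv2d(x, w, padding=1)

# Compute gradients w.r.t. the input only, skipping weight gradients
# (the pattern used for regularization terms such as R1 penalties).
with conv2d_gradfix.no_weight_gradients():
    grad_x, = torch.autograd.grad(y.sum(), [x], create_graph=True)
```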
diff --git a/spaces/aliabd/SummerTime/dataset/non_huggingface_datasets_builders/scisummnet.py b/spaces/aliabd/SummerTime/dataset/non_huggingface_datasets_builders/scisummnet.py
deleted file mode 100644
index 0b6bcfb5bfc02e09be903d988ec45d0a0a06606e..0000000000000000000000000000000000000000
--- a/spaces/aliabd/SummerTime/dataset/non_huggingface_datasets_builders/scisummnet.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import os
-import datasets
-
-
-"""Scisummnet dataset."""
-
-
-_CITATION = """
-@InProceedings{yasunaga&al.19.scisumm,
- title = {{ScisummNet}: A Large Annotated Corpus and Content-Impact Models for Scientific Paper Summarization with Citation Networks},
- author = {Michihiro Yasunaga and Jungo Kasai and Rui Zhang and Alexander Fabbri and Irene Li and Dan Friedman and Dragomir Radev},
- booktitle = {Proceedings of AAAI 2019},
- year = {2019}
-}
-@InProceedings{yasunaga&al.17,
- title = {Graph-based Neural Multi-Document Summarization},
- author = {Yasunaga, Michihiro and Zhang, Rui and Meelu, Kshitijh and Pareek, Ayush and Srinivasan, Krishnan and Radev, Dragomir R.},
- booktitle = {Proceedings of CoNLL 2017},
- year = {2017}
-}
-"""
-
-_DESCRIPTION = """
-A summary of scientific papers should ideally incorporate the impact of the papers on the research community
-reflected by citations. To facilitate research in citation-aware scientific paper summarization (Scisumm),
-the CL-Scisumm shared task has been organized since 2014 for papers in the computational linguistics and NLP domain.
-"""
-
-_HOMEPAGE = "https://cs.stanford.edu/~myasu/projects/scisumm_net/"
-
-_LICENSE = "CC BY-SA 4.0"
-
-_URLs = "https://cs.stanford.edu/~myasu/projects/scisumm_net/scisummnet_release1.1__20190413.zip"
-
-
-class SummertimeScisummnet(datasets.GeneratorBasedBuilder):
- """Scisummnet dataset."""
-
- VERSION = datasets.Version("1.1.0")
-
- BUILDER_CONFIGS = [
- datasets.BuilderConfig(),
- ]
-
- def _info(self):
- features = datasets.Features(
- {
- "entry_number": datasets.Value("string"),
- "document_xml": datasets.Value("string"),
- "citing_sentences_annotated.json": datasets.Value("string"),
- "summary": datasets.Value("string"),
- }
- )
- return datasets.DatasetInfo(
- description=_DESCRIPTION,
- features=features,
- supervised_keys=None,
- homepage=_HOMEPAGE,
- license=_LICENSE,
- citation=_CITATION,
- )
-
- def _split_generators(self, dl_manager):
- """Returns SplitGenerators."""
- my_urls = _URLs
- path = dl_manager.download_and_extract(my_urls)
- trainpath = os.path.join(
- path, "scisummnet_release1.1__20190413", "top1000_complete"
- )
- return [
- datasets.SplitGenerator(
- name=datasets.Split.TRAIN,
- # These kwargs will be passed to _generate_examples
- gen_kwargs={"extraction_path": trainpath, "split": "train"},
- )
- ]
-
- def _generate_examples(self, extraction_path, split):
- """Yields examples."""
-
- for folder in os.listdir(extraction_path):
-
- entry = {}
-
- entry["entry_number"] = folder
-
- doc_xml_path = os.path.join(
- extraction_path, folder, "Documents_xml", folder + ".xml"
- )
- with open(doc_xml_path, "r", encoding="utf-8") as f:
- entry["document_xml"] = f.read()
-
- cite_annot_path = os.path.join(
- extraction_path, folder, "citing_sentences_annotated.json"
- )
- with open(cite_annot_path, "r", encoding="utf-8") as f:
- entry["citing_sentences_annotated.json"] = f.read()
-
- summary_path = os.path.join(
- extraction_path, folder, "summary", folder + ".gold.txt"
- )
- with open(summary_path, "r", encoding="utf-8") as f:
- entry["summary"] = f.read()
-
- yield entry["entry_number"], entry
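For reference, a hedged sketch of how a local builder script like this is usually loaded with the `datasets` library. The script path is an assumption about where the file lives in this repo, and the download is large, so this is illustration only.

```python
# Sketch: load the ScisummNet builder script above with Hugging Face `datasets`.
from datasets import load_dataset

dataset = load_dataset(
    "dataset/non_huggingface_datasets_builders/scisummnet.py",  # assumed local path
    split="train",  # the builder only defines a TRAIN split
)
print(dataset[0]["entry_number"], len(dataset[0]["summary"]))
```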
diff --git a/spaces/almakedon/faster-whisper-webui/docs/options.md b/spaces/almakedon/faster-whisper-webui/docs/options.md
deleted file mode 100644
index 6979fca4d9d4c98a626a2953c2573ff23898a37e..0000000000000000000000000000000000000000
--- a/spaces/almakedon/faster-whisper-webui/docs/options.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# Standard Options
-To transcribe or translate an audio file, you can either copy a URL from a website (all [websites](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)
-supported by YT-DLP will work, including YouTube), upload an audio file (choose "All Files (*.*)"
-in the file selector to select any file type, including video files), or use the microphone.
-
-For longer audio files (>10 minutes), it is recommended that you select Silero VAD (Voice Activity Detector) in the VAD option, especially if you are using the `large-v1` model. Note that `large-v2` is a lot more forgiving, but you may still want to use a VAD with a slightly higher "VAD - Max Merge Size (s)" (60 seconds or more).
-
-## Model
-Select the model that Whisper will use to transcribe the audio:
-
-| Size | Parameters | English-only model | Multilingual model | Required VRAM | Relative speed |
-|-----------|------------|--------------------|--------------------|---------------|----------------|
-| tiny | 39 M | tiny.en | tiny | ~1 GB | ~32x |
-| base | 74 M | base.en | base | ~1 GB | ~16x |
-| small | 244 M | small.en | small | ~2 GB | ~6x |
-| medium | 769 M | medium.en | medium | ~5 GB | ~2x |
-| large | 1550 M | N/A | large | ~10 GB | 1x |
-| large-v2 | 1550 M | N/A | large-v2 | ~10 GB | 1x |
-
-## Language
-
-Select the language, or leave it empty for Whisper to automatically detect it.
-
-Note that if the selected language and the language in the audio differ, Whisper may start to translate the audio to the selected
-language. For instance, if the audio is in English but you select Japanese, the model may translate the audio to Japanese.
-
-## Inputs
-The options "URL (YouTube, etc.)", "Upload Files" or "Micriphone Input" allows you to send an audio input to the model.
-
-### Multiple Files
-Note that the UI will only process either the given URL or the uploaded files (including microphone) - not both.
-
-But you can upload multiple files either through the "Upload files" option, or as a playlist on YouTube. Each audio file will then be processed in turn, and the resulting SRT/VTT/Transcript will be made available in the "Download" section. When more than one file is processed, the UI will also generate an "All_Output" zip file containing all the text output files.
-
-## Task
-Select the task - either "transcribe" to transcribe the audio to text, or "translate" to translate it to English.
-
-## VAD
-Using a VAD will improve the timing accuracy of each transcribed line, as well as prevent Whisper from getting into an infinite
-loop detecting the same sentence over and over again. The downside is that this may come at a cost to text accuracy, especially
-with regard to unique words or names that appear in the audio. You can compensate for this by increasing the prompt window.
-
-Note that English is very well handled by Whisper, and it's less susceptible to issues surrounding bad timings and infinite loops.
-So you may only need to use a VAD for other languages, such as Japanese, or when the audio is very long.
-
-* none
- * Run whisper on the entire audio input
-* silero-vad
- * Use Silero VAD to detect sections that contain speech, and run Whisper independently on each section. Whisper is also run
- on the gaps between each speech section, by either expanding the section up to the max merge size, or running Whisper independently
- on the non-speech section.
-* silero-vad-expand-into-gaps
- * Use Silero VAD to detect sections that contain speech, and run Whisper independently on each section. Each speech section will be expanded
- such that it covers any adjacent non-speech sections. For instance, if an audio file of one minute contains the speech sections
- 00:00 - 00:10 (A) and 00:30 - 00:40 (B), the first section (A) will be expanded to 00:00 - 00:30, and (B) will be expanded to 00:30 - 01:00.
-* silero-vad-skip-gaps
- * As above, but sections that don't contain speech according to Silero will be skipped. This will be slightly faster, but
- may cause dialogue to be skipped.
-* periodic-vad
- * Create sections of speech every 'VAD - Max Merge Size' seconds. This is very fast and simple, but will potentially break
- a sentence or word in two.
-
-## VAD - Merge Window
-If set, any adjacent speech sections that are at most this number of seconds apart will be automatically merged.
-
-## VAD - Max Merge Size (s)
-Adjacent speech sections will not be merged if the resulting merged section would be longer than this number of seconds (see the sketch below).
-
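A small sketch may help clarify how the merge window and max merge size interact when adjacent speech sections are combined. This is an illustrative re-implementation, not the webui's actual code; section times are in seconds.

```python
# Illustrative sketch of merge-window / max-merge-size behaviour (not the actual webui code).
def merge_sections(sections, merge_window=5.0, max_merge_size=30.0):
    """sections: list of (start, end) tuples in seconds, sorted by start time."""
    merged = []
    for start, end in sections:
        if merged:
            prev_start, prev_end = merged[-1]
            gap = start - prev_end
            combined_length = end - prev_start
            # Merge only if the gap fits within the merge window and the
            # combined section does not exceed the max merge size.
            if gap <= merge_window and combined_length <= max_merge_size:
                merged[-1] = (prev_start, end)
                continue
        merged.append((start, end))
    return merged

print(merge_sections([(0, 4), (6, 10), (45, 50)]))  # -> [(0, 10), (45, 50)]
```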
-## VAD - Padding (s)
-The number of seconds (floating point) to add to the beginning and end of each speech section. Setting this to a number
-larger than zero makes it more likely that Whisper will correctly transcribe a sentence at the beginning of
-a speech section. However, this also increases the probability of Whisper assigning the wrong timestamp
-to each transcribed line. The default value is 1 second.
-
-## VAD - Prompt Window (s)
-The text of a detected line will be included as a prompt to the next speech section, if the speech section starts at most this
-number of seconds after the line has finished. For instance, if a line ends at 10:00, and the next speech section starts at
-10:04, the line's text will be included if the prompt window is 4 seconds or more (10:04 - 10:00 = 4 seconds).
-
-Note that detected lines in gaps between speech sections will not be included in the prompt
-(if silero-vad or silero-vad-expand-into-gaps is used).
-
-# Command Line Options
-
-Both `app.py` and `cli.py` also accept command line options, such as the ability to enable parallel execution on multiple
-CPU/GPU cores, the default model name/VAD and so on. Consult the README in the root folder for more information.
-
-# Additional Options
-
-In addition to the above, there's also a "Full" options interface that allows you to set all the options available in the Whisper
-model. The options are as follows:
-
-## Initial Prompt
-Optional text to provide as a prompt for the first 30-second window. Whisper will attempt to use this as a starting point for the transcription, but you can
-also get creative and specify a style or format for the output of the transcription.
-
-For instance, if you use the prompt "hello how is it going always use lowercase no punctuation goodbye one two three start stop i you me they", Whisper will
-be biased to output lowercase letters and no punctuation, and may also be biased to output the words in the prompt more often.
-
-## Temperature
-The temperature to use when sampling. Default is 0 (zero). A higher temperature will result in more random output, while a lower temperature will be more deterministic.
-
-## Best Of - Non-zero temperature
-The number of candidates to sample from when sampling with non-zero temperature. Default is 5.
-
-## Beam Size - Zero temperature
-The number of beams to use in beam search when sampling with zero temperature. Default is 5.
-
-## Patience - Zero temperature
-The patience value to use in beam search when sampling with zero temperature. As in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search.
-
-## Length Penalty - Any temperature
-The token length penalty coefficient (alpha) to use when sampling with any temperature. As in https://arxiv.org/abs/1609.08144, uses simple length normalization by default.
-
-## Suppress Tokens - Comma-separated list of token IDs
-A comma-separated list of token IDs to suppress during sampling. The default value of "-1" will suppress most special characters except common punctuation.
-
-## Condition on previous text
-If True, provide the previous output of the model as a prompt for the next window. Disabling this may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop.
-
-## FP16
-Whether to perform inference in fp16. True by default.
-
-## Temperature increment on fallback
-The amount by which to increase the temperature when falling back, i.e. when decoding fails to meet either of the thresholds below. Default is 0.2.
-
-## Compression ratio threshold
-If the gzip compression ratio is higher than this value, treat the decoding as failed. Default is 2.4.
-
-## Logprob threshold
-If the average log probability is lower than this value, treat the decoding as failed. Default is -1.0.
-
-## No speech threshold
-If the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence. Default is 0.6.
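The last four options work together as a fallback loop: decoding is retried at increasing temperatures until the compression-ratio and log-probability checks pass. Below is a hedged sketch of that logic; it is illustrative only and not Whisper's actual implementation, and the `decode` callable is a hypothetical stand-in.

```python
# Illustrative sketch of Whisper-style temperature fallback (not the actual implementation).
def decode_with_fallback(decode, temperature=0.0, increment=0.2,
                         compression_ratio_threshold=2.4, logprob_threshold=-1.0):
    """`decode(t)` is assumed to return (text, compression_ratio, avg_logprob)."""
    result = None
    t = temperature
    while t <= 1.0:
        result = decode(t)
        text, compression_ratio, avg_logprob = result
        too_repetitive = compression_ratio > compression_ratio_threshold
        too_improbable = avg_logprob < logprob_threshold
        if not too_repetitive and not too_improbable:
            return result            # decoding succeeded at this temperature
        t += increment               # otherwise retry with a higher temperature
    return result                    # give up and keep the last attempt
```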
diff --git a/spaces/amankishore/sjc/my/README.md b/spaces/amankishore/sjc/my/README.md
deleted file mode 100644
index 5daa1c788deef956d5cb6399ecba2c96d947d827..0000000000000000000000000000000000000000
--- a/spaces/amankishore/sjc/my/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-a personal toolkit for experiment management;
-some of the design patterns are inspired by detectron2
diff --git a/spaces/andreped/AeroPath/.github/ISSUE_TEMPLATE/bug_report.md b/spaces/andreped/AeroPath/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 066324e5c29270804250e512b0dfbac2289eeabd..0000000000000000000000000000000000000000
--- a/spaces/andreped/AeroPath/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Desktop (please complete the following information):**
- - OS: [e.g. Windows]
- - Version: [e.g. 10]
- - Python: [e.g. 3.8.10]
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/spaces/anonymousauthorsanonymous/spurious/app.py b/spaces/anonymousauthorsanonymous/spurious/app.py
deleted file mode 100644
index 84167686d6c6ffb76ec3635255516622ce12fbf3..0000000000000000000000000000000000000000
--- a/spaces/anonymousauthorsanonymous/spurious/app.py
+++ /dev/null
@@ -1,472 +0,0 @@
-# %%
-import gradio as gr
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import random
-from matplotlib.ticker import MaxNLocator
-from transformers import pipeline
-
-MODEL_NAMES = ["bert-base-uncased", "roberta-base", "bert-large-uncased", "roberta-large"]
-OWN_MODEL_NAME = 'add-a-model'
-
-DECIMAL_PLACES = 1
-EPS = 1e-5 # to avoid /0 errors
-
-# Example date consts
-DATE_SPLIT_KEY = "DATE"
-START_YEAR = 1801
-STOP_YEAR = 1999
-NUM_PTS = 20
-DATES = np.linspace(START_YEAR, STOP_YEAR, NUM_PTS).astype(int).tolist()
-DATES = [f'{d}' for d in DATES]
-
-# Example place consts
-# https://www3.weforum.org/docs/WEF_GGGR_2021.pdf
-# Bottom 10 and top 10 Global Gender Gap ranked countries.
-PLACE_SPLIT_KEY = "PLACE"
-PLACES = [
- "Afghanistan",
- "Yemen",
- "Iraq",
- "Pakistan",
- "Syria",
- "Democratic Republic of Congo",
- "Iran",
- "Mali",
- "Chad",
- "Saudi Arabia",
- "Switzerland",
- "Ireland",
- "Lithuania",
- "Rwanda",
- "Namibia",
- "Sweden",
- "New Zealand",
- "Norway",
- "Finland",
- "Iceland"]
-
-
-# Example Reddit interest consts
-# in order of increasing self-identified female participation.
-# See http://bburky.com/subredditgenderratios/ , Minimum subreddit size: 400000
-SUBREDDITS = [
- "GlobalOffensive",
- "pcmasterrace",
- "nfl",
- "sports",
- "The_Donald",
- "leagueoflegends",
- "Overwatch",
- "gonewild",
- "Futurology",
- "space",
- "technology",
- "gaming",
- "Jokes",
- "dataisbeautiful",
- "woahdude",
- "askscience",
- "wow",
- "anime",
- "BlackPeopleTwitter",
- "politics",
- "pokemon",
- "worldnews",
- "reddit.com",
- "interestingasfuck",
- "videos",
- "nottheonion",
- "television",
- "science",
- "atheism",
- "movies",
- "gifs",
- "Music",
- "trees",
- "EarthPorn",
- "GetMotivated",
- "pokemongo",
- "news",
- # removing below subreddit as most of the tokens are taken up by it:
- # ['ff', '##ff', '##ff', '##fu', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', '##u', ...]
- # "fffffffuuuuuuuuuuuu",
- "Fitness",
- "Showerthoughts",
- "OldSchoolCool",
- "explainlikeimfive",
- "todayilearned",
- "gameofthrones",
- "AdviceAnimals",
- "DIY",
- "WTF",
- "IAmA",
- "cringepics",
- "tifu",
- "mildlyinteresting",
- "funny",
- "pics",
- "LifeProTips",
- "creepy",
- "personalfinance",
- "food",
- "AskReddit",
- "books",
- "aww",
- "sex",
- "relationships",
-]
-
-GENDERED_LIST = [
- ['he', 'she'],
- ['him', 'her'],
- ['his', 'hers'],
- ["himself", "herself"],
- ['male', 'female'],
- ['man', 'woman'],
- ['men', 'women'],
- ["husband", "wife"],
- ['father', 'mother'],
- ['boyfriend', 'girlfriend'],
- ['brother', 'sister'],
- ["actor", "actress"],
-]
-
-# %%
-# Fire up the models
-models = dict()
-
-for bert_like in MODEL_NAMES:
- models[bert_like] = pipeline("fill-mask", model=bert_like)
-
-# %%
-
-
-def get_gendered_token_ids():
- male_gendered_tokens = [pair[0] for pair in GENDERED_LIST]
- female_gendered_tokens = [pair[1] for pair in GENDERED_LIST]
-
- return male_gendered_tokens, female_gendered_tokens
-
-
-def prepare_text_for_masking(input_text, mask_token, gendered_tokens, split_key):
- text_w_masks_list = [
- mask_token if word.lower() in gendered_tokens else word for word in input_text.split()]
- num_masks = len([m for m in text_w_masks_list if m == mask_token])
-
- text_portions = ' '.join(text_w_masks_list).split(split_key)
- return text_portions, num_masks
-
-
-def get_avg_prob_from_pipeline_outputs(mask_filled_text, gendered_token, num_preds):
- pronoun_preds = [sum([
- pronoun["score"] if pronoun["token_str"].strip().lower() in gendered_token else 0.0
- for pronoun in top_preds])
- for top_preds in mask_filled_text
- ]
- return round(sum(pronoun_preds) / (EPS + num_preds) * 100, DECIMAL_PLACES)
-
-# %%
-
-
-def get_figure(df, gender, n_fit=1):
- df = df.set_index('x-axis')
- cols = df.columns
- xs = list(range(len(df)))
- ys = df[cols[0]]
- fig, ax = plt.subplots()
- # Trying small fig due to rendering issues on HF, not on VS Code
- fig.set_figheight(3)
- fig.set_figwidth(9)
-
- # Polynomial fit with parameter covariance (np.polyfit with cov=1); adapted from a Stack Overflow answer
- p, C_p = np.polyfit(xs, ys, n_fit, cov=1)
- t = np.linspace(min(xs)-1, max(xs)+1, 10*len(xs))
- TT = np.vstack([t**(n_fit-i) for i in range(n_fit+1)]).T
-
- # matrix multiplication calculates the polynomial values
- yi = np.dot(TT, p)
- C_yi = np.dot(TT, np.dot(C_p, TT.T)) # C_y = TT*C_z*TT.T
- sig_yi = np.sqrt(np.diag(C_yi)) # Standard deviations are sqrt of diagonal
-
- ax.fill_between(t, yi+sig_yi, yi-sig_yi, alpha=.25)
- ax.plot(t, yi, '-')
- ax.plot(df, 'ro')
- ax.legend(list(df.columns))
-
- ax.axis('tight')
- ax.set_xlabel("Value injected into input text")
- ax.set_title(
- f"Probability of predicting {gender} pronouns.")
- ax.set_ylabel(f"Softmax prob for pronouns")
- ax.xaxis.set_major_locator(MaxNLocator(6))
- ax.tick_params(axis='x', labelrotation=5)
- return fig
-
-
-# %%
-def predict_gender_pronouns(
- model_name,
- own_model_name,
- indie_vars,
- split_key,
- normalizing,
- n_fit,
- input_text,
-):
- """Run inference on input_text for each model type, returning df and plots of percentage
- of gender pronouns predicted as female and male in each target text.
- """
- if model_name not in MODEL_NAMES:
- model = pipeline("fill-mask", model=own_model_name)
- else:
- model = models[model_name]
-
- mask_token = model.tokenizer.mask_token
-
- indie_vars_list = indie_vars.split(',')
-
- male_gendered_tokens, female_gendered_tokens = get_gendered_token_ids()
-
- text_segments, num_preds = prepare_text_for_masking(
- input_text, mask_token, male_gendered_tokens + female_gendered_tokens, split_key)
-
- male_pronoun_preds = []
- female_pronoun_preds = []
- for indie_var in indie_vars_list:
-
- target_text = f"{indie_var}".join(text_segments)
- mask_filled_text = model(target_text)
- # Quick hack: the pipeline's return type depends on how many MASK tokens are in the text.
- if type(mask_filled_text[0]) is not list:
- mask_filled_text = [mask_filled_text]
-
- female_pronoun_preds.append(get_avg_prob_from_pipeline_outputs(
- mask_filled_text,
- female_gendered_tokens,
- num_preds
- ))
- male_pronoun_preds.append(get_avg_prob_from_pipeline_outputs(
- mask_filled_text,
- male_gendered_tokens,
- num_preds
- ))
-
- if normalizing:
- total_gendered_probs = np.add(
- female_pronoun_preds, male_pronoun_preds)
- female_pronoun_preds = np.around(
- np.divide(female_pronoun_preds, total_gendered_probs+EPS)*100,
- decimals=DECIMAL_PLACES
- )
- male_pronoun_preds = np.around(
- np.divide(male_pronoun_preds, total_gendered_probs+EPS)*100,
- decimals=DECIMAL_PLACES
- )
-
- results_df = pd.DataFrame({'x-axis': indie_vars_list})
- results_df['female_pronouns'] = female_pronoun_preds
- results_df['male_pronouns'] = male_pronoun_preds
- female_fig = get_figure(results_df.drop(
- 'male_pronouns', axis=1), 'female', n_fit,)
- male_fig = get_figure(results_df.drop(
- 'female_pronouns', axis=1), 'male', n_fit,)
- display_text = f"{random.choice(indie_vars_list)}".join(text_segments)
-
- return (
- display_text,
- female_fig,
- male_fig,
- results_df,
- )
-
-
-# %%
-title = "Causing Gender Pronouns"
-description = """
-## Intro
-"""
-
-
-date_example = [
- MODEL_NAMES[1],
- '',
- ', '.join(DATES),
- 'DATE',
- "False",
- 1,
- 'She was a teenager in DATE.'
-]
-
-
-place_example = [
- MODEL_NAMES[0],
- '',
- ', '.join(PLACES),
- 'PLACE',
- "False",
- 1,
- 'She became an adult in PLACE.'
-]
-
-
-subreddit_example = [
- MODEL_NAMES[3],
- '',
- ', '.join(SUBREDDITS),
- 'SUBREDDIT',
- "False",
- 1,
- 'She was a kid. SUBREDDIT.'
-]
-
-own_model_example = [
- OWN_MODEL_NAME,
- 'emilyalsentzer/Bio_ClinicalBERT',
- ', '.join(DATES),
- 'DATE',
- "False",
- 1,
- 'She was exposed to the virus in DATE.'
-]
-
-
-def date_fn():
- return date_example
-
-
-def place_fn():
- return place_example
-
-
-def reddit_fn():
- return subreddit_example
-
-
-def your_fn():
- return own_model_example
-
-
-# %%
-demo = gr.Blocks()
-with demo:
- gr.Markdown("# Spurious Correlation Evaluation for Pre-trained LLMs")
- gr.Markdown("Find spurious correlations between seemingly independent variables (for example between `gender` and `time`) in almost any BERT-like LLM on Hugging Face, below.")
-
- # gr.Markdown("Note: If there is an issue with the rendering of the results taking longer than expected (more than 10s of seconds), there may be an unexpected issue effecting the hosting. If so, please see this [backup colab notebook](https://colab.research.google.com/drive/1A3a9cy9fERaxkuoX8YNTFhLlhRt_cxMm?usp=sharing).")
-
-
- gr.Markdown("## Instructions for this Demo")
- gr.Markdown("1) Click on one of the examples below (where we sweep through a spectrum of `places`, `dates` and `subreddits`) to pre-populate the input fields.")
- gr.Markdown("2) Check out the pre-populated fields as you scroll down to the ['Hit Submit...'] button!")
- gr.Markdown("3) Repeat steps (1) and (2) with more pre-populated inputs or with your own values in the input fields!")
-
- gr.Markdown("## Example inputs")
- gr.Markdown("Click a button below to pre-populate input fields with example values. Then scroll down to Hit Submit to generate predictions.")
- with gr.Row():
- date_gen = gr.Button('Click for date example inputs')
- gr.Markdown("<-- x-axis sorted by older to more recent dates:")
-
- place_gen = gr.Button('Click for country example inputs')
- gr.Markdown(
- "<-- x-axis sorted by bottom 10 and top 10 [Global Gender Gap](https://www3.weforum.org/docs/WEF_GGGR_2021.pdf) ranked countries:")
-
- subreddit_gen = gr.Button('Click for Subreddit example inputs')
- gr.Markdown(
- "<-- x-axis sorted in order of increasing self-identified female participation (see [bburky](http://bburky.com/subredditgenderratios/)): ")
-
- your_gen = gr.Button('Add-a-model example inputs')
- gr.Markdown("<-- x-axis dates, with your own model loaded! (If first time, try another example, it can take a while to load new model.)")
-
- gr.Markdown("## Input fields")
- gr.Markdown(
- f"A) Pick a spectrum of comma separated values for text injection and x-axis.")
-
- with gr.Row():
- x_axis = gr.Textbox(
- lines=3,
- label="A) Comma separated values for text injection and x-axis",
- )
-
-
- gr.Markdown("B) Pick a pre-loaded BERT-family model of interest on the right.")
- gr.Markdown(f"Or C) select `{OWN_MODEL_NAME}`, then add the mame of any other Hugging Face model that supports the [fill-mask](https://huggingface.co/models?pipeline_tag=fill-mask) task on the right (note: this may take some time to load).")
-
- with gr.Row():
- model_name = gr.Radio(
- MODEL_NAMES + [OWN_MODEL_NAME],
- type="value",
- label="B) BERT-like model.",
- )
- own_model_name = gr.Textbox(
- label="C) If you selected an 'add-a-model' model, put any Hugging Face pipeline model name (that supports the fill-mask task) here.",
- )
-
- gr.Markdown("D) Pick if you want to the predictions normalied to these gendered terms only.")
- gr.Markdown("E) Also tell the demo what special token you will use in your input text, that you would like replaced with the spectrum of values you listed above.")
- gr.Markdown("And F) the degree of polynomial fit used for high-lighting potential spurious association.")
-
-
- with gr.Row():
- to_normalize = gr.Dropdown(
- ["False", "True"],
- label="D) Normalize model's predictions to only the gendered ones?",
- type="index",
- )
- place_holder = gr.Textbox(
- label="E) Special token place-holder",
- )
- n_fit = gr.Dropdown(
- list(range(1, 5)),
- label="F) Degree of polynomial fit",
- type="value",
- )
-
- gr.Markdown(
- "G) Finally, add input text that includes at least one gendered pronouns and one place-holder token specified above.")
-
- with gr.Row():
- input_text = gr.Textbox(
- lines=2,
- label="G) Input text with pronouns and place-holder token",
- )
-
- gr.Markdown("## Outputs!")
- #gr.Markdown("Scroll down and 'Hit Submit'!")
- with gr.Row():
- btn = gr.Button("Hit submit to generate predictions!")
-
- with gr.Row():
- sample_text = gr.Textbox(
- type="auto", label="Output text: Sample of text fed to model")
- with gr.Row():
- female_fig = gr.Plot(type="auto")
- male_fig = gr.Plot(type="auto")
- with gr.Row():
- df = gr.Dataframe(
- show_label=True,
- overflow_row_behaviour="show_ends",
- label="Table of softmax probability for pronouns predictions",
- )
-
- with gr.Row():
-
- date_gen.click(date_fn, inputs=[], outputs=[model_name, own_model_name,
- x_axis, place_holder, to_normalize, n_fit, input_text])
- place_gen.click(place_fn, inputs=[], outputs=[
- model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text])
- subreddit_gen.click(reddit_fn, inputs=[], outputs=[
- model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text])
- your_gen.click(your_fn, inputs=[], outputs=[
- model_name, own_model_name, x_axis, place_holder, to_normalize, n_fit, input_text])
-
- btn.click(
- predict_gender_pronouns,
- inputs=[model_name, own_model_name, x_axis, place_holder,
- to_normalize, n_fit, input_text],
- outputs=[sample_text, female_fig, male_fig, df])
-
-
-demo.launch(debug=True)
-
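Outside the Gradio UI, the core entry point in this file can also be called directly. A minimal sketch using the date example defined above, assuming the module-level definitions (`DATES`, the preloaded `models`, and `predict_gender_pronouns`) are in scope; model downloads and inference times apply.

```python
# Sketch: calling the prediction function directly with the date example values.
display_text, female_fig, male_fig, results_df = predict_gender_pronouns(
    model_name="roberta-base",       # MODEL_NAMES[1]
    own_model_name="",
    indie_vars=", ".join(DATES),
    split_key="DATE",
    normalizing=False,
    n_fit=1,
    input_text="She was a teenager in DATE.",
)
print(results_df)
```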
diff --git a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/chat.py b/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/chat.py
deleted file mode 100644
index 98e171b0f35041ec12d01657297a1fc8b9fa91dd..0000000000000000000000000000000000000000
--- a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/chat.py
+++ /dev/null
@@ -1,562 +0,0 @@
-import ast
-import base64
-import copy
-import io
-import json
-import logging
-import re
-from datetime import datetime
-from pathlib import Path
-
-import yaml
-from PIL import Image
-
-import modules.shared as shared
-from modules.extensions import apply_extensions
-from modules.html_generator import chat_html_wrapper, make_thumbnail
-from modules.text_generation import (encode, generate_reply,
- get_max_prompt_length)
-
-
-# Replace multiple string pairs in a string
-def replace_all(text, dic):
- for i, j in dic.items():
- text = text.replace(i, j)
-
- return text
-
-
-def generate_chat_prompt(user_input, state, **kwargs):
- impersonate = kwargs['impersonate'] if 'impersonate' in kwargs else False
- _continue = kwargs['_continue'] if '_continue' in kwargs else False
- also_return_rows = kwargs['also_return_rows'] if 'also_return_rows' in kwargs else False
- is_instruct = state['mode'] == 'instruct'
- rows = [state['context'] if is_instruct else f"{state['context'].strip()}\n"]
- min_rows = 3
-
- # Finding the maximum prompt size
- chat_prompt_size = state['chat_prompt_size']
- if shared.soft_prompt:
- chat_prompt_size -= shared.soft_prompt_tensor.shape[1]
-
- max_length = min(get_max_prompt_length(state), chat_prompt_size)
-
- # Building the turn templates
- if 'turn_template' not in state or state['turn_template'] == '':
- if is_instruct:
- template = '<|user|>\n<|user-message|>\n<|bot|>\n<|bot-message|>\n'
- else:
- template = '<|user|>: <|user-message|>\n<|bot|>: <|bot-message|>\n'
- else:
- template = state['turn_template'].replace(r'\n', '\n')
-
- replacements = {
- '<|user|>': state['name1'].strip(),
- '<|bot|>': state['name2'].strip(),
- }
-
- user_turn = replace_all(template.split('<|bot|>')[0], replacements)
- bot_turn = replace_all('<|bot|>' + template.split('<|bot|>')[1], replacements)
- user_turn_stripped = replace_all(user_turn.split('<|user-message|>')[0], replacements)
- bot_turn_stripped = replace_all(bot_turn.split('<|bot-message|>')[0], replacements)
-
- # Building the prompt
- i = len(shared.history['internal']) - 1
- while i >= 0 and len(encode(''.join(rows))[0]) < max_length:
- if _continue and i == len(shared.history['internal']) - 1:
- rows.insert(1, bot_turn_stripped + shared.history['internal'][i][1].strip())
- else:
- rows.insert(1, bot_turn.replace('<|bot-message|>', shared.history['internal'][i][1].strip()))
-
- string = shared.history['internal'][i][0]
- if string not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
- rows.insert(1, replace_all(user_turn, {'<|user-message|>': string.strip(), '<|round|>': str(i)}))
-
- i -= 1
-
- if impersonate:
- min_rows = 2
- rows.append(user_turn_stripped.rstrip(' '))
- elif not _continue:
- # Adding the user message
- if len(user_input) > 0:
- rows.append(replace_all(user_turn, {'<|user-message|>': user_input.strip(), '<|round|>': str(len(shared.history["internal"]))}))
-
- # Adding the Character prefix
- rows.append(apply_extensions("bot_prefix", bot_turn_stripped.rstrip(' ')))
-
- while len(rows) > min_rows and len(encode(''.join(rows))[0]) >= max_length:
- rows.pop(1)
-
- prompt = ''.join(rows)
- if also_return_rows:
- return prompt, rows
- else:
- return prompt
-
-
-def get_stopping_strings(state):
- if state['mode'] == 'instruct':
- stopping_strings = [f"\n{state['name1']}", f"\n{state['name2']}"]
- else:
- stopping_strings = [f"\n{state['name1']}:", f"\n{state['name2']}:"]
-
- stopping_strings += ast.literal_eval(f"[{state['custom_stopping_strings']}]")
- return stopping_strings
-
-
-def extract_message_from_reply(reply, state):
- next_character_found = False
- stopping_strings = get_stopping_strings(state)
-
- if state['stop_at_newline']:
- lines = reply.split('\n')
- reply = lines[0].strip()
- if len(lines) > 1:
- next_character_found = True
- else:
- for string in stopping_strings:
- idx = reply.find(string)
- if idx != -1:
- reply = reply[:idx]
- next_character_found = True
-
- # If something like "\nYo" is generated just before "\nYou:"
- # is completed, trim it
- if not next_character_found:
- for string in stopping_strings:
- for j in range(len(string) - 1, 0, -1):
- if reply[-j:] == string[:j]:
- reply = reply[:-j]
- break
- else:
- continue
-
- break
-
- return reply, next_character_found
-
-
-def chatbot_wrapper(text, state, regenerate=False, _continue=False):
- if shared.model_name == 'None' or shared.model is None:
- logging.error("No model is loaded! Select one in the Model tab.")
- yield shared.history['visible']
- return
-
- # Defining some variables
- cumulative_reply = ''
- just_started = True
- visible_text = None
- eos_token = '\n' if state['stop_at_newline'] else None
- stopping_strings = get_stopping_strings(state)
-
- # Preparing the input
- if not any((regenerate, _continue)):
- text, visible_text = apply_extensions('input_hijack', text, visible_text)
- if visible_text is None:
- visible_text = text
-
- text = apply_extensions('input', text)
- # *Is typing...*
- yield shared.history['visible'] + [[visible_text, shared.processing_message]]
- else:
- text, visible_text = shared.history['internal'][-1][0], shared.history['visible'][-1][0]
- if regenerate:
- shared.history['visible'].pop()
- shared.history['internal'].pop()
- # *Is typing...*
- yield shared.history['visible'] + [[visible_text, shared.processing_message]]
- elif _continue:
- last_reply = [shared.history['internal'][-1][1], shared.history['visible'][-1][1]]
- yield shared.history['visible'][:-1] + [[visible_text, last_reply[1] + '...']]
-
- # Generating the prompt
- kwargs = {'_continue': _continue}
- prompt = apply_extensions('custom_generate_chat_prompt', text, state, **kwargs)
- if prompt is None:
- prompt = generate_chat_prompt(text, state, **kwargs)
-
- # Generate
- for i in range(state['chat_generation_attempts']):
- reply = None
- for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings):
- reply = cumulative_reply + reply
-
- # Extracting the reply
- reply, next_character_found = extract_message_from_reply(reply, state)
- visible_reply = re.sub("(<USER>|<user>|{{user}})", state['name1'], reply)
- visible_reply = apply_extensions("output", visible_reply)
- if _continue:
- sep = ' ' if last_reply[0][-1] not in [' ', '\n'] else ''
- reply = last_reply[0] + sep + reply
- sep = ' ' if last_reply[1][-1] not in [' ', '\n'] else ''
- visible_reply = last_reply[1] + sep + visible_reply
-
- # We need this global variable to handle the Stop event,
- # otherwise gradio gets confused
- if shared.stop_everything:
- return shared.history['visible']
-
- if just_started:
- just_started = False
- if not _continue:
- shared.history['internal'].append(['', ''])
- shared.history['visible'].append(['', ''])
-
- shared.history['internal'][-1] = [text, reply]
- shared.history['visible'][-1] = [visible_text, visible_reply]
- yield shared.history['visible']
- if next_character_found:
- break
-
- if reply is not None:
- cumulative_reply = reply
-
- yield shared.history['visible']
-
-
-def impersonate_wrapper(text, state):
- if shared.model_name == 'None' or shared.model is None:
- logging.error("No model is loaded! Select one in the Model tab.")
- yield ''
- return
-
- # Defining some variables
- cumulative_reply = ''
- eos_token = '\n' if state['stop_at_newline'] else None
- prompt = generate_chat_prompt(text, state, impersonate=True)
- stopping_strings = get_stopping_strings(state)
-
- # Yield *Is typing...*
- yield shared.processing_message
- for i in range(state['chat_generation_attempts']):
- reply = None
- for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings):
- reply = cumulative_reply + reply
- reply, next_character_found = extract_message_from_reply(reply, state)
- yield reply
- if next_character_found:
- break
-
- if reply is not None:
- cumulative_reply = reply
-
- yield reply
-
-
-def cai_chatbot_wrapper(text, state):
- for history in chatbot_wrapper(text, state):
- yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'])
-
-
-def regenerate_wrapper(text, state):
- if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
- yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'])
- else:
- for history in chatbot_wrapper('', state, regenerate=True):
- yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'])
-
-
-def continue_wrapper(text, state):
- if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
- yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'])
- else:
- for history in chatbot_wrapper('', state, _continue=True):
- yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'])
-
-
-def remove_last_message(name1, name2, mode):
- if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
- last = shared.history['visible'].pop()
- shared.history['internal'].pop()
- else:
- last = ['', '']
-
- return chat_html_wrapper(shared.history['visible'], name1, name2, mode), last[0]
-
-
-def send_last_reply_to_input():
- if len(shared.history['internal']) > 0:
- return shared.history['internal'][-1][1]
- else:
- return ''
-
-
-def replace_last_reply(text, name1, name2, mode):
- if len(shared.history['visible']) > 0:
- shared.history['visible'][-1][1] = text
- shared.history['internal'][-1][1] = apply_extensions("input", text)
-
- return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
-
-
-def send_dummy_message(text, name1, name2, mode):
- shared.history['visible'].append([text, ''])
- shared.history['internal'].append([apply_extensions("input", text), ''])
- return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
-
-
-def send_dummy_reply(text, name1, name2, mode):
- if len(shared.history['visible']) > 0 and not shared.history['visible'][-1][1] == '':
- shared.history['visible'].append(['', ''])
- shared.history['internal'].append(['', ''])
-
- shared.history['visible'][-1][1] = text
- shared.history['internal'][-1][1] = apply_extensions("input", text)
- return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
-
-
-def clear_html():
- return chat_html_wrapper([], "", "")
-
-
-def clear_chat_log(name1, name2, greeting, mode):
- shared.history['visible'] = []
- shared.history['internal'] = []
-
- if greeting != '':
- shared.history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
- shared.history['visible'] += [['', apply_extensions("output", greeting)]]
-
- # Save cleared logs
- save_history(mode)
- return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
-
-
-def redraw_html(name1, name2, mode):
- return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
-
-
-def tokenize_dialogue(dialogue, name1, name2, mode):
- history = []
- messages = []
- dialogue = re.sub('<START>', '', dialogue)
- dialogue = re.sub('<start>', '', dialogue)
- dialogue = re.sub('(\n|^)[Aa]non:', '\\1You:', dialogue)
- dialogue = re.sub('(\n|^)\[CHARACTER\]:', f'\\g<1>{name2}:', dialogue)
- idx = [m.start() for m in re.finditer(f"(^|\n)({re.escape(name1)}|{re.escape(name2)}):", dialogue)]
- if len(idx) == 0:
- return history
-
- for i in range(len(idx) - 1):
- messages.append(dialogue[idx[i]:idx[i + 1]].strip())
-
- messages.append(dialogue[idx[-1]:].strip())
- entry = ['', '']
- for i in messages:
- if i.startswith(f'{name1}:'):
- entry[0] = i[len(f'{name1}:'):].strip()
- elif i.startswith(f'{name2}:'):
- entry[1] = i[len(f'{name2}:'):].strip()
- if not (len(entry[0]) == 0 and len(entry[1]) == 0):
- history.append(entry)
-
- entry = ['', '']
-
- print("\033[1;32;1m\nDialogue tokenized to:\033[0;37;0m\n", end='')
- for row in history:
- for column in row:
- print("\n")
- for line in column.strip().split('\n'):
- print("| " + line + "\n")
-
- print("|\n")
- print("------------------------------")
-
- return history
-
-
-def save_history(mode, timestamp=False):
- # Instruct mode histories should not be saved as if
- # Alpaca or Vicuna were characters
- if mode == 'instruct':
- if not timestamp:
- return
-
- fname = f"Instruct_{datetime.now().strftime('%Y%m%d-%H%M%S')}.json"
- else:
- if timestamp:
- fname = f"{shared.character}_{datetime.now().strftime('%Y%m%d-%H%M%S')}.json"
- else:
- fname = f"{shared.character}_persistent.json"
-
- if not Path('logs').exists():
- Path('logs').mkdir()
-
- with open(Path(f'logs/{fname}'), 'w', encoding='utf-8') as f:
- f.write(json.dumps({'data': shared.history['internal'], 'data_visible': shared.history['visible']}, indent=2))
-
- return Path(f'logs/{fname}')
-
-
-def load_history(file, name1, name2):
- file = file.decode('utf-8')
- try:
- j = json.loads(file)
- if 'data' in j:
- shared.history['internal'] = j['data']
- if 'data_visible' in j:
- shared.history['visible'] = j['data_visible']
- else:
- shared.history['visible'] = copy.deepcopy(shared.history['internal'])
- except:
- shared.history['internal'] = tokenize_dialogue(file, name1, name2)
- shared.history['visible'] = copy.deepcopy(shared.history['internal'])
-
-
-def replace_character_names(text, name1, name2):
- text = text.replace('{{user}}', name1).replace('{{char}}', name2)
- return text.replace('<USER>', name1).replace('<BOT>', name2)
-
-
-def build_pygmalion_style_context(data):
- context = ""
- if 'char_persona' in data and data['char_persona'] != '':
- context += f"{data['char_name']}'s Persona: {data['char_persona']}\n"
-
- if 'world_scenario' in data and data['world_scenario'] != '':
- context += f"Scenario: {data['world_scenario']}\n"
-
- context = f"{context.strip()}\n\n"
- return context
-
-
-def generate_pfp_cache(character):
- cache_folder = Path("cache")
- if not cache_folder.exists():
- cache_folder.mkdir()
-
- for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
- if path.exists():
- img = make_thumbnail(Image.open(path))
- img.save(Path('cache/pfp_character.png'), format='PNG')
- return img
-
- return None
-
-
-def load_character(character, name1, name2, mode):
- shared.character = character
- context = greeting = turn_template = ""
- greeting_field = 'greeting'
- picture = None
-
- # Deleting the profile picture cache, if any
- if Path("cache/pfp_character.png").exists():
- Path("cache/pfp_character.png").unlink()
-
- if character != 'None':
- folder = 'characters' if not mode == 'instruct' else 'characters/instruction-following'
- picture = generate_pfp_cache(character)
- for extension in ["yml", "yaml", "json"]:
- filepath = Path(f'{folder}/{character}.{extension}')
- if filepath.exists():
- break
-
- file_contents = open(filepath, 'r', encoding='utf-8').read()
- data = json.loads(file_contents) if extension == "json" else yaml.safe_load(file_contents)
-
- # Finding the bot's name
- for k in ['name', 'bot', '<|bot|>', 'char_name']:
- if k in data and data[k] != '':
- name2 = data[k]
- break
-
- # Find the user name (if any)
- for k in ['your_name', 'user', '<|user|>']:
- if k in data and data[k] != '':
- name1 = data[k]
- break
- else:
- name1 = shared.settings['name1']
-
- for field in ['context', 'greeting', 'example_dialogue', 'char_persona', 'char_greeting', 'world_scenario']:
- if field in data:
- data[field] = replace_character_names(data[field], name1, name2)
-
- if 'context' in data:
- context = data['context']
- if mode != 'instruct':
- context = context.strip() + '\n\n'
- elif "char_persona" in data:
- context = build_pygmalion_style_context(data)
- greeting_field = 'char_greeting'
-
- if 'example_dialogue' in data:
- context += f"{data['example_dialogue'].strip()}\n"
-
- if greeting_field in data:
- greeting = data[greeting_field]
-
- if 'turn_template' in data:
- turn_template = data['turn_template']
-
- else:
- context = shared.settings['context']
- name2 = shared.settings['name2']
- greeting = shared.settings['greeting']
- turn_template = shared.settings['turn_template']
-
- if mode != 'instruct':
- shared.history['internal'] = []
- shared.history['visible'] = []
- if Path(f'logs/{shared.character}_persistent.json').exists():
- load_history(open(Path(f'logs/{shared.character}_persistent.json'), 'rb').read(), name1, name2)
- else:
- # Insert greeting if it exists
- if greeting != "":
- shared.history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
- shared.history['visible'] += [['', apply_extensions("output", greeting)]]
-
- # Create .json log files since they don't already exist
- save_history(mode)
-
- return name1, name2, picture, greeting, context, repr(turn_template)[1:-1], chat_html_wrapper(shared.history['visible'], name1, name2, mode)
-
-
-def upload_character(json_file, img, tavern=False):
- json_file = json_file if type(json_file) == str else json_file.decode('utf-8')
- data = json.loads(json_file)
- outfile_name = data["char_name"]
- i = 1
- while Path(f'characters/{outfile_name}.json').exists():
- outfile_name = f'{data["char_name"]}_{i:03d}'
- i += 1
-
- if tavern:
- outfile_name = f'TavernAI-{outfile_name}'
-
- with open(Path(f'characters/{outfile_name}.json'), 'w', encoding='utf-8') as f:
- f.write(json_file)
-
- if img is not None:
- img = Image.open(io.BytesIO(img))
- img.save(Path(f'characters/{outfile_name}.png'))
-
- logging.info(f'New character saved to "characters/{outfile_name}.json".')
- return outfile_name
-
-
-def upload_tavern_character(img, name1, name2):
- _img = Image.open(io.BytesIO(img))
- _img.getexif()
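-    # TavernAI character cards embed the character definition as base64-encoded JSON
-    # in the PNG's "chara" metadata field, which PIL exposes via img.info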
- decoded_string = base64.b64decode(_img.info['chara'])
- _json = json.loads(decoded_string)
- _json = {"char_name": _json['name'], "char_persona": _json['description'], "char_greeting": _json["first_mes"], "example_dialogue": _json['mes_example'], "world_scenario": _json['scenario']}
- return upload_character(json.dumps(_json), img, tavern=True)
-
-
-def upload_your_profile_picture(img, name1, name2, mode):
- cache_folder = Path("cache")
- if not cache_folder.exists():
- cache_folder.mkdir()
-
- if img is None:
- if Path("cache/pfp_me.png").exists():
- Path("cache/pfp_me.png").unlink()
- else:
- img = make_thumbnail(img)
- img.save(Path('cache/pfp_me.png'))
- logging.info('Profile picture saved to "cache/pfp_me.png"')
-
- return chat_html_wrapper(shared.history['visible'], name1, name2, mode, reset_cache=True)
diff --git a/spaces/aphenx/bingo/src/pages/api/sydney.ts b/spaces/aphenx/bingo/src/pages/api/sydney.ts
deleted file mode 100644
index 0e7bbf23d77c2e1a6635185a060eeee58b8c8e66..0000000000000000000000000000000000000000
--- a/spaces/aphenx/bingo/src/pages/api/sydney.ts
+++ /dev/null
@@ -1,62 +0,0 @@
-import { NextApiRequest, NextApiResponse } from 'next'
-import { WebSocket, debug } from '@/lib/isomorphic'
-import { BingWebBot } from '@/lib/bots/bing'
-import { websocketUtils } from '@/lib/bots/bing/utils'
-import { WatchDog, createHeaders } from '@/lib/utils'
-
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- const conversationContext = req.body
- const headers = createHeaders(req.cookies)
- debug(headers)
- res.setHeader('Content-Type', 'text/stream; charset=UTF-8')
-
- const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', {
- headers: {
- ...headers,
- 'accept-language': 'zh-CN,zh;q=0.9',
- 'cache-control': 'no-cache',
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- pragma: 'no-cache',
- }
- })
-
- const closeDog = new WatchDog()
- const timeoutDog = new WatchDog()
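-  // timeoutDog pings the service (type 6) if no message arrives within 1.5s;
-  // closeDog closes the socket after 10s without any traffic.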
- ws.onmessage = (event) => {
- timeoutDog.watch(() => {
- ws.send(websocketUtils.packMessage({ type: 6 }))
- }, 1500)
- closeDog.watch(() => {
- ws.close()
- }, 10000)
- res.write(event.data)
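-    // Close when the service signals type 3; echo type 6/7 frames back to it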
- if (/\{"type":([367])\}/.test(String(event.data))) {
- const type = parseInt(RegExp.$1, 10)
- debug('connection type', type)
- if (type === 3) {
- ws.close()
- } else {
- ws.send(websocketUtils.packMessage({ type }))
- }
- }
- }
-
- ws.onclose = () => {
- timeoutDog.reset()
- closeDog.reset()
- debug('connection close')
- res.end()
- }
-
- await new Promise((resolve) => ws.onopen = resolve)
- ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 }))
- ws.send(websocketUtils.packMessage({ type: 6 }))
- ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!)))
- req.socket.once('close', () => {
- ws.close()
- if (!res.closed) {
- res.end()
- }
- })
-}
diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/neural_hmm.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/neural_hmm.py
deleted file mode 100644
index 0631ba98c00029e9871c965e4c7f465aa32bc406..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/neural_hmm.py
+++ /dev/null
@@ -1,553 +0,0 @@
-from typing import List
-
-import torch
-import torch.distributions as tdist
-import torch.nn.functional as F
-from torch import nn
-from torch.utils.checkpoint import checkpoint
-
-from TTS.tts.layers.overflow.common_layers import Outputnet, OverflowUtils
-from TTS.tts.layers.tacotron.common_layers import Prenet
-from TTS.tts.utils.helpers import sequence_mask
-
-
-class NeuralHMM(nn.Module):
- """Autoregressive left to right HMM model primarily used in "Neural HMMs are all you need (for high-quality attention-free TTS)"
-
- Paper::
- https://arxiv.org/abs/2108.13320
-
- Paper abstract::
- Neural sequence-to-sequence TTS has achieved significantly better output quality than statistical speech synthesis using
- HMMs. However, neural TTS is generally not probabilistic and uses non-monotonic attention. Attention failures increase
- training time and can make synthesis babble incoherently. This paper describes how the old and new paradigms can be
- combined to obtain the advantages of both worlds, by replacing attention in neural TTS with an autoregressive left-right
- no-skip hidden Markov model defined by a neural network. Based on this proposal, we modify Tacotron 2 to obtain an
- HMM-based neural TTS model with monotonic alignment, trained to maximise the full sequence likelihood without
- approximation. We also describe how to combine ideas from classical and contemporary TTS for best results. The resulting
- example system is smaller and simpler than Tacotron 2, and learns to speak with fewer iterations and less data, whilst
- achieving comparable naturalness prior to the post-net. Our approach also allows easy control over speaking rate.
-
- Args:
- frame_channels (int): Output dimension to generate.
-        ar_order (int): Autoregressive order of the model. In ablations of Neural HMM it was found that more autoregression, while giving more variation, hurts the naturalness of the synthesised audio.
-        deterministic_transition (bool): deterministic duration generation based on duration quantiles, as defined in "S. Ronanki, O. Watts, S. King, and G. E. Henter, “Median-based generation of synthetic speech durations using a nonparametric approach,” in Proc. SLT, 2016.". Defaults to True.
- encoder_dim (int): Channels of encoder input and character embedding tensors. Defaults to 512.
- prenet_type (str): `original` or `bn`. `original` sets the default Prenet and `bn` uses Batch Normalization version of the Prenet.
- prenet_dim (int): Dimension of the Prenet.
- prenet_n_layers (int): Number of layers in the Prenet.
- prenet_dropout (float): Dropout probability of the Prenet.
- prenet_dropout_at_inference (bool): If True, dropout is applied at inference time.
- memory_rnn_dim (int): Size of the memory RNN to process output of prenet.
- outputnet_size (List[int]): Size of the output network inside the neural HMM.
- flat_start_params (dict): Parameters for the flat start initialization of the neural HMM.
-        std_floor (float): Floor value for the standard deviation of the neural HMM. Prevents the model from cheating by putting a point mass on a single datapoint and getting infinite likelihood.
- use_grad_checkpointing (bool, optional): Use gradient checkpointing to save memory. Defaults to True.
- """
-
- def __init__(
- self,
- frame_channels: int,
- ar_order: int,
- deterministic_transition: bool,
- encoder_dim: int,
- prenet_type: str,
- prenet_dim: int,
- prenet_n_layers: int,
- prenet_dropout: float,
- prenet_dropout_at_inference: bool,
- memory_rnn_dim: int,
- outputnet_size: List[int],
- flat_start_params: dict,
- std_floor: float,
- use_grad_checkpointing: bool = True,
- ):
- super().__init__()
-
- self.frame_channels = frame_channels
- self.ar_order = ar_order
- self.deterministic_transition = deterministic_transition
- self.prenet_dim = prenet_dim
- self.memory_rnn_dim = memory_rnn_dim
- self.use_grad_checkpointing = use_grad_checkpointing
-
- self.transition_model = TransitionModel()
- self.emission_model = EmissionModel()
-
-        assert ar_order > 0, f"AR order must be greater than 0, got {ar_order}"
-
- self.ar_order = ar_order
- self.prenet = Prenet(
- in_features=frame_channels * ar_order,
- prenet_type=prenet_type,
- prenet_dropout=prenet_dropout,
- dropout_at_inference=prenet_dropout_at_inference,
- out_features=[self.prenet_dim for _ in range(prenet_n_layers)],
- bias=False,
- )
- self.memory_rnn = nn.LSTMCell(input_size=prenet_dim, hidden_size=memory_rnn_dim)
- self.output_net = Outputnet(
- encoder_dim, memory_rnn_dim, frame_channels, outputnet_size, flat_start_params, std_floor
- )
- self.register_buffer("go_tokens", torch.zeros(ar_order, 1))
-
- def forward(self, inputs, inputs_len, mels, mel_lens):
- r"""HMM forward algorithm for training uses logarithmic version of Rabiner (1989) forward algorithm.
-
- Args:
- inputs (torch.FloatTensor): Encoder outputs
- inputs_len (torch.LongTensor): Encoder output lengths
- mels (torch.FloatTensor): Mel inputs
- mel_lens (torch.LongTensor): Length of mel inputs
-
- Shapes:
- - inputs: (B, T, D_out_enc)
- - inputs_len: (B)
- - mels: (B, D_mel, T_mel)
- - mel_lens: (B)
-
- Returns:
-            log_probs (torch.FloatTensor): Log probability of each sequence (B)
-            log_alpha_scaled (torch.FloatTensor): Scaled log forward variables (B, T_mel, N)
-            transition_matrix (torch.FloatTensor): Transition vectors per timestep (B, T_mel, N)
-            means (list): Predicted means per timestep (for plotting)
- """
- # Get dimensions of inputs
- batch_size, N, _ = inputs.shape
- T_max = torch.max(mel_lens)
- mels = mels.permute(0, 2, 1)
-
-        # Initialize forward algorithm
- log_state_priors = self._initialize_log_state_priors(inputs)
- log_c, log_alpha_scaled, transition_matrix, means = self._initialize_forward_algorithm_variables(mels, N)
-
- # Initialize autoregression elements
- ar_inputs = self._add_go_token(mels)
- h_memory, c_memory = self._init_lstm_states(batch_size, self.memory_rnn_dim, mels)
-
- for t in range(T_max):
- # Process Autoregression
- h_memory, c_memory = self._process_ar_timestep(t, ar_inputs, h_memory, c_memory)
- # Get mean, std and transition vector from decoder for this timestep
-            # Note: Gradient checkpointing currently doesn't work with multiple GPUs inside a loop
- if self.use_grad_checkpointing and self.training:
- mean, std, transition_vector = checkpoint(self.output_net, h_memory, inputs)
- else:
- mean, std, transition_vector = self.output_net(h_memory, inputs)
-
- if t == 0:
- log_alpha_temp = log_state_priors + self.emission_model(mels[:, 0], mean, std, inputs_len)
- else:
- log_alpha_temp = self.emission_model(mels[:, t], mean, std, inputs_len) + self.transition_model(
- log_alpha_scaled[:, t - 1, :], transition_vector, inputs_len
- )
- log_c[:, t] = torch.logsumexp(log_alpha_temp, dim=1)
- log_alpha_scaled[:, t, :] = log_alpha_temp - log_c[:, t].unsqueeze(1)
- transition_matrix[:, t] = transition_vector # needed for absorption state calculation
-
- # Save for plotting
- means.append(mean.detach())
-
- log_c, log_alpha_scaled = self._mask_lengths(mel_lens, log_c, log_alpha_scaled)
-
- sum_final_log_c = self.get_absorption_state_scaling_factor(
- mel_lens, log_alpha_scaled, inputs_len, transition_matrix
- )
-
- log_probs = torch.sum(log_c, dim=1) + sum_final_log_c
-
- return log_probs, log_alpha_scaled, transition_matrix, means
-
- @staticmethod
- def _mask_lengths(mel_lens, log_c, log_alpha_scaled):
- """
-        Mask the forward variables at padded timesteps so that the padded
-        lengths do not contribute to the loss calculation
-        Args:
-            mel_lens (torch.IntTensor): (batch)
-            log_c (torch.FloatTensor): (batch, T)
-            log_alpha_scaled (torch.FloatTensor): (batch, T, N)
- Returns:
- log_c (torch.FloatTensor) : scaled probabilities (batch, T)
- log_alpha_scaled (torch.FloatTensor): forward probabilities (batch, T, N)
- """
- mask_log_c = sequence_mask(mel_lens)
- log_c = log_c * mask_log_c
- mask_log_alpha_scaled = mask_log_c.unsqueeze(2)
- log_alpha_scaled = log_alpha_scaled * mask_log_alpha_scaled
- return log_c, log_alpha_scaled
-
- def _process_ar_timestep(
- self,
- t,
- ar_inputs,
- h_memory,
- c_memory,
- ):
- """
-        Process autoregression for a single timestep:
-        1. Take the autoregressive frame(s) at timestep t
-        2. Perform data dropout if applied (we did not use it)
-        3. Run the autoregressive frame through the prenet (has dropout)
-        4. Run the prenet output through the memory RNN (LSTM cell)
-
-        Args:
-            t (int): mel-spec timestep
-            ar_inputs (torch.FloatTensor): go-token prepended mel-spectrograms
-                - shape: (b, T_out, D_out)
-            h_memory (torch.FloatTensor): previous timestep rnn hidden state
-                - shape: (b, memory_rnn_dim)
-            c_memory (torch.FloatTensor): previous timestep rnn cell state
-                - shape: (b, memory_rnn_dim)
-
-        Returns:
-            h_memory (torch.FloatTensor): rnn hidden state of the current timestep
-            c_memory (torch.FloatTensor): rnn cell state of the current timestep
- """
- prenet_input = ar_inputs[:, t : t + self.ar_order].flatten(1)
- memory_inputs = self.prenet(prenet_input)
- h_memory, c_memory = self.memory_rnn(memory_inputs, (h_memory, c_memory))
- return h_memory, c_memory
-
- def _add_go_token(self, mel_inputs):
- """Append the go token to create the autoregressive input
- Args:
- mel_inputs (torch.FloatTensor): (batch_size, T, n_mel_channel)
- Returns:
- ar_inputs (torch.FloatTensor): (batch_size, T, n_mel_channel)
- """
- batch_size, T, _ = mel_inputs.shape
- go_tokens = self.go_tokens.unsqueeze(0).expand(batch_size, self.ar_order, self.frame_channels)
- ar_inputs = torch.cat((go_tokens, mel_inputs), dim=1)[:, :T]
- return ar_inputs
-
- @staticmethod
- def _initialize_forward_algorithm_variables(mel_inputs, N):
- r"""Initialize placeholders for forward algorithm variables, to use a stable
- version we will use log_alpha_scaled and the scaling constant
-
- Args:
- mel_inputs (torch.FloatTensor): (b, T_max, frame_channels)
- N (int): number of states
- Returns:
-            log_c (torch.FloatTensor): Scaling constant (b, T_max)
-            log_alpha_scaled (torch.FloatTensor): Scaled log forward variables (b, T_max, N)
-            transition_matrix (torch.FloatTensor): Transition vectors per timestep (b, T_max, N)
-            means (list): Empty list to collect the predicted means for plotting
- """
- b, T_max, _ = mel_inputs.shape
- log_alpha_scaled = mel_inputs.new_zeros((b, T_max, N))
- log_c = mel_inputs.new_zeros(b, T_max)
- transition_matrix = mel_inputs.new_zeros((b, T_max, N))
-
- # Saving for plotting later, will not have gradient tapes
- means = []
- return log_c, log_alpha_scaled, transition_matrix, means
-
- @staticmethod
- def _init_lstm_states(batch_size, hidden_state_dim, device_tensor):
- r"""
- Initialize Hidden and Cell states for LSTM Cell
-
- Args:
- batch_size (Int): batch size
- hidden_state_dim (Int): dimensions of the h and c
- device_tensor (torch.FloatTensor): useful for the device and type
-
- Returns:
- (torch.FloatTensor): shape (batch_size, hidden_state_dim)
- can be hidden state for LSTM
- (torch.FloatTensor): shape (batch_size, hidden_state_dim)
- can be the cell state for LSTM
- """
- return (
- device_tensor.new_zeros(batch_size, hidden_state_dim),
- device_tensor.new_zeros(batch_size, hidden_state_dim),
- )
-
- def get_absorption_state_scaling_factor(self, mels_len, log_alpha_scaled, inputs_len, transition_vector):
- """Returns the final scaling factor of absorption state
-
- Args:
- mels_len (torch.IntTensor): Input size of mels to
- get the last timestep of log_alpha_scaled
-            log_alpha_scaled (torch.FloatTensor): State probabilities
-            inputs_len (torch.IntTensor): lengths of the state sequences, used to
-                mask values beyond each sequence's length
- (
- Useful when the batch has very different lengths,
- when the length of an observation is less than
- the number of max states, then the log alpha after
- the state value is filled with -infs. So we mask
- those values so that it only consider the states
- which are needed for that length
- )
-            transition_vector (torch.FloatTensor): transition vector for each state per timestep
-
- Shapes:
- - mels_len: (batch_size)
-            - log_alpha_scaled: (batch_size, T, N)
-            - inputs_len: (batch_size)
-            - transition_vector: (batch_size, T, N)
-
- Returns:
- sum_final_log_c (torch.FloatTensor): (batch_size)
-
- """
- N = torch.max(inputs_len)
- max_inputs_len = log_alpha_scaled.shape[2]
- state_lengths_mask = sequence_mask(inputs_len, max_len=max_inputs_len)
-
- last_log_alpha_scaled_index = (
- (mels_len - 1).unsqueeze(-1).expand(-1, N).unsqueeze(1)
- ) # Batch X Hidden State Size
- last_log_alpha_scaled = torch.gather(log_alpha_scaled, 1, last_log_alpha_scaled_index).squeeze(1)
- last_log_alpha_scaled = last_log_alpha_scaled.masked_fill(~state_lengths_mask, -float("inf"))
-
- last_transition_vector = torch.gather(transition_vector, 1, last_log_alpha_scaled_index).squeeze(1)
- last_transition_probability = torch.sigmoid(last_transition_vector)
- log_probability_of_transitioning = OverflowUtils.log_clamped(last_transition_probability)
-
- last_transition_probability_index = self.get_mask_for_last_item(inputs_len, inputs_len.device)
- log_probability_of_transitioning = log_probability_of_transitioning.masked_fill(
- ~last_transition_probability_index, -float("inf")
- )
- final_log_c = last_log_alpha_scaled + log_probability_of_transitioning
-
-        # If the mel length is shorter than the number of states, the gather above selects -inf values, leading to NaN gradients.
-        # Ideally the dataset should be cleaned; as a workaround, the clamp below floors those values.
- final_log_c = final_log_c.clamp(min=torch.finfo(final_log_c.dtype).min)
-
- sum_final_log_c = torch.logsumexp(final_log_c, dim=1)
- return sum_final_log_c
-
- @staticmethod
- def get_mask_for_last_item(lengths, device, out_tensor=None):
- """Returns n-1 mask for the last item in the sequence.
-
- Args:
- lengths (torch.IntTensor): lengths in a batch
-            device (torch.device): device to create the mask on.
- out_tensor (torch.Tensor, optional): uses the memory of a specific tensor.
- Defaults to None.
-
- Returns:
- - Shape: :math:`(b, max_len)`
- """
- max_len = torch.max(lengths).item()
- ids = (
- torch.arange(0, max_len, device=device) if out_tensor is None else torch.arange(0, max_len, out=out_tensor)
- )
- mask = ids == lengths.unsqueeze(1) - 1
- return mask
-
- @torch.inference_mode()
- def inference(
- self,
- inputs: torch.FloatTensor,
- input_lens: torch.LongTensor,
- sampling_temp: float,
- max_sampling_time: int,
- duration_threshold: float,
- ):
- """Inference from autoregressive neural HMM
-
- Args:
- inputs (torch.FloatTensor): input states
- - shape: :math:`(b, T, d)`
- input_lens (torch.LongTensor): input state lengths
- - shape: :math:`(b)`
- sampling_temp (float): sampling temperature
-            max_sampling_time (int): maximum number of sampling timesteps
- duration_threshold (float): duration threshold to switch to next state
-                - Use this to change the speaking rate of the synthesised audio
- """
-
- b = inputs.shape[0]
- outputs = {
- "hmm_outputs": [],
- "hmm_outputs_len": [],
- "alignments": [],
- "input_parameters": [],
- "output_parameters": [],
- }
- for i in range(b):
- neural_hmm_outputs, states_travelled, input_parameters, output_parameters = self.sample(
- inputs[i : i + 1], input_lens[i], sampling_temp, max_sampling_time, duration_threshold
- )
-
- outputs["hmm_outputs"].append(neural_hmm_outputs)
- outputs["hmm_outputs_len"].append(neural_hmm_outputs.shape[0])
- outputs["alignments"].append(states_travelled)
- outputs["input_parameters"].append(input_parameters)
- outputs["output_parameters"].append(output_parameters)
-
- outputs["hmm_outputs"] = nn.utils.rnn.pad_sequence(outputs["hmm_outputs"], batch_first=True)
- outputs["hmm_outputs_len"] = torch.tensor(
- outputs["hmm_outputs_len"], dtype=input_lens.dtype, device=input_lens.device
- )
- return outputs
-
- @torch.inference_mode()
- def sample(self, inputs, input_lens, sampling_temp, max_sampling_time, duration_threshold):
- """Samples an output from the parameter models
-
- Args:
- inputs (torch.FloatTensor): input states
- - shape: :math:`(1, T, d)`
- input_lens (torch.LongTensor): input state lengths
- - shape: :math:`(1)`
- sampling_temp (float): sampling temperature
- max_sampling_time (int): max sampling time
- duration_threshold (float): duration threshold to switch to next state
-
- Returns:
- outputs (torch.FloatTensor): Output Observations
- - Shape: :math:`(T, output_dim)`
- states_travelled (list[int]): Hidden states travelled
- - Shape: :math:`(T)`
- input_parameters (list[torch.FloatTensor]): Input parameters
- output_parameters (list[torch.FloatTensor]): Output parameters
- """
- states_travelled, outputs, t = [], [], 0
-
- # Sample initial state
- current_state = 0
- states_travelled.append(current_state)
-
- # Prepare autoregression
- prenet_input = self.go_tokens.unsqueeze(0).expand(1, self.ar_order, self.frame_channels)
- h_memory, c_memory = self._init_lstm_states(1, self.memory_rnn_dim, prenet_input)
-
- input_parameter_values = []
- output_parameter_values = []
- quantile = 1
- while True:
- memory_input = self.prenet(prenet_input.flatten(1).unsqueeze(0))
-            # batch size is 1 while sampling
- h_memory, c_memory = self.memory_rnn(memory_input.squeeze(0), (h_memory, c_memory))
-
- z_t = inputs[:, current_state].unsqueeze(0) # Add fake time dimension
- mean, std, transition_vector = self.output_net(h_memory, z_t)
-
- transition_probability = torch.sigmoid(transition_vector.flatten())
- staying_probability = torch.sigmoid(-transition_vector.flatten())
-
- # Save for plotting
- input_parameter_values.append([prenet_input, current_state])
- output_parameter_values.append([mean, std, transition_probability])
-
- x_t = self.emission_model.sample(mean, std, sampling_temp=sampling_temp)
-
- # Prepare autoregressive input for next iteration
- prenet_input = torch.cat((prenet_input, x_t), dim=1)[:, 1:]
-
- outputs.append(x_t.flatten())
-
- transition_matrix = torch.cat((staying_probability, transition_probability))
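-            # Deterministic duration modelling: stay in the current state until the
-            # cumulative staying probability (quantile) drops below the duration threshold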
- quantile *= staying_probability
- if not self.deterministic_transition:
- switch = transition_matrix.multinomial(1)[0].item()
- else:
- switch = quantile < duration_threshold
-
- if switch:
- current_state += 1
- quantile = 1
-
- states_travelled.append(current_state)
-
- if (current_state == input_lens) or (max_sampling_time and t == max_sampling_time - 1):
- break
-
- t += 1
-
- return (
- torch.stack(outputs, dim=0),
- F.one_hot(input_lens.new_tensor(states_travelled)),
- input_parameter_values,
- output_parameter_values,
- )
-
- @staticmethod
- def _initialize_log_state_priors(text_embeddings):
- """Creates the log pi in forward algorithm.
-
- Args:
- text_embeddings (torch.FloatTensor): used to create the log pi
- on current device
-
- Shapes:
- - text_embeddings: (B, T, D_out_enc)
- """
- N = text_embeddings.shape[1]
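-        # A left-to-right HMM always starts in the first state: log(1) = 0 there, -inf elsewhere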
- log_state_priors = text_embeddings.new_full([N], -float("inf"))
- log_state_priors[0] = 0.0
- return log_state_priors
-
-
-class TransitionModel(nn.Module):
- """Transition Model of the HMM, it represents the probability of transitioning
- form current state to all other states"""
-
- def forward(self, log_alpha_scaled, transition_vector, inputs_len): # pylint: disable=no-self-use
- r"""
-        Product of the past state probabilities with the transition probabilities, in log space.
-
-        Args:
-            log_alpha_scaled (torch.Tensor): previous timestep's scaled log forward variables
-                - shape: (batch, N)
-            transition_vector (torch.Tensor): transition vector for each state
-                - shape: (batch, N)
- inputs_len (int tensor): Lengths of states in a batch
- - shape: (batch)
-
- Returns:
- out (torch.FloatTensor): log probability of transitioning to each state
- """
- transition_p = torch.sigmoid(transition_vector)
- staying_p = torch.sigmoid(-transition_vector)
-
- log_staying_probability = OverflowUtils.log_clamped(staying_p)
- log_transition_probability = OverflowUtils.log_clamped(transition_p)
-
- staying = log_alpha_scaled + log_staying_probability
- leaving = log_alpha_scaled + log_transition_probability
- leaving = leaving.roll(1, dims=1)
- leaving[:, 0] = -float("inf")
- inputs_len_mask = sequence_mask(inputs_len)
- out = OverflowUtils.logsumexp(torch.stack((staying, leaving), dim=2), dim=2)
- out = out.masked_fill(~inputs_len_mask, -float("inf")) # There are no states to contribute to the loss
- return out
-
-
-class EmissionModel(nn.Module):
- """Emission Model of the HMM, it represents the probability of
- emitting an observation based on the current state"""
-
- def __init__(self) -> None:
- super().__init__()
- self.distribution_function: tdist.Distribution = tdist.normal.Normal
-
- def sample(self, means, stds, sampling_temp):
- return self.distribution_function(means, stds * sampling_temp).sample() if sampling_temp > 0 else means
-
- def forward(self, x_t, means, stds, state_lengths):
- r"""Calculates the log probability of the the given data (x_t)
- being observed from states with given means and stds
- Args:
- x_t (float tensor) : observation at current time step
- - shape: (batch, feature_dim)
- means (float tensor): means of the distributions of hidden states
- - shape: (batch, hidden_state, feature_dim)
- stds (float tensor): standard deviations of the distributions of the hidden states
- - shape: (batch, hidden_state, feature_dim)
- state_lengths (int tensor): Lengths of states in a batch
- - shape: (batch)
-
- Returns:
- out (float tensor): observation log likelihoods,
- expressing the probability of an observation
- being generated from a state i
- shape: (batch, hidden_state)
- """
- emission_dists = self.distribution_function(means, stds)
- out = emission_dists.log_prob(x_t.unsqueeze(1))
- state_lengths_mask = sequence_mask(state_lengths).unsqueeze(2)
- out = torch.sum(out * state_lengths_mask, dim=2)
- return out
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Signature/test_pss.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Signature/test_pss.py
deleted file mode 100644
index 535474bec5f00a4b02185c3bdfe4b1e303cdb9fd..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Signature/test_pss.py
+++ /dev/null
@@ -1,377 +0,0 @@
-# ===================================================================
-#
-# Copyright (c) 2014, Legrandin
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-# ===================================================================
-
-import unittest
-
-from Crypto.Util.py3compat import b, bchr
-from Crypto.Util.number import bytes_to_long
-from Crypto.Util.strxor import strxor
-from Crypto.SelfTest.st_common import list_test_cases
-from Crypto.SelfTest.loader import load_test_vectors, load_test_vectors_wycheproof
-
-from Crypto.Hash import SHA1, SHA224, SHA256, SHA384, SHA512
-from Crypto.PublicKey import RSA
-from Crypto.Signature import pss
-from Crypto.Signature import PKCS1_PSS
-
-from Crypto.Signature.pss import MGF1
-
-
-def load_hash_by_name(hash_name):
- return __import__("Crypto.Hash." + hash_name, globals(), locals(), ["new"])
-
-
-class PRNG(object):
-
- def __init__(self, stream):
- self.stream = stream
- self.idx = 0
-
- def __call__(self, rnd_size):
- result = self.stream[self.idx:self.idx + rnd_size]
- self.idx += rnd_size
- return result
-
-
-class PSS_Tests(unittest.TestCase):
-
- rsa_key = b'-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAsvI34FgiTK8+txBvmooNGpNwk23YTU51dwNZi5yha3W4lA/Q\nvcZrDalkmD7ekWQwnduxVKa6pRSI13KBgeUOIqJoGXSWhntEtY3FEwvWOHW5AE7Q\njUzTzCiYT6TVaCcpa/7YLai+p6ai2g5f5Zfh4jSawa9uYeuggFygQq4IVW796MgV\nyqxYMM/arEj+/sKz3Viua9Rp9fFosertCYCX4DUTgW0mX9bwEnEOgjSI3pLOPXz1\n8vx+DRZS5wMCmwCUa0sKonLn3cAUPq+sGix7+eo7T0Z12MU8ud7IYVX/75r3cXiF\nPaYE2q8Le0kgOApIXbb+x74x0rNgyIh1yGygkwIDAQABAoIBABz4t1A0pLT6qHI2\nEIOaNz3mwhK0dZEqkz0GB1Dhtoax5ATgvKCFB98J3lYB08IBURe1snOsnMpOVUtg\naBRSM+QqnCUG6bnzKjAkuFP5liDE+oNQv1YpKp9CsUovuzdmI8Au3ewihl+ZTIN2\nUVNYMEOR1b5m+z2SSwWNOYsiJwpBrT7zkpdlDyjat7FiiPhMMIMXjhQFVxURMIcB\njUBtPzGvV/PG90cVDWi1wRGeeP1dDqti/jsnvykQ15KW1MqGrpeNKRmDdTy/Ucl1\nWIoYklKw3U456lgZ/rDTDB818+Tlnk35z4yF7d5ANPM8CKfqOPcnO1BCKVFzf4eq\n54wvUtkCgYEA1Zv2lp06l7rXMsvNtyYQjbFChezRDRnPwZmN4NCdRtTgGG1G0Ryd\nYz6WWoPGqZp0b4LAaaHd3W2GTcpXF8WXMKfMX1W+tMAxMozfsXRKMcHoypwuS5wT\nfJRXJCG4pvd57AB0iVUEJW2we+uGKU5Zxcx//id2nXGCpoRyViIplQsCgYEA1nVC\neHupHChht0Fh4N09cGqZHZzuwXjOUMzR3Vsfz+4WzVS3NvIgN4g5YgmQFOeKwo5y\niRq5yvubcNdFvf85eHWClg0zPAyxJCVUWigCrrOanGEhJo6re4idJvNVzu4Ucg0v\n6B3SJ1HsCda+ZSNz24bSyqRep8A+RoAaoVSFx5kCgYEAn3RvXPs9s+obnqWYiPF3\nRe5etE6Vt2vfNKwFxx6zaR6bsmBQjuUHcABWiHb6I71S0bMPI0tbrWGG8ibrYKl1\nNTLtUvVVCOS3VP7oNTWT9RTFTAnOXU7DFSo+6o/poWn3r36ff6zhDXeWWMr2OXtt\ndEQ1/2lCGEGVv+v61eVmmQUCgYABFHITPTwqwiFL1O5zPWnzyPWgaovhOYSAb6eW\n38CXQXGn8wdBJZL39J2lWrr4//l45VK6UgIhfYbY2JynSkO10ZGow8RARygVMILu\nOUlaK9lZdDvAf/NpGdUAvzTtZ9F+iYZ2OsA2JnlzyzsGM1l//3vMPWukmJk3ral0\nqoJJ8QKBgGRG3eVHnIegBbFVuMDp2NTcfuSuDVUQ1fGAwtPiFa8u81IodJnMk2pq\niXu2+0ytNA/M+SVrAnE2AgIzcaJbtr0p2srkuVM7KMWnG1vWFNjtXN8fAhf/joOv\nD+NmPL/N4uE57e40tbiU/H7KdyZaDt+5QiTmdhuyAe6CBjKsF2jy\n-----END RSA PRIVATE KEY-----'
- msg = b'AAA'
- tag = b'\x00[c5\xd8\xb0\x8b!D\x81\x83\x07\xc0\xdd\xb9\xb4\xb2`\x92\xe7\x02\xf1\xe1P\xea\xc3\xf0\xe3>\xddX5\xdd\x8e\xc5\x89\xef\xf3\xc2\xdc\xfeP\x02\x7f\x12+\xc9\xaf\xbb\xec\xfe\xb0\xa5\xb9\x08\x11P\x8fL\xee5\x9b\xb0k{=_\xd2\x14\xfb\x01R\xb7\xfe\x14}b\x03\x8d5Y\x89~}\xfc\xf2l\xd01-\xbd\xeb\x11\xcdV\x11\xe9l\x19k/o5\xa2\x0f\x15\xe7Q$\t=\xec\x1dAB\x19\xa5P\x9a\xaf\xa3G\x86"\xd6~\xf0j\xfcqkbs\x13\x84b\xe4\xbdm(\xed`\xa4F\xfb\x8f.\xe1\x8c)/_\x9eS\x98\xa4v\xb8\xdc\xfe\xf7/D\x18\x19\xb3T\x97:\xe2\x96s\xe8<\xa2\xb4\xb9\xf8/'
-
- def test_positive_1(self):
- key = RSA.import_key(self.rsa_key)
- h = SHA256.new(self.msg)
- verifier = pss.new(key)
- verifier.verify(h, self.tag)
-
- def test_negative_1(self):
- key = RSA.import_key(self.rsa_key)
- h = SHA256.new(self.msg + b'A')
- verifier = pss.new(key)
- tag = bytearray(self.tag)
- self.assertRaises(ValueError, verifier.verify, h, tag)
-
- def test_negative_2(self):
- key = RSA.import_key(self.rsa_key)
- h = SHA256.new(self.msg)
- verifier = pss.new(key, salt_bytes=1000)
- tag = bytearray(self.tag)
- self.assertRaises(ValueError, verifier.verify, h, tag)
-
-
-class FIPS_PKCS1_Verify_Tests(unittest.TestCase):
-
- def shortDescription(self):
- return "FIPS PKCS1 Tests (Verify)"
-
- def verify_positive(self, hashmod, message, public_key, salt, signature):
- prng = PRNG(salt)
- hashed = hashmod.new(message)
- verifier = pss.new(public_key, salt_bytes=len(salt), rand_func=prng)
- verifier.verify(hashed, signature)
-
- def verify_negative(self, hashmod, message, public_key, salt, signature):
- prng = PRNG(salt)
- hashed = hashmod.new(message)
- verifier = pss.new(public_key, salt_bytes=len(salt), rand_func=prng)
- self.assertRaises(ValueError, verifier.verify, hashed, signature)
-
- def test_can_sign(self):
- test_public_key = RSA.generate(1024).public_key()
- verifier = pss.new(test_public_key)
- self.assertEqual(verifier.can_sign(), False)
-
-
-class FIPS_PKCS1_Verify_Tests_KAT(unittest.TestCase):
- pass
-
-
-test_vectors_verify = load_test_vectors(("Signature", "PKCS1-PSS"),
- "SigVerPSS_186-3.rsp",
- "Signature Verification 186-3",
- {'shaalg': lambda x: x,
- 'result': lambda x: x}) or []
-
-
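-# The parsed .rsp file interleaves key material with test cases: plain strings are
-# section headers, entries carrying "n" set the modulus for the vectors that follow,
-# and the remaining entries are the individual verification tests.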
-for count, tv in enumerate(test_vectors_verify):
- if isinstance(tv, str):
- continue
- if hasattr(tv, "n"):
- modulus = tv.n
- continue
- if hasattr(tv, "p"):
- continue
-
- hash_module = load_hash_by_name(tv.shaalg.upper())
- hash_obj = hash_module.new(tv.msg)
- public_key = RSA.construct([bytes_to_long(x) for x in (modulus, tv.e)]) # type: ignore
- if tv.saltval != b("\x00"):
- prng = PRNG(tv.saltval)
- verifier = pss.new(public_key, salt_bytes=len(tv.saltval), rand_func=prng)
- else:
- verifier = pss.new(public_key, salt_bytes=0)
-
- def positive_test(self, hash_obj=hash_obj, verifier=verifier, signature=tv.s):
- verifier.verify(hash_obj, signature)
-
- def negative_test(self, hash_obj=hash_obj, verifier=verifier, signature=tv.s):
- self.assertRaises(ValueError, verifier.verify, hash_obj, signature)
-
- if tv.result == 'p':
- setattr(FIPS_PKCS1_Verify_Tests_KAT, "test_positive_%d" % count, positive_test)
- else:
- setattr(FIPS_PKCS1_Verify_Tests_KAT, "test_negative_%d" % count, negative_test)
-
-
-class FIPS_PKCS1_Sign_Tests(unittest.TestCase):
-
- def shortDescription(self):
- return "FIPS PKCS1 Tests (Sign)"
-
- def test_can_sign(self):
- test_private_key = RSA.generate(1024)
- signer = pss.new(test_private_key)
- self.assertEqual(signer.can_sign(), True)
-
-
-class FIPS_PKCS1_Sign_Tests_KAT(unittest.TestCase):
- pass
-
-
-test_vectors_sign = load_test_vectors(("Signature", "PKCS1-PSS"),
- "SigGenPSS_186-2.txt",
- "Signature Generation 186-2",
- {'shaalg': lambda x: x}) or []
-
-test_vectors_sign += load_test_vectors(("Signature", "PKCS1-PSS"),
- "SigGenPSS_186-3.txt",
- "Signature Generation 186-3",
- {'shaalg': lambda x: x}) or []
-
-for count, tv in enumerate(test_vectors_sign):
- if isinstance(tv, str):
- continue
- if hasattr(tv, "n"):
- modulus = tv.n
- continue
- if hasattr(tv, "e"):
- private_key = RSA.construct([bytes_to_long(x) for x in (modulus, tv.e, tv.d)]) # type: ignore
- continue
-
- hash_module = load_hash_by_name(tv.shaalg.upper())
- hash_obj = hash_module.new(tv.msg)
- if tv.saltval != b("\x00"):
- prng = PRNG(tv.saltval)
- signer = pss.new(private_key, salt_bytes=len(tv.saltval), rand_func=prng)
- else:
- signer = pss.new(private_key, salt_bytes=0)
-
- def new_test(self, hash_obj=hash_obj, signer=signer, result=tv.s):
- signature = signer.sign(hash_obj)
- self.assertEqual(signature, result)
-
- setattr(FIPS_PKCS1_Sign_Tests_KAT, "test_%d" % count, new_test)
-
-
-class PKCS1_Legacy_Module_Tests(unittest.TestCase):
- """Verify that the legacy module Crypto.Signature.PKCS1_PSS
- behaves as expected. The only difference is that the verify()
- method returns True/False and does not raise exceptions."""
-
- def shortDescription(self):
- return "Test legacy Crypto.Signature.PKCS1_PSS"
-
- def runTest(self):
- key = RSA.generate(1024)
- hashed = SHA1.new(b("Test"))
- good_signature = PKCS1_PSS.new(key).sign(hashed)
- verifier = PKCS1_PSS.new(key.public_key())
-
- self.assertEqual(verifier.verify(hashed, good_signature), True)
-
- # Flip a few bits in the signature
- bad_signature = strxor(good_signature, bchr(1) * len(good_signature))
- self.assertEqual(verifier.verify(hashed, bad_signature), False)
-
-
-class PKCS1_All_Hashes_Tests(unittest.TestCase):
-
- def shortDescription(self):
- return "Test PKCS#1 PSS signature in combination with all hashes"
-
- def runTest(self):
-
- key = RSA.generate(1280)
- signer = pss.new(key)
- hash_names = ("MD2", "MD4", "MD5", "RIPEMD160", "SHA1",
- "SHA224", "SHA256", "SHA384", "SHA512",
- "SHA3_224", "SHA3_256", "SHA3_384", "SHA3_512")
-
- for name in hash_names:
- hashed = load_hash_by_name(name).new(b("Test"))
- signer.sign(hashed)
-
- from Crypto.Hash import BLAKE2b, BLAKE2s
- for hash_size in (20, 32, 48, 64):
- hashed_b = BLAKE2b.new(digest_bytes=hash_size, data=b("Test"))
- signer.sign(hashed_b)
- for hash_size in (16, 20, 28, 32):
- hashed_s = BLAKE2s.new(digest_bytes=hash_size, data=b("Test"))
- signer.sign(hashed_s)
-
-
-def get_hash_module(hash_name):
- if hash_name == "SHA-512":
- hash_module = SHA512
- elif hash_name == "SHA-512/224":
- hash_module = SHA512.new(truncate="224")
- elif hash_name == "SHA-512/256":
- hash_module = SHA512.new(truncate="256")
- elif hash_name == "SHA-384":
- hash_module = SHA384
- elif hash_name == "SHA-256":
- hash_module = SHA256
- elif hash_name == "SHA-224":
- hash_module = SHA224
- elif hash_name == "SHA-1":
- hash_module = SHA1
- else:
- raise ValueError("Unknown hash algorithm: " + hash_name)
- return hash_module
-
-
-class TestVectorsPSSWycheproof(unittest.TestCase):
-
- def __init__(self, wycheproof_warnings):
- unittest.TestCase.__init__(self)
- self._wycheproof_warnings = wycheproof_warnings
- self._id = "None"
-
- def add_tests(self, filename):
-
- def filter_rsa(group):
- return RSA.import_key(group['keyPem'])
-
- def filter_sha(group):
- return get_hash_module(group['sha'])
-
- def filter_type(group):
- type_name = group['type']
- if type_name not in ("RsassaPssVerify", ):
- raise ValueError("Unknown type name " + type_name)
-
- def filter_slen(group):
- return group['sLen']
-
- def filter_mgf(group):
- mgf = group['mgf']
- if mgf not in ("MGF1", ):
- raise ValueError("Unknown MGF " + mgf)
- mgf1_hash = get_hash_module(group['mgfSha'])
-
- def mgf(x, y, mh=mgf1_hash):
- return MGF1(x, y, mh)
-
- return mgf
-
- result = load_test_vectors_wycheproof(("Signature", "wycheproof"),
- filename,
- "Wycheproof PSS signature (%s)" % filename,
- group_tag={'key': filter_rsa,
- 'hash_module': filter_sha,
- 'sLen': filter_slen,
- 'mgf': filter_mgf,
- 'type': filter_type})
-        self.tv += result
-
- def setUp(self):
- self.tv = []
- self.add_tests("rsa_pss_2048_sha1_mgf1_20_test.json")
- self.add_tests("rsa_pss_2048_sha256_mgf1_0_test.json")
- self.add_tests("rsa_pss_2048_sha256_mgf1_32_test.json")
- self.add_tests("rsa_pss_2048_sha512_256_mgf1_28_test.json")
- self.add_tests("rsa_pss_2048_sha512_256_mgf1_32_test.json")
- self.add_tests("rsa_pss_3072_sha256_mgf1_32_test.json")
- self.add_tests("rsa_pss_4096_sha256_mgf1_32_test.json")
- self.add_tests("rsa_pss_4096_sha512_mgf1_32_test.json")
- self.add_tests("rsa_pss_misc_test.json")
-
- def shortDescription(self):
- return self._id
-
- def warn(self, tv):
- if tv.warning and self._wycheproof_warnings:
- import warnings
- warnings.warn("Wycheproof warning: %s (%s)" % (self._id, tv.comment))
-
- def test_verify(self, tv):
- self._id = "Wycheproof RSA PSS Test #%d (%s)" % (tv.id, tv.comment)
-
- hashed_msg = tv.hash_module.new(tv.msg)
- signer = pss.new(tv.key, mask_func=tv.mgf, salt_bytes=tv.sLen)
- try:
-            signer.verify(hashed_msg, tv.sig)
-        except ValueError:
- if tv.warning:
- return
- assert not tv.valid
- else:
- assert tv.valid
- self.warn(tv)
-
- def runTest(self):
- for tv in self.tv:
- self.test_verify(tv)
-
-
-def get_tests(config={}):
- wycheproof_warnings = config.get('wycheproof_warnings')
-
- tests = []
- tests += list_test_cases(PSS_Tests)
- tests += list_test_cases(FIPS_PKCS1_Verify_Tests)
- tests += list_test_cases(FIPS_PKCS1_Sign_Tests)
- tests += list_test_cases(PKCS1_Legacy_Module_Tests)
- tests += list_test_cases(PKCS1_All_Hashes_Tests)
-
- if config.get('slow_tests'):
- tests += list_test_cases(FIPS_PKCS1_Verify_Tests_KAT)
- tests += list_test_cases(FIPS_PKCS1_Sign_Tests_KAT)
-
- tests += [TestVectorsPSSWycheproof(wycheproof_warnings)]
-
- return tests
-
-
-if __name__ == '__main__':
- def suite():
- return unittest.TestSuite(get_tests())
- unittest.main(defaultTest='suite')
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/seattle_weather_interactive.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/seattle_weather_interactive.py
deleted file mode 100644
index cf65fbffda1b38cb11c493bc76664f3a9931b1ce..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/seattle_weather_interactive.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
-Seattle Weather Interactive
-===========================
-This chart provides an interactive exploration of Seattle weather over the
-course of the year. It includes a one-axis brush selection to easily
-see the distribution of weather types in a particular date range.
-"""
-# category: case studies
-import altair as alt
-from vega_datasets import data
-
-source = data.seattle_weather()
-
-scale = alt.Scale(domain=['sun', 'fog', 'drizzle', 'rain', 'snow'],
- range=['#e7ba52', '#a7a7a7', '#aec7e8', '#1f77b4', '#9467bd'])
-color = alt.Color('weather:N', scale=scale)
-
-# We create two selections:
-# - a brush that is active on the top panel
-# - a multi-click that is active on the bottom panel
-brush = alt.selection_interval(encodings=['x'])
-click = alt.selection_multi(encodings=['color'])
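-# Together they cross-filter: the brush filters the bar chart below,
-# and clicking a bar filters the scatter plot above.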
-
-# Top panel is scatter plot of temperature vs time
-points = alt.Chart().mark_point().encode(
- alt.X('monthdate(date):T', title='Date'),
- alt.Y('temp_max:Q',
- title='Maximum Daily Temperature (C)',
- scale=alt.Scale(domain=[-5, 40])
- ),
- color=alt.condition(brush, color, alt.value('lightgray')),
- size=alt.Size('precipitation:Q', scale=alt.Scale(range=[5, 200]))
-).properties(
- width=550,
- height=300
-).add_selection(
- brush
-).transform_filter(
- click
-)
-
-# Bottom panel is a bar chart of weather type
-bars = alt.Chart().mark_bar().encode(
- x='count()',
- y='weather:N',
- color=alt.condition(click, color, alt.value('lightgray')),
-).transform_filter(
- brush
-).properties(
- width=550,
-).add_selection(
- click
-)
-
-alt.vconcat(
- points,
- bars,
- data=source,
- title="Seattle Weather: 2012-2015"
-)
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/__init__.py
deleted file mode 100644
index 6e81178c81e063691c7d0f7a966afbaf9ed7934b..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/__init__.py
+++ /dev/null
@@ -1,167 +0,0 @@
-__all__ = (
- "maybe_async",
- "maybe_async_cm",
- "run",
- "sleep",
- "sleep_forever",
- "sleep_until",
- "current_time",
- "get_all_backends",
- "get_cancelled_exc_class",
- "BrokenResourceError",
- "BrokenWorkerProcess",
- "BusyResourceError",
- "ClosedResourceError",
- "DelimiterNotFound",
- "EndOfStream",
- "ExceptionGroup",
- "IncompleteRead",
- "TypedAttributeLookupError",
- "WouldBlock",
- "AsyncFile",
- "Path",
- "open_file",
- "wrap_file",
- "aclose_forcefully",
- "open_signal_receiver",
- "connect_tcp",
- "connect_unix",
- "create_tcp_listener",
- "create_unix_listener",
- "create_udp_socket",
- "create_connected_udp_socket",
- "getaddrinfo",
- "getnameinfo",
- "wait_socket_readable",
- "wait_socket_writable",
- "create_memory_object_stream",
- "run_process",
- "open_process",
- "create_lock",
- "CapacityLimiter",
- "CapacityLimiterStatistics",
- "Condition",
- "ConditionStatistics",
- "Event",
- "EventStatistics",
- "Lock",
- "LockStatistics",
- "Semaphore",
- "SemaphoreStatistics",
- "create_condition",
- "create_event",
- "create_semaphore",
- "create_capacity_limiter",
- "open_cancel_scope",
- "fail_after",
- "move_on_after",
- "current_effective_deadline",
- "TASK_STATUS_IGNORED",
- "CancelScope",
- "create_task_group",
- "TaskInfo",
- "get_current_task",
- "get_running_tasks",
- "wait_all_tasks_blocked",
- "run_sync_in_worker_thread",
- "run_async_from_thread",
- "run_sync_from_thread",
- "current_default_worker_thread_limiter",
- "create_blocking_portal",
- "start_blocking_portal",
- "typed_attribute",
- "TypedAttributeSet",
- "TypedAttributeProvider",
-)
-
-from typing import Any
-
-from ._core._compat import maybe_async, maybe_async_cm
-from ._core._eventloop import (
- current_time,
- get_all_backends,
- get_cancelled_exc_class,
- run,
- sleep,
- sleep_forever,
- sleep_until,
-)
-from ._core._exceptions import (
- BrokenResourceError,
- BrokenWorkerProcess,
- BusyResourceError,
- ClosedResourceError,
- DelimiterNotFound,
- EndOfStream,
- ExceptionGroup,
- IncompleteRead,
- TypedAttributeLookupError,
- WouldBlock,
-)
-from ._core._fileio import AsyncFile, Path, open_file, wrap_file
-from ._core._resources import aclose_forcefully
-from ._core._signals import open_signal_receiver
-from ._core._sockets import (
- connect_tcp,
- connect_unix,
- create_connected_udp_socket,
- create_tcp_listener,
- create_udp_socket,
- create_unix_listener,
- getaddrinfo,
- getnameinfo,
- wait_socket_readable,
- wait_socket_writable,
-)
-from ._core._streams import create_memory_object_stream
-from ._core._subprocesses import open_process, run_process
-from ._core._synchronization import (
- CapacityLimiter,
- CapacityLimiterStatistics,
- Condition,
- ConditionStatistics,
- Event,
- EventStatistics,
- Lock,
- LockStatistics,
- Semaphore,
- SemaphoreStatistics,
- create_capacity_limiter,
- create_condition,
- create_event,
- create_lock,
- create_semaphore,
-)
-from ._core._tasks import (
- TASK_STATUS_IGNORED,
- CancelScope,
- create_task_group,
- current_effective_deadline,
- fail_after,
- move_on_after,
- open_cancel_scope,
-)
-from ._core._testing import (
- TaskInfo,
- get_current_task,
- get_running_tasks,
- wait_all_tasks_blocked,
-)
-from ._core._typedattr import TypedAttributeProvider, TypedAttributeSet, typed_attribute
-
-# Re-exported here, for backwards compatibility
-# isort: off
-from .to_thread import current_default_worker_thread_limiter, run_sync_in_worker_thread
-from .from_thread import (
- create_blocking_portal,
- run_async_from_thread,
- run_sync_from_thread,
- start_blocking_portal,
-)
-
-# Re-export imports so they look like they live directly in this package
-key: str
-value: Any
-for key, value in list(locals().items()):
- if getattr(value, "__module__", "").startswith("anyio."):
- value.__module__ = __name__
diff --git a/spaces/aseduto/sp500/README.md b/spaces/aseduto/sp500/README.md
deleted file mode 100644
index cd2d0d9f416f6837a8354dfb5bd43fd43bbb9c97..0000000000000000000000000000000000000000
--- a/spaces/aseduto/sp500/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: S&P 500
-emoji: 📉 📈
-colorFrom: blue
-colorTo: green
-sdk: streamlit
-sdk_version: 1.24.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-# Artificial Intelligence - S&P 500 Prediction Model using PyTorch
-
-This is a model using PyTorch that predicts the value of the S&P 500 index based on earnings and interest rates.
-
-## Introduction
-
-The model takes into account historical earnings per share (EPS) and treasury rates to make predictions on the S&P 500 index. It has been trained on a significant amount of data and has shown promising results during the testing phase.
-
-## Data and Training
-
-The model has been trained on a comprehensive dataset of historical EPS and treasury rates from 1997 to today.
-
-
-## Feedback
-
-I believe this model can provide valuable insights into the behavior of the stock market and assist in making informed investment decisions. However, I'm still working to further improve the model and explore potential areas of enhancement.
-
-## How to Use the Model
-
-It is important to note that no model can perfectly predict the future value of the S&P 500 index. However, these models can be used to generate forecasts that can help investors make informed decisions.
-
-You can input your expected values for the treasury rate and the S&P earnings for each future quarter and have the model forecast the expected value of the index.
-
-## Contact
-
-If you have any questions, suggestions, or ideas, please feel free to reach out. Your feedback is greatly appreciated!
-
-
-
diff --git a/spaces/aus10powell/TwitterAccounts/app.py b/spaces/aus10powell/TwitterAccounts/app.py
deleted file mode 100644
index 454bcb949fd113be7865e74f8ac92b3f4c2d77ca..0000000000000000000000000000000000000000
--- a/spaces/aus10powell/TwitterAccounts/app.py
+++ /dev/null
@@ -1,328 +0,0 @@
-"""FastAPI endpoint
-To run locally use 'uvicorn app:app --host localhost --port 7860'
-or
-`python -m uvicorn app:app --reload --host localhost --port 7860`
-"""
-import datetime as dt
-import json
-import logging
-import sys
-import spacy
-
-# sys.setrecursionlimit(20000)
-import pandas as pd
-import numpy as np
-import os
-import random
-from typing import Dict, List
-
-import uvicorn
-from fastapi import FastAPI, HTTPException, Request, Response
-from fastapi.responses import HTMLResponse, JSONResponse
-from fastapi.staticfiles import StaticFiles
-from fastapi.templating import Jinja2Templates
-
-from rouge_score import rouge_scorer
-# Scripts
-import scripts.sentiment as sentiment
-import scripts.twitter_scraper as ts
-from scripts.summarization import bert_summarization
-from scripts.twitter_scraper import get_latest_account_tweets
-from scripts.sentiment import twitter_sentiment_api_score
-import scripts.utils as utils
-from scripts import translation
-from scripts import generative
-import nltk
-nltk.download('punkt')
-punkt_download_location = nltk.data.path[0]
-logging.info(f"punkt_download_location: {punkt_download_location}")
-
-logging.basicConfig(level=logging.INFO)
-pd.set_option('display.max_colwidth', 20)
-
-app = FastAPI()
-templates = Jinja2Templates(directory="templates")
-app.mount("/static", StaticFiles(directory="static"), name="static")
-
-# Construct absolute path to models folder
-models_path = os.path.abspath("models")
-
-username_list = [
- "alikarimi_ak8",
- "elonmusk",
- "BarackObama",
- "taylorlorenz",
- "cathiedwood",
- "ylecun",
-]
-
-## Static objects/paths
-start_date = dt.date(year=2023, month=2, day=1)
-end_date = dt.date(year=2023, month=3, day=22)
-
-# Load spacy module on app start
-nlp = spacy.load("en_core_web_sm")
-nlp.add_pipe("sentencizer")
-
-
-@app.get("/", response_class=HTMLResponse)
-async def webpage(request: Request):
- return templates.TemplateResponse("index.html", {"request": request})
-
-
-@app.get("/accounts")
-async def get_accounts() -> List[dict]:
- import pandas as pd
-
- logging.info(f"Pulling account information on {username_list}")
- account_info_list = [
- ts.get_twitter_account_info(twitter_handle=account) for account in username_list
- ]
- df_account = pd.DataFrame(account_info_list)
- df_account = df_account.style.bar(
- subset=["follower_count", "friends_count"], color="#d65f5f"
- )
- df_account = df_account.format(
- {"follower_count": "{:,.0f}", "friends_count": "{:,.0f}"}
- )
- html_table = df_account.to_html(classes="center", index=False)
-
- return HTMLResponse(content=html_table, status_code=200)
-
-
-@app.get("/tweets/{username}")
-def get_tweets_username(username: str) -> dict:
- # Method 2: Use Snscrape
- df_tweets = ts.get_tweets(handle=username)
-
- if isinstance(df_tweets, pd.DataFrame):
- df_tweets = df_tweets[["handle", "created_at","retweet_count","view_count","like_count", "full_text"]]
- df_tweets["created_at"] = df_tweets["created_at"].dt.strftime(
- "%Y-%m-%d %H:%M:%S"
- )
- df_tweets = df_tweets.sort_values("created_at", ascending=False)
-
- # Additional processing
- logging.info("Running sentiment on tweets")
- sentiments = twitter_sentiment_api_score(
- df_tweets['full_text'].to_list(), use_api=False
- )
- df_tweets["sentiment"] = [s['argmax'] for s in sentiments]
- if username == "alikarimi_ak8":
- p = translation.PersianTextProcessor()
- df_tweets['full_text_translated'] = df_tweets["full_text"].apply(lambda c: p.translate_text(persian_text = c))
-
-
- df_tweets_html = df_tweets.to_html(classes="center", index=False, escape=False)
- df_tweets.to_html(open("df_tweets_html.html", "w"))
- df_tweets_data = df_tweets.to_dict(orient="records")
-
- response_data = {"html": df_tweets_html, "data": df_tweets_data}
-
- return JSONResponse(content=response_data, status_code=200)
- else:
- print("Error: Failed to retrieve tweets.")
- return df_tweets
-
-
-@app.get("/audience/{username}", response_model=dict)
-async def get_audience(username: str) -> dict:
- if username in username_list:
- query = f"from:{username} since:{start_date} until:{end_date}"
- tweets = ts.get_tweets(query=query)
-
- n_samples = 5
-        # Randomly sample n_samples tweets from the user
- tweets_sampled = random.sample(tweets, n_samples)
-
- # Get all replies to sampled tweets
- tweet_threads = []
- for tweet in tweets_sampled:
- threads = ts.get_replies(
- username=tweet["username"],
- conversation_id=tweet["conversation_id"],
- max_tweets=100,
- )
- tweet_threads += threads
-
- # Get usernames from sample threads tweets
- usernames = [t["username"] for t in tweet_threads]
- # Get user info from sample replies to sampled tweets of user
- info_accounts = [
- ts.get_twitter_account_info(twitter_handle=account) for account in usernames
- ]
-
- # "follower_count":1,"friends_count":20,"verified":false}
- # Get stats for followers/audience engaging with tweets
- follower_counts = [
- info_accounts[i]["follower_count"] for i in range(len(info_accounts))
- ]
- friends_counts = [
- info_accounts[i]["friends_count"] for i in range(len(info_accounts))
- ]
- verified_counts = [
- 1 if info_accounts[i]["verified"] == True else 0
- for i in range(len(info_accounts))
- ]
- return {
- "sample_size": len(info_accounts),
- "mean_follower_count": round(np.mean(follower_counts), 3),
- "mean_friends_count": round(np.mean(friends_counts), 3),
- "mean_verified": round(np.mean(verified_counts), 3),
- }
- else:
- response = Response(content="Account not in scope of project.", status_code=404)
- return response
-
-
-@app.get("/sentiment/{username}")
-async def get_sentiment(username: str) -> Dict[str, Dict[str, float]]:
- if username not in username_list:
- raise HTTPException(status_code=404, detail="Account not in scope of project.")
-
- query = f"from:{username} since:{start_date} until:{end_date}"
- tweets = ts.get_tweets(query=query)
- n_samples = 5
- tweets_sampled = random.sample(tweets, n_samples)
-
- tweet_threads = []
- for tweet in tweets_sampled:
- threads = ts.get_replies(
- username=tweet["username"],
- conversation_id=tweet["conversation_id"],
- max_tweets=100,
- )
- tweet_threads += threads
-
- print(
- f"Total replies to {n_samples} sampled tweets from username: {username}, {len(tweet_threads)}"
- )
-
- ## Sentiment scoring
- print(f"Running tweet sentiment scoring on username: {username} tweets")
- tweets_scores = sentiment.get_tweets_sentiment(tweets=tweets)
- mean_tweets_score = round(np.mean(tweets_scores), 2)
- ci_tweets = utils.wilson_score_interval(tweets_scores)
-
- # Get sentiment of the threads from tweets
- # Get username tweets sentiment
- print(f"Running tweet thread sentiment scoring on username: {username} tweets")
- threads_scores = sentiment.get_tweets_sentiment(tweets=tweet_threads)
- mean_threads_score = round(np.mean(threads_scores), 2)
- ci_threads = utils.wilson_score_interval(threads_scores)
-
- return {
- "thread_level": {
- "mean": mean_threads_score,
- "confidence_interal": ci_threads,
- },
- "audience_level": {
- "mean": mean_tweets_score,
- "confidence_interval": ci_tweets,
- },
- }
-
-
-## APIs: Primarily called by the index page
-@app.post("/api/generate")
-async def generate_text(request: Request):
- """Generate text from a prompt.
-
- Args:
- request: The HTTP request.
-
- Returns:
- The generated text.
- """
- print("*" * 50)
- data = await request.json()
- print("*" * 50)
- logging.info("POST to api/generate received and processing")
-
-    # Check the length of the input; if it is greater than 10 tokens, the text is sent off to a summarizer.
- try:
- generated_text = generative.generate_account_text(
- prompt=data["text"], model_dir=os.path.join(models_path, data["account"])
- )
- logging.info("INFO: Successfully generate text from model.")
- except Exception as e:
- logging.error(f"Error generating text: {e}")
- return {"error": "Error generating text"}
- # return one example
- generated_text = generated_text[0]["generated_text"]
-
- ###################################################
-    ## Clean up the generated text: drop duplicate sentences and the
-    ## final (possibly truncated) sentence
- sentences = nltk.sent_tokenize(generated_text)
- unique_sentences = set()
- non_duplicate_sentences = []
- for sentence in sentences:
- if sentence not in unique_sentences:
- non_duplicate_sentences.append(sentence)
- unique_sentences.add(sentence)
- final_text = " ".join(non_duplicate_sentences[:-1])
-
- return {"generated_text": final_text}
-
-
-@app.post("/api/generate_summary")
-async def generate_summary(request: Request):
- """Generate summary from tweets
-
- Args:
- request: The HTTP request.
-
- Returns:
- The generated text.
- """
-
- print("*" * 50)
- data = await request.json()
- print("data", data["tweetsData"])
- # Get the list of text
- tweets = [t["full_text"] for t in data["tweetsData"]]
-
- # Concatenate tweets into a single string
- text = " .".join(tweets)
-
- sentences = nlp(text).sents
-
- sentences = list(sentences)
-
-    # Randomly sample ~10% of the sentences to keep the summarization input short
- sampled_sentences = random.sample(sentences, int(0.1 * len(sentences)))
-
- sampled_sentences = [sentiment.tweet_cleaner(s.text) for s in sampled_sentences]
-
- # Join the strings into one text blob
- tweet_blob = " ".join(sampled_sentences)
-
- # Generate the summary
- summary = bert_summarization(tweet_blob)
- print("Summary:", summary)
- # Return the summary
- return {"tweets_summary": summary}
-
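
The summary endpoint concatenates tweets, splits them into sentences with spaCy, keeps a ~10% random sample and hands the blob to the summarizer. A hedged sketch of that sampling step; the spaCy model name is an assumption, and `bert_summarization` stands in for the app's summarization callable:

```python
import random
import spacy

nlp = spacy.load("en_core_web_sm")  # assumption: some English spaCy model is installed

def sample_sentences(tweets, fraction=0.1, seed=0):
    """Split tweets into sentences and keep a random ~`fraction` of them."""
    random.seed(seed)
    text = ". ".join(tweets)
    sentences = [s.text.strip() for s in nlp(text).sents]
    k = max(1, int(fraction * len(sentences)))
    return " ".join(random.sample(sentences, k))

blob = sample_sentences(["Great launch today", "Service was down again", "Loving the new UI"])
# `blob` would then be passed to the summarization pipeline, e.g. bert_summarization(blob)
```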
-
-## Historical Tweets pages
-@app.get("/examples1")
-async def read_examples():
- with open("templates/charts/handle_sentiment_breakdown.html") as f:
- html = f.read()
- return HTMLResponse(content=html)
-
-
-@app.get("/examples2")
-async def read_examples():
- with open("templates/charts/handle_sentiment_timesteps.html") as f:
- html = f.read()
- return HTMLResponse(content=html)
-
-
-# uvicorn --workers=2 app:app
-if __name__ == "__main__":
- # uvicorn.run(app, host="0.0.0.0", port=8000)
- uvicorn.run("app:app", host="127.0.0.1", port=5050, reload=True)
diff --git a/spaces/awacke1/ChatGPTStreamlit9/README.md b/spaces/awacke1/ChatGPTStreamlit9/README.md
deleted file mode 100644
index 322f025775f6268ebeffc28dac25e0cd0ddf8bac..0000000000000000000000000000000000000000
--- a/spaces/awacke1/ChatGPTStreamlit9/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ChatGPTStreamlit9
-emoji: 🌖
-colorFrom: blue
-colorTo: green
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/balgot/text-to-stylegan3/README.md b/spaces/balgot/text-to-stylegan3/README.md
deleted file mode 100644
index d49bc65247dfa031716a5821ffccad811d285fac..0000000000000000000000000000000000000000
--- a/spaces/balgot/text-to-stylegan3/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: Text To Stylegan3
-emoji: 🔥
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 3.28.1
-app_file: app.py
-pinned: false
-license: openrail
----
-
-This is a demo showcasing the connection of these models:
-
-* [`sentence-transformers/all-MiniLM-L6-v2`](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2)
-* [`balgot/text-2-stylegan3`](https://huggingface.co/balgot/bert-2-stylegan3)
-* [`StyleGAN3`](https://nvlabs.github.io/stylegan3/)
-
-> **NOTE:** Only faces that StyleGAN3 itself can generate are possible (it was
-trained on the [FFHQ](https://github.com/NVlabs/ffhq-dataset) dataset), i.e.
-realistic human faces. Furthermore, how well the textual description is
-understood depends strongly on the captioning procedure used, so not every
-facial feature can be forced.
\ No newline at end of file
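
The README above chains a sentence embedder, a learned text-to-latent mapper and StyleGAN3. A rough sketch of that flow; the mapper and generator are treated as opaque callables, and their exact interfaces are assumptions rather than the demo's actual code:

```python
import torch
from sentence_transformers import SentenceTransformer

embedder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

def text_to_face(prompt, mapper, generator):
    """Embed a prompt, map it to a StyleGAN3 latent, and synthesize an image.

    `mapper` stands in for the balgot text-2-stylegan3 model and `generator`
    for a StyleGAN3 synthesis network; both are assumptions about the wiring.
    """
    with torch.no_grad():
        emb = torch.tensor(embedder.encode([prompt]))  # (1, 384) for MiniLM-L6
        latent = mapper(emb)                           # (1, latent_dim)
        image = generator(latent)                      # (1, 3, H, W)
    return image
```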
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/extras/ShapeUtils.js b/spaces/banana-projects/web3d/node_modules/three/src/extras/ShapeUtils.js
deleted file mode 100644
index ec050afb571b57ccbbefe2db216f38ff4027c81a..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/extras/ShapeUtils.js
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * @author zz85 / http://www.lab4games.net/zz85/blog
- */
-
-import { Earcut } from './Earcut.js';
-
-var ShapeUtils = {
-
- // calculate area of the contour polygon
-
- area: function ( contour ) {
-
- var n = contour.length;
- var a = 0.0;
-
- for ( var p = n - 1, q = 0; q < n; p = q ++ ) {
-
- a += contour[ p ].x * contour[ q ].y - contour[ q ].x * contour[ p ].y;
-
- }
-
- return a * 0.5;
-
- },
-
- isClockWise: function ( pts ) {
-
- return ShapeUtils.area( pts ) < 0;
-
- },
-
- triangulateShape: function ( contour, holes ) {
-
- var vertices = []; // flat array of vertices like [ x0,y0, x1,y1, x2,y2, ... ]
- var holeIndices = []; // array of hole indices
- var faces = []; // final array of vertex indices like [ [ a,b,d ], [ b,c,d ] ]
-
- removeDupEndPts( contour );
- addContour( vertices, contour );
-
- //
-
- var holeIndex = contour.length;
-
- holes.forEach( removeDupEndPts );
-
- for ( var i = 0; i < holes.length; i ++ ) {
-
- holeIndices.push( holeIndex );
- holeIndex += holes[ i ].length;
- addContour( vertices, holes[ i ] );
-
- }
-
- //
-
- var triangles = Earcut.triangulate( vertices, holeIndices );
-
- //
-
- for ( var i = 0; i < triangles.length; i += 3 ) {
-
- faces.push( triangles.slice( i, i + 3 ) );
-
- }
-
- return faces;
-
- }
-
-};
-
-function removeDupEndPts( points ) {
-
- var l = points.length;
-
- if ( l > 2 && points[ l - 1 ].equals( points[ 0 ] ) ) {
-
- points.pop();
-
- }
-
-}
-
-function addContour( vertices, contour ) {
-
- for ( var i = 0; i < contour.length; i ++ ) {
-
- vertices.push( contour[ i ].x );
- vertices.push( contour[ i ].y );
-
- }
-
-}
-
-export { ShapeUtils };
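
The deleted `ShapeUtils` computes a polygon's signed area with the shoelace formula and uses its sign to detect winding order before triangulation. An equivalent Python sketch of those two helpers:

```python
def signed_area(contour):
    """Shoelace formula; `contour` is a list of (x, y) tuples."""
    n = len(contour)
    a = 0.0
    for q in range(n):
        p = q - 1  # wraps to the last vertex when q == 0
        a += contour[p][0] * contour[q][1] - contour[q][0] * contour[p][1]
    return a * 0.5

def is_clockwise(contour):
    # Same convention as ShapeUtils.isClockWise: negative area means clockwise.
    return signed_area(contour) < 0

square = [(0, 0), (1, 0), (1, 1), (0, 1)]          # counter-clockwise unit square
print(signed_area(square), is_clockwise(square))   # 1.0 False
```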
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/map_particle_pars_fragment.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/map_particle_pars_fragment.glsl.js
deleted file mode 100644
index 3bf8eb007280ef6c7ad0be15171cf1944467d86d..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/map_particle_pars_fragment.glsl.js
+++ /dev/null
@@ -1,8 +0,0 @@
-export default /* glsl */`
-#ifdef USE_MAP
-
- uniform mat3 uvTransform;
- uniform sampler2D map;
-
-#endif
-`;
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/models/video_recurrent_gan_model.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/models/video_recurrent_gan_model.py
deleted file mode 100644
index 74cf81145c50ffafb220d22b51e56746dee5ba41..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/models/video_recurrent_gan_model.py
+++ /dev/null
@@ -1,180 +0,0 @@
-import torch
-from collections import OrderedDict
-
-from basicsr.archs import build_network
-from basicsr.losses import build_loss
-from basicsr.utils import get_root_logger
-from basicsr.utils.registry import MODEL_REGISTRY
-from .video_recurrent_model import VideoRecurrentModel
-
-
-@MODEL_REGISTRY.register()
-class VideoRecurrentGANModel(VideoRecurrentModel):
-
- def init_training_settings(self):
- train_opt = self.opt['train']
-
- self.ema_decay = train_opt.get('ema_decay', 0)
- if self.ema_decay > 0:
- logger = get_root_logger()
- logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}')
- # build network net_g with Exponential Moving Average (EMA)
- # net_g_ema only used for testing on one GPU and saving.
- # There is no need to wrap with DistributedDataParallel
- self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
- # load pretrained model
- load_path = self.opt['path'].get('pretrain_network_g', None)
- if load_path is not None:
- self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
- else:
- self.model_ema(0) # copy net_g weight
- self.net_g_ema.eval()
-
- # define network net_d
- self.net_d = build_network(self.opt['network_d'])
- self.net_d = self.model_to_device(self.net_d)
- self.print_network(self.net_d)
-
- # load pretrained models
- load_path = self.opt['path'].get('pretrain_network_d', None)
- if load_path is not None:
- param_key = self.opt['path'].get('param_key_d', 'params')
- self.load_network(self.net_d, load_path, self.opt['path'].get('strict_load_d', True), param_key)
-
- self.net_g.train()
- self.net_d.train()
-
- # define losses
- if train_opt.get('pixel_opt'):
- self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
- else:
- self.cri_pix = None
-
- if train_opt.get('perceptual_opt'):
- self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
- else:
- self.cri_perceptual = None
-
- if train_opt.get('gan_opt'):
- self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device)
-
- self.net_d_iters = train_opt.get('net_d_iters', 1)
- self.net_d_init_iters = train_opt.get('net_d_init_iters', 0)
-
- # set up optimizers and schedulers
- self.setup_optimizers()
- self.setup_schedulers()
-
- def setup_optimizers(self):
- train_opt = self.opt['train']
- if train_opt['fix_flow']:
- normal_params = []
- flow_params = []
- for name, param in self.net_g.named_parameters():
- if 'spynet' in name: # The fix_flow now only works for spynet.
- flow_params.append(param)
- else:
- normal_params.append(param)
-
- optim_params = [
- { # add flow params first
- 'params': flow_params,
- 'lr': train_opt['lr_flow']
- },
- {
- 'params': normal_params,
- 'lr': train_opt['optim_g']['lr']
- },
- ]
- else:
- optim_params = self.net_g.parameters()
-
- # optimizer g
- optim_type = train_opt['optim_g'].pop('type')
- self.optimizer_g = self.get_optimizer(optim_type, optim_params, **train_opt['optim_g'])
- self.optimizers.append(self.optimizer_g)
- # optimizer d
- optim_type = train_opt['optim_d'].pop('type')
- self.optimizer_d = self.get_optimizer(optim_type, self.net_d.parameters(), **train_opt['optim_d'])
- self.optimizers.append(self.optimizer_d)
-
- def optimize_parameters(self, current_iter):
- logger = get_root_logger()
- # optimize net_g
- for p in self.net_d.parameters():
- p.requires_grad = False
-
- if self.fix_flow_iter:
- if current_iter == 1:
- logger.info(f'Fix flow network and feature extractor for {self.fix_flow_iter} iters.')
- for name, param in self.net_g.named_parameters():
- if 'spynet' in name or 'edvr' in name:
- param.requires_grad_(False)
- elif current_iter == self.fix_flow_iter:
- logger.warning('Train all the parameters.')
- self.net_g.requires_grad_(True)
-
- self.optimizer_g.zero_grad()
- self.output = self.net_g(self.lq)
-
- _, _, c, h, w = self.output.size()
-
- l_g_total = 0
- loss_dict = OrderedDict()
- if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
- # pixel loss
- if self.cri_pix:
- l_g_pix = self.cri_pix(self.output, self.gt)
- l_g_total += l_g_pix
- loss_dict['l_g_pix'] = l_g_pix
- # perceptual loss
- if self.cri_perceptual:
- l_g_percep, l_g_style = self.cri_perceptual(self.output.view(-1, c, h, w), self.gt.view(-1, c, h, w))
- if l_g_percep is not None:
- l_g_total += l_g_percep
- loss_dict['l_g_percep'] = l_g_percep
- if l_g_style is not None:
- l_g_total += l_g_style
- loss_dict['l_g_style'] = l_g_style
- # gan loss
- fake_g_pred = self.net_d(self.output.view(-1, c, h, w))
- l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
- l_g_total += l_g_gan
- loss_dict['l_g_gan'] = l_g_gan
-
- l_g_total.backward()
- self.optimizer_g.step()
-
- # optimize net_d
- for p in self.net_d.parameters():
- p.requires_grad = True
-
- self.optimizer_d.zero_grad()
- # real
- # reshape to (b*n, c, h, w)
- real_d_pred = self.net_d(self.gt.view(-1, c, h, w))
- l_d_real = self.cri_gan(real_d_pred, True, is_disc=True)
- loss_dict['l_d_real'] = l_d_real
- loss_dict['out_d_real'] = torch.mean(real_d_pred.detach())
- l_d_real.backward()
- # fake
- # reshape to (b*n, c, h, w)
- fake_d_pred = self.net_d(self.output.view(-1, c, h, w).detach())
- l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True)
- loss_dict['l_d_fake'] = l_d_fake
- loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach())
- l_d_fake.backward()
- self.optimizer_d.step()
-
- self.log_dict = self.reduce_loss_dict(loss_dict)
-
- if self.ema_decay > 0:
- self.model_ema(decay=self.ema_decay)
-
- def save(self, epoch, current_iter):
- if self.ema_decay > 0:
- self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema'])
- else:
- self.save_network(self.net_g, 'net_g', current_iter)
- self.save_network(self.net_d, 'net_d', current_iter)
- self.save_training_state(epoch, current_iter)
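
`optimize_parameters` above follows the usual alternating GAN scheme: freeze the discriminator, step the generator on pixel/perceptual/GAN losses, then unfreeze the discriminator and step it on real and detached fake predictions. A minimal, framework-agnostic sketch of that loop shape (toy losses and plain modules, not the BasicSR classes):

```python
import torch
import torch.nn.functional as F

def gan_step(net_g, net_d, opt_g, opt_d, lq, gt):
    """One alternating G/D update: L1 pixel loss plus a logistic GAN loss."""
    # Generator step: discriminator frozen so only G receives gradients.
    for p in net_d.parameters():
        p.requires_grad = False
    opt_g.zero_grad()
    output = net_g(lq)
    fake_pred = net_d(output)
    l_g = F.l1_loss(output, gt) + F.binary_cross_entropy_with_logits(
        fake_pred, torch.ones_like(fake_pred))
    l_g.backward()
    opt_g.step()

    # Discriminator step: real targets vs. detached fake outputs.
    for p in net_d.parameters():
        p.requires_grad = True
    opt_d.zero_grad()
    real_pred = net_d(gt)
    l_d_real = F.binary_cross_entropy_with_logits(real_pred, torch.ones_like(real_pred))
    fake_pred = net_d(output.detach())
    l_d_fake = F.binary_cross_entropy_with_logits(fake_pred, torch.zeros_like(fake_pred))
    (l_d_real + l_d_fake).backward()
    opt_d.step()
    return float(l_g), float(l_d_real + l_d_fake)
```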
diff --git a/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095508.py b/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095508.py
deleted file mode 100644
index d81671aa0eda6f43052a3e8414bab78692679bb9..0000000000000000000000000000000000000000
--- a/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095508.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#-*- coding : utf-8-*-
-import base64
-from subprocess import STDOUT
-import streamlit as st
-import pandas as pd
-import camelot as cam # extracting tables from PDFs
-
-st.title("PDF Table Extractor")
-
-input_pdf = st.file_uploader(label = "", type = 'pdf')
-
-page_number = st.text_input("请填写表格所在PDF页码,eg: 3", value = 1)
-background = st.selectbox("表格线条是否隐藏",(False,True),)
-if input_pdf is not None:
- # byte object into a PDF file
- with open("input.pdf", "wb") as f:
- base64_pdf = base64.b64encode(input_pdf.read()).decode('utf-8')
- f.write(base64.b64decode(base64_pdf))
- f.close()
-
- # read the pdf and parse it using stream
- tables = cam.read_pdf("input.pdf", pages=page_number, process_background=background)
- result = pd.ExcelWriter('result.xlsx', engine='xlsxwriter')
- tables[0].to_excel(result,index=False)
- # for i in range(0,len(tables)):
- # table = tables[i].df
- # sheetname = str(i)
- # table.to_excel(result, sheetname,index=False)
-
- with open('result.xlsx','rb') as f:
- st.download_button('提取完成,点击下载!', f,file_name='result.xlsx',mime="application/vnd.ms-excel")
-
- tables_all= cam.read_pdf("input.pdf", pages="all", process_background=background)
- result_all = pd.ExcelWriter('result_all.xlsx', engine='xlsxwriter')
- for i in range(0,len(tables_all)):
- table = tables_all[i].df
- sheetname = str(i)
- table.to_excel(result_all, sheetname,index=False)
- with open('result_all.xlsx','rb') as f:
- st.download_button('一件抽取完成,点击下载!', f,file_name='result_all.xlsx',mime="application/vnd.ms-excel")
\ No newline at end of file
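
The deleted Streamlit app drives Camelot to pull tables out of a PDF and export them as Excel sheets. A minimal script-level sketch of the same extraction; the file paths here are placeholders:

```python
import camelot
import pandas as pd

# "input.pdf" / "result_all.xlsx" are placeholder paths
tables = camelot.read_pdf("input.pdf", pages="all", process_background=False)

with pd.ExcelWriter("result_all.xlsx", engine="xlsxwriter") as writer:
    for i, table in enumerate(tables):
        # each Camelot table exposes its data as a pandas DataFrame
        table.df.to_excel(writer, sheet_name=str(i), index=False)

print(f"Exported {len(tables)} table(s)")
```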
diff --git a/spaces/bigscience-data/filter_values_distributions/README.md b/spaces/bigscience-data/filter_values_distributions/README.md
deleted file mode 100644
index 5c5013b7595a0f899a92f1a4a3dadb9c69b4c67a..0000000000000000000000000000000000000000
--- a/spaces/bigscience-data/filter_values_distributions/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Filter Values Distributions
-emoji: 🐠
-colorFrom: yellow
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/blastd/LimoneSorrentin/README.md b/spaces/blastd/LimoneSorrentin/README.md
deleted file mode 100644
index a9762e1c11a26508a71307d0aacd911d9c2bcc8b..0000000000000000000000000000000000000000
--- a/spaces/blastd/LimoneSorrentin/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: LimoneSorrentin
-emoji: 📉
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.0.20
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/brainblow/AI-TV/public/index.html b/spaces/brainblow/AI-TV/public/index.html
deleted file mode 100644
index 1c0597a24821e9c303ad9da37c3fdff5cfadeb6f..0000000000000000000000000000000000000000
--- a/spaces/brainblow/AI-TV/public/index.html
+++ /dev/null
@@ -1,325 +0,0 @@
-<!-- The original 325-line index.html markup was stripped during extraction. -->
-<!-- Recoverable text content: page title "🤖 AI-TV", a "Loading AI-TV..." splash, -->
-<!-- a "🤖 AI-TV" header and a "▶ Current channel:" label for the video player page. -->
\ No newline at end of file
diff --git a/spaces/brainblow/beat_remixer/beat_manipulator/osu.py b/spaces/brainblow/beat_remixer/beat_manipulator/osu.py
deleted file mode 100644
index 8a6becc526702d28eb4f1b42fa91a252b6ff4e09..0000000000000000000000000000000000000000
--- a/spaces/brainblow/beat_remixer/beat_manipulator/osu.py
+++ /dev/null
@@ -1,244 +0,0 @@
-from . import main
-import numpy as np
-
-# L L L L L L L L L
-def generate(song, difficulties = [0.2, 0.1, 0.05, 0.025, 0.01, 0.0075, 0.005, 0.0025], lib='madmom.MultiModelSelectionProcessor', caching=True, log = True, output = '', add_peaks = True):
- # for i in difficulties:
- # if i<0.005: print(f'Difficulties < 0.005 may result in broken beatmaps, found difficulty = {i}')
- if lib.lower() == 'stunlocked': add_peaks = False
-
- if not isinstance(song, main.song): song = main.song(song)
- if log is True: print(f'Using {lib}; ', end='')
-
- filename = song.path.replace('\\', '/').split('/')[-1]
- if ' - ' in filename and len(filename.split(' - '))>1:
- artist = filename.split(' - ')[0]
- title = ' - '.join(filename.split(' - ')[1:])
- else:
- artist = ''
- title = filename
-
- if caching is True:
- audio_id=hex(len(song.audio[0]))
- import os
- if not os.path.exists('beat_manipulator/beatmaps'):
- os.mkdir('beat_manipulator/beatmaps')
- cacheDir="beat_manipulator/beatmaps/" + filename + "_"+lib+"_"+audio_id+'.txt'
- try:
- beatmap=np.loadtxt(cacheDir)
- if log is True: print('loaded cached beatmap.')
- except OSError:
- if log is True:print("beatmap hasn't been generated yet. Generating...")
- beatmap = None
-
- if beatmap is None:
- if 'madmom' in lib.lower():
- from collections.abc import MutableMapping, MutableSequence
- import madmom
- assert len(song.audio[0])>song.sr*2, f'Audio file is too short, len={len(song.audio[0])} samples, or {len(song.audio[0])/song.sr} seconds. Minimum length is 2 seconds, audio below that breaks madmom processors.'
- if lib=='madmom.RNNBeatProcessor':
- proc = madmom.features.beats.RNNBeatProcessor()
- beatmap = proc(madmom.audio.signal.Signal(song.audio.T, song.sr))
- elif lib=='madmom.MultiModelSelectionProcessor':
- proc = madmom.features.beats.RNNBeatProcessor(post_processor=None)
- predictions = proc(madmom.audio.signal.Signal(song.audio.T, song.sr))
- mm_proc = madmom.features.beats.MultiModelSelectionProcessor(num_ref_predictions=None)
- beatmap= mm_proc(predictions)*song.sr
- beatmap/= np.max(beatmap)
- elif lib=='stunlocked':
- spikes = np.abs(np.gradient(np.clip(song.audio[0], -1, 1)))[:int(len(song.audio[0]) - (len(song.audio[0])%int(song.sr/100)))]
- spikes = spikes.reshape(-1, (int(song.sr/100)))
- spikes = np.asarray(list(np.max(i) for i in spikes))
- zeroing = 0
- for i in range(len(spikes)):
- if zeroing > 0:
- if spikes[i] <= 0.1: zeroing -=1
- spikes[i] = 0
- elif spikes[i] >= 0.1:
- spikes[i] = 1
- zeroing = 7
- if spikes[i] <= 0.1: spikes[i] = 0
- beatmap = spikes
-
- if caching is True: np.savetxt(cacheDir, beatmap)
-
- if add_peaks is True:
- spikes = np.abs(np.gradient(np.clip(song.audio[0], -1, 1)))[:int(len(song.audio[0]) - (len(song.audio[0])%int(song.sr/100)))]
- spikes = spikes.reshape(-1, (int(song.sr/100)))
- spikes = np.asarray(list(np.max(i) for i in spikes))
- if len(beatmap) > len(spikes): beatmap = beatmap[:len(spikes)]
- elif len(spikes) > len(beatmap): spikes = spikes[:len(beatmap)]
- zeroing = 0
- for i in range(len(spikes)):
- if zeroing > 0:
- if spikes[i] <= 0.1: zeroing -=1
- spikes[i] = 0
- elif spikes[i] >= 0.1:
- spikes[i] = 1
- zeroing = 7
- if spikes[i] <= 0.1: spikes[i] = 0
- else: spikes = None
-
- def _process(song: main.song, beatmap, spikes, threshold):
- '''ඞ'''
- if add_peaks is True: beatmap += spikes
- hitmap=[]
- actual_samplerate=int(song.sr/100)
- beat_middle=int(actual_samplerate/2)
- for i in range(len(beatmap)):
- if beatmap[i]>threshold: hitmap.append(i*actual_samplerate + beat_middle)
- hitmap=np.asarray(hitmap)
- clump=[]
- for i in range(len(hitmap)-1):
- #print(i, abs(song.beatmap[i]-song.beatmap[i+1]), clump)
- if abs(hitmap[i] - hitmap[i+1]) < song.sr/16 and i != len(hitmap)-2: clump.append(i)
- elif clump!=[]:
- clump.append(i)
- actual_time=hitmap[clump[0]]
- hitmap[np.array(clump)]=0
- #print(song.beatmap)
- hitmap[clump[0]]=actual_time
- clump=[]
-
- hitmap=hitmap[hitmap!=0]
- return hitmap
-
- osufile=lambda title,artist,version: ("osu file format v14\n"
- "\n"
- "[General]\n"
- f"AudioFilename: {song.path.split('/')[-1]}\n"
- "AudioLeadIn: 0\n"
- "PreviewTime: -1\n"
- "Countdown: 0\n"
- "SampleSet: Normal\n"
- "StackLeniency: 0.5\n"
- "Mode: 0\n"
- "LetterboxInBreaks: 0\n"
- "WidescreenStoryboard: 0\n"
- "\n"
- "[Editor]\n"
- "DistanceSpacing: 1.1\n"
- "BeatDivisor: 4\n"
- "GridSize: 8\n"
- "TimelineZoom: 1.6\n"
- "\n"
- "[Metadata]\n"
- f"Title:{title}\n"
- f"TitleUnicode:{title}\n"
- f"Artist:{artist}\n"
- f"ArtistUnicode:{artist}\n"
- f'Creator:{lib} + BeatManipulator\n'
- f'Version:{version} {lib}\n'
- 'Source:\n'
- 'Tags:BeatManipulator\n'
- 'BeatmapID:0\n'
- 'BeatmapSetID:-1\n'
- '\n'
- '[Difficulty]\n'
- 'HPDrainRate:4\n'
- 'CircleSize:4\n'
- 'OverallDifficulty:5\n'
- 'ApproachRate:10\n'
- 'SliderMultiplier:3.3\n'
- 'SliderTickRate:1\n'
- '\n'
- '[Events]\n'
- '//Background and Video events\n'
- '//Break Periods\n'
- '//Storyboard Layer 0 (Background)\n'
- '//Storyboard Layer 1 (Fail)\n'
- '//Storyboard Layer 2 (Pass)\n'
- '//Storyboard Layer 3 (Foreground)\n'
- '//Storyboard Layer 4 (Overlay)\n'
- '//Storyboard Sound Samples\n'
- '\n'
- '[TimingPoints]\n'
- '0,140.0,4,1,0,100,1,0\n'
- '\n'
- '\n'
- '[HitObjects]\n')
- # remove the clumps
- #print(self.beatmap)
-
- #print(self.beatmap)
-
-
- #print(len(osumap))
- #input('banana')
- import shutil, os
- if os.path.exists('beat_manipulator/temp'): shutil.rmtree('beat_manipulator/temp')
- os.mkdir('beat_manipulator/temp')
- hitmap=[]
- import random
- for difficulty in difficulties:
- for i in range(4):
- #print(i)
- this_difficulty=_process(song, beatmap, spikes, difficulty)
- hitmap.append(this_difficulty)
-
- for k in range(len(hitmap)):
- osumap=np.vstack((hitmap[k],np.zeros(len(hitmap[k])),np.zeros(len(hitmap[k])))).T
- difficulty= difficulties[k]
- for i in range(len(osumap)-1):
- if i==0:continue
- dist=(osumap[i,0]-osumap[i-1,0])*(1-(difficulty**0.3))
- if dist<1000: dist=0.005
- elif dist<2000: dist=0.01
- elif dist<3000: dist=0.015
- elif dist<4000: dist=0.02
- elif dist<5000: dist=0.25
- elif dist<6000: dist=0.35
- elif dist<7000: dist=0.45
- elif dist<8000: dist=0.55
- elif dist<9000: dist=0.65
- elif dist<10000: dist=0.75
- elif dist<12500: dist=0.85
- elif dist<15000: dist=0.95
- elif dist<20000: dist=1
- #elif dist<30000: dist=0.8
- prev_x=osumap[i-1,1]
- prev_y=osumap[i-1,2]
- if prev_x>0: prev_x=prev_x-dist*0.1
- elif prev_x<0: prev_x=prev_x+dist*0.1
- if prev_y>0: prev_y=prev_y-dist*0.1
- elif prev_y<0: prev_y=prev_y+dist*0.1
- dirx=random.uniform(-dist,dist)
- diry=dist-abs(dirx)*random.choice([-1, 1])
- if abs(prev_x+dirx)>1: dirx=-dirx
- if abs(prev_y+diry)>1: diry=-diry
- x=prev_x+dirx
- y=prev_y+diry
- #print(dirx,diry,x,y)
- #print(x>1, x<1, y>1, y<1)
- if x>1: x=0.8
- if x<-1: x=-0.8
- if y>1: y=0.8
- if y<-1: y=-0.8
- #print(dirx,diry,x,y)
- osumap[i,1]=x
- osumap[i,2]=y
-
- osumap[:,1]*=300
- osumap[:,1]+=300
- osumap[:,2]*=180
- osumap[:,2]+=220
-
- file=osufile(title, artist, difficulty)
- for j in osumap:
- #print('285,70,'+str(int(int(i)*1000/self.samplerate))+',1,0')
- file+=f'{int(j[1])},{int(j[2])},{str(int(int(j[0])*1000/song.sr))},1,0\n'
- with open(f'beat_manipulator/temp/{artist} - {title} (BeatManipulator {difficulty} {lib}].osu', 'x', encoding="utf-8") as f:
- f.write(file)
- from . import io
- import shutil, os
- shutil.copyfile(song.path, 'beat_manipulator/temp/'+filename)
- shutil.make_archive('beat_manipulator_osz', 'zip', 'beat_manipulator/temp')
- outputname = io._outputfilename(path = output, filename = song.path, suffix = ' ('+lib + ')', ext = 'osz')
- if not os.path.exists(outputname):
- os.rename('beat_manipulator_osz.zip', outputname)
- if log is True: print(f'Created `{outputname}`')
- else: print(f'{outputname} already exists!')
- shutil.rmtree('beat_manipulator/temp')
- return outputname
\ No newline at end of file
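
The generator above relies on madmom's RNN beat activations, optionally fused by the multi-model selector, and caches the resulting curve to disk. A hedged sketch of that step using only the madmom calls the deleted module itself uses; paths and the normalization detail are assumptions:

```python
import numpy as np
import madmom

def beat_activations(path, use_multi_model=True):
    """Return a per-frame beat activation curve normalised to [0, 1]."""
    if use_multi_model:
        proc = madmom.features.beats.RNNBeatProcessor(post_processor=None)
        predictions = proc(path)
        mm = madmom.features.beats.MultiModelSelectionProcessor(num_ref_predictions=None)
        activations = mm(predictions)
    else:
        activations = madmom.features.beats.RNNBeatProcessor()(path)
    activations = np.asarray(activations, dtype=float)
    return activations / max(activations.max(), 1e-9)

# acts = beat_activations("song.wav")   # placeholder path
# np.savetxt("song_beatmap.txt", acts)  # cache, as the deleted module does
```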
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/datasets/coco.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/datasets/coco.py
deleted file mode 100644
index ed4f7ccb20efa3b54c719783e279c381ca5d8587..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/datasets/coco.py
+++ /dev/null
@@ -1,539 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import contextlib
-import datetime
-import io
-import json
-import logging
-import numpy as np
-import os
-import shutil
-import pycocotools.mask as mask_util
-from fvcore.common.timer import Timer
-from iopath.common.file_io import file_lock
-from PIL import Image
-
-from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
-from detectron2.utils.file_io import PathManager
-
-from .. import DatasetCatalog, MetadataCatalog
-
-"""
-This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
-"""
-
-
-logger = logging.getLogger(__name__)
-
-__all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"]
-
-
-def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
- """
- Load a json file with COCO's instances annotation format.
- Currently supports instance detection, instance segmentation,
- and person keypoints annotations.
-
- Args:
- json_file (str): full path to the json file in COCO instances annotation format.
- image_root (str or path-like): the directory where the images in this json file exists.
- dataset_name (str or None): the name of the dataset (e.g., coco_2017_train).
- When provided, this function will also do the following:
-
- * Put "thing_classes" into the metadata associated with this dataset.
- * Map the category ids into a contiguous range (needed by standard dataset format),
- and add "thing_dataset_id_to_contiguous_id" to the metadata associated
- with this dataset.
-
- This option should usually be provided, unless users need to load
- the original json content and apply more processing manually.
- extra_annotation_keys (list[str]): list of per-annotation keys that should also be
- loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
- "category_id", "segmentation"). The values for these keys will be returned as-is.
- For example, the densepose annotations are loaded in this way.
-
- Returns:
- list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See
- `Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None.
- If `dataset_name` is None, the returned `category_ids` may be
- non-contiguous and may not conform to the Detectron2 standard format.
-
- Notes:
- 1. This function does not read the image files.
- The results do not have the "image" field.
- """
- from pycocotools.coco import COCO
-
- timer = Timer()
- json_file = PathManager.get_local_path(json_file)
- with contextlib.redirect_stdout(io.StringIO()):
- coco_api = COCO(json_file)
- if timer.seconds() > 1:
- logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
-
- id_map = None
- if dataset_name is not None:
- meta = MetadataCatalog.get(dataset_name)
- cat_ids = sorted(coco_api.getCatIds())
- cats = coco_api.loadCats(cat_ids)
- # The categories in a custom json file may not be sorted.
- thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
- meta.thing_classes = thing_classes
-
- # In COCO, certain category ids are artificially removed,
- # and by convention they are always ignored.
- # We deal with COCO's id issue and translate
- # the category ids to contiguous ids in [0, 80).
-
- # It works by looking at the "categories" field in the json, therefore
- # if users' own json also have incontiguous ids, we'll
- # apply this mapping as well but print a warning.
- if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
- if "coco" not in dataset_name:
- logger.warning(
- """
-Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
-"""
- )
- id_map = {v: i for i, v in enumerate(cat_ids)}
- meta.thing_dataset_id_to_contiguous_id = id_map
-
- # sort indices for reproducible results
- img_ids = sorted(coco_api.imgs.keys())
- # imgs is a list of dicts, each looks something like:
- # {'license': 4,
- # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
- # 'file_name': 'COCO_val2014_000000001268.jpg',
- # 'height': 427,
- # 'width': 640,
- # 'date_captured': '2013-11-17 05:57:24',
- # 'id': 1268}
- imgs = coco_api.loadImgs(img_ids)
- # anns is a list[list[dict]], where each dict is an annotation
- # record for an object. The inner list enumerates the objects in an image
- # and the outer list enumerates over images. Example of anns[0]:
- # [{'segmentation': [[192.81,
- # 247.09,
- # ...
- # 219.03,
- # 249.06]],
- # 'area': 1035.749,
- # 'iscrowd': 0,
- # 'image_id': 1268,
- # 'bbox': [192.81, 224.8, 74.73, 33.43],
- # 'category_id': 16,
- # 'id': 42986},
- # ...]
- anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
- total_num_valid_anns = sum([len(x) for x in anns])
- total_num_anns = len(coco_api.anns)
- if total_num_valid_anns < total_num_anns:
- logger.warning(
- f"{json_file} contains {total_num_anns} annotations, but only "
- f"{total_num_valid_anns} of them match to images in the file."
- )
-
- if "minival" not in json_file:
- # The popular valminusminival & minival annotations for COCO2014 contain this bug.
- # However the ratio of buggy annotations there is tiny and does not affect accuracy.
- # Therefore we explicitly white-list them.
- ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
- assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
- json_file
- )
-
- imgs_anns = list(zip(imgs, anns))
- logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
-
- dataset_dicts = []
-
- ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
-
- num_instances_without_valid_segmentation = 0
-
- for (img_dict, anno_dict_list) in imgs_anns:
- record = {}
- record["file_name"] = os.path.join(image_root, img_dict["file_name"])
- record["height"] = img_dict["height"]
- record["width"] = img_dict["width"]
- image_id = record["image_id"] = img_dict["id"]
-
- objs = []
- for anno in anno_dict_list:
- # Check that the image_id in this annotation is the same as
- # the image_id we're looking at.
- # This fails only when the data parsing logic or the annotation file is buggy.
-
- # The original COCO valminusminival2014 & minival2014 annotation files
- # actually contains bugs that, together with certain ways of using COCO API,
- # can trigger this assertion.
- assert anno["image_id"] == image_id
-
- assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
-
- obj = {key: anno[key] for key in ann_keys if key in anno}
- if "bbox" in obj and len(obj["bbox"]) == 0:
- raise ValueError(
- f"One annotation of image {image_id} contains empty 'bbox' value! "
- "This json does not have valid COCO format."
- )
-
- segm = anno.get("segmentation", None)
- if segm: # either list[list[float]] or dict(RLE)
- if isinstance(segm, dict):
- if isinstance(segm["counts"], list):
- # convert to compressed RLE
- segm = mask_util.frPyObjects(segm, *segm["size"])
- else:
- # filter out invalid polygons (< 3 points)
- segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
- if len(segm) == 0:
- num_instances_without_valid_segmentation += 1
- continue # ignore this instance
- obj["segmentation"] = segm
-
- keypts = anno.get("keypoints", None)
- if keypts: # list[int]
- for idx, v in enumerate(keypts):
- if idx % 3 != 2:
- # COCO's segmentation coordinates are floating points in [0, H or W],
- # but keypoint coordinates are integers in [0, H-1 or W-1]
- # Therefore we assume the coordinates are "pixel indices" and
- # add 0.5 to convert to floating point coordinates.
- keypts[idx] = v + 0.5
- obj["keypoints"] = keypts
-
- obj["bbox_mode"] = BoxMode.XYWH_ABS
- if id_map:
- annotation_category_id = obj["category_id"]
- try:
- obj["category_id"] = id_map[annotation_category_id]
- except KeyError as e:
- raise KeyError(
- f"Encountered category_id={annotation_category_id} "
- "but this id does not exist in 'categories' of the json file."
- ) from e
- objs.append(obj)
- record["annotations"] = objs
- dataset_dicts.append(record)
-
- if num_instances_without_valid_segmentation > 0:
- logger.warning(
- "Filtered out {} instances without valid segmentation. ".format(
- num_instances_without_valid_segmentation
- )
- + "There might be issues in your dataset generation process. Please "
- "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully"
- )
- return dataset_dicts
-
-
-def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
- """
- Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
- treated as ground truth annotations and all files under "image_root" with "image_ext" extension
- as input images. Ground truth and input images are matched using file paths relative to
- "gt_root" and "image_root" respectively without taking into account file extensions.
- This works for COCO as well as some other datasets.
-
- Args:
- gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
- annotations are stored as images with integer values in pixels that represent
- corresponding semantic labels.
- image_root (str): the directory where the input images are.
- gt_ext (str): file extension for ground truth annotations.
- image_ext (str): file extension for input images.
-
- Returns:
- list[dict]:
- a list of dicts in detectron2 standard format without instance-level
- annotation.
-
- Notes:
- 1. This function does not read the image and ground truth files.
- The results do not have the "image" and "sem_seg" fields.
- """
-
- # We match input images with ground truth based on their relative filepaths (without file
- # extensions) starting from 'image_root' and 'gt_root' respectively.
- def file2id(folder_path, file_path):
- # extract relative path starting from `folder_path`
- image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
- # remove file extension
- image_id = os.path.splitext(image_id)[0]
- return image_id
-
- input_files = sorted(
- (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),
- key=lambda file_path: file2id(image_root, file_path),
- )
- gt_files = sorted(
- (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
- key=lambda file_path: file2id(gt_root, file_path),
- )
-
- assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
-
- # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
- if len(input_files) != len(gt_files):
- logger.warn(
- "Directory {} and {} has {} and {} files, respectively.".format(
- image_root, gt_root, len(input_files), len(gt_files)
- )
- )
- input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
- gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
- intersect = list(set(input_basenames) & set(gt_basenames))
- # sort, otherwise each worker may obtain a list[dict] in different order
- intersect = sorted(intersect)
- logger.warn("Will use their intersection of {} files.".format(len(intersect)))
- input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
- gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
-
- logger.info(
- "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root)
- )
-
- dataset_dicts = []
- for (img_path, gt_path) in zip(input_files, gt_files):
- record = {}
- record["file_name"] = img_path
- record["sem_seg_file_name"] = gt_path
- dataset_dicts.append(record)
-
- return dataset_dicts
-
-
-def convert_to_coco_dict(dataset_name):
- """
- Convert an instance detection/segmentation or keypoint detection dataset
- in detectron2's standard format into COCO json format.
-
- Generic dataset description can be found here:
- https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
-
- COCO data format description can be found here:
- http://cocodataset.org/#format-data
-
- Args:
- dataset_name (str):
- name of the source dataset
- Must be registered in DatasetCatalog and in detectron2's standard format.
- Must have corresponding metadata "thing_classes"
- Returns:
- coco_dict: serializable dict in COCO json format
- """
-
- dataset_dicts = DatasetCatalog.get(dataset_name)
- metadata = MetadataCatalog.get(dataset_name)
-
- # unmap the category mapping ids for COCO
- if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
- reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()}
- reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa
- else:
- reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa
-
- categories = [
- {"id": reverse_id_mapper(id), "name": name}
- for id, name in enumerate(metadata.thing_classes)
- ]
-
- logger.info("Converting dataset dicts into COCO format")
- coco_images = []
- coco_annotations = []
-
- for image_id, image_dict in enumerate(dataset_dicts):
- coco_image = {
- "id": image_dict.get("image_id", image_id),
- "width": int(image_dict["width"]),
- "height": int(image_dict["height"]),
- "file_name": str(image_dict["file_name"]),
- }
- coco_images.append(coco_image)
-
- anns_per_image = image_dict.get("annotations", [])
- for annotation in anns_per_image:
- # create a new dict with only COCO fields
- coco_annotation = {}
-
- # COCO requirement: XYWH box format for axis-align and XYWHA for rotated
- bbox = annotation["bbox"]
- if isinstance(bbox, np.ndarray):
- if bbox.ndim != 1:
- raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.")
- bbox = bbox.tolist()
- if len(bbox) not in [4, 5]:
- raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.")
- from_bbox_mode = annotation["bbox_mode"]
- to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS
- bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
-
- # COCO requirement: instance area
- if "segmentation" in annotation:
- # Computing areas for instances by counting the pixels
- segmentation = annotation["segmentation"]
- # TODO: check segmentation type: RLE, BinaryMask or Polygon
- if isinstance(segmentation, list):
- polygons = PolygonMasks([segmentation])
- area = polygons.area()[0].item()
- elif isinstance(segmentation, dict): # RLE
- area = mask_util.area(segmentation).item()
- else:
- raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
- else:
- # Computing areas using bounding boxes
- if to_bbox_mode == BoxMode.XYWH_ABS:
- bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
- area = Boxes([bbox_xy]).area()[0].item()
- else:
- area = RotatedBoxes([bbox]).area()[0].item()
-
- if "keypoints" in annotation:
- keypoints = annotation["keypoints"] # list[int]
- for idx, v in enumerate(keypoints):
- if idx % 3 != 2:
- # COCO's segmentation coordinates are floating points in [0, H or W],
- # but keypoint coordinates are integers in [0, H-1 or W-1]
- # For COCO format consistency we subtract 0.5
- # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
- keypoints[idx] = v - 0.5
- if "num_keypoints" in annotation:
- num_keypoints = annotation["num_keypoints"]
- else:
- num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
-
- # COCO requirement:
- # linking annotations to images
- # "id" field must start with 1
- coco_annotation["id"] = len(coco_annotations) + 1
- coco_annotation["image_id"] = coco_image["id"]
- coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
- coco_annotation["area"] = float(area)
- coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0))
- coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"]))
-
- # Add optional fields
- if "keypoints" in annotation:
- coco_annotation["keypoints"] = keypoints
- coco_annotation["num_keypoints"] = num_keypoints
-
- if "segmentation" in annotation:
- seg = coco_annotation["segmentation"] = annotation["segmentation"]
- if isinstance(seg, dict): # RLE
- counts = seg["counts"]
- if not isinstance(counts, str):
- # make it json-serializable
- seg["counts"] = counts.decode("ascii")
-
- coco_annotations.append(coco_annotation)
-
- logger.info(
- "Conversion finished, "
- f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}"
- )
-
- info = {
- "date_created": str(datetime.datetime.now()),
- "description": "Automatically generated COCO json file for Detectron2.",
- }
- coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None}
- if len(coco_annotations) > 0:
- coco_dict["annotations"] = coco_annotations
- return coco_dict
-
-
-def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
- """
- Converts dataset into COCO format and saves it to a json file.
- dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
-
- Args:
- dataset_name:
- reference from the config file to the catalogs
- must be registered in DatasetCatalog and in detectron2's standard format
- output_file: path of json file that will be saved to
- allow_cached: if json file is already present then skip conversion
- """
-
- # TODO: The dataset or the conversion script *may* change,
- # a checksum would be useful for validating the cached data
-
- PathManager.mkdirs(os.path.dirname(output_file))
- with file_lock(output_file):
- if PathManager.exists(output_file) and allow_cached:
- logger.warning(
- f"Using previously cached COCO format annotations at '{output_file}'. "
- "You need to clear the cache file if your dataset has been modified."
- )
- else:
- logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
- coco_dict = convert_to_coco_dict(dataset_name)
-
- logger.info(f"Caching COCO format annotations at '{output_file}' ...")
- tmp_file = output_file + ".tmp"
- with PathManager.open(tmp_file, "w") as f:
- json.dump(coco_dict, f)
- shutil.move(tmp_file, output_file)
-
-
-def register_coco_instances(name, metadata, json_file, image_root):
- """
- Register a dataset in COCO's json annotation format for
- instance detection, instance segmentation and keypoint detection.
- (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
- `instances*.json` and `person_keypoints*.json` in the dataset).
-
- This is an example of how to register a new dataset.
- You can do something similar to this function, to register new datasets.
-
- Args:
- name (str): the name that identifies a dataset, e.g. "coco_2014_train".
- metadata (dict): extra metadata associated with this dataset. You can
- leave it as an empty dict.
- json_file (str): path to the json instance annotation file.
- image_root (str or path-like): directory which contains all the images.
- """
- assert isinstance(name, str), name
- assert isinstance(json_file, (str, os.PathLike)), json_file
- assert isinstance(image_root, (str, os.PathLike)), image_root
- # 1. register a function which returns dicts
- DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
-
- # 2. Optionally, add metadata about this dataset,
- # since they might be useful in evaluation, visualization or logging
- MetadataCatalog.get(name).set(
- json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
- )
-
-
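
`register_coco_instances` only registers a loader and metadata; nothing is read until the dataset is first requested. A short usage sketch with placeholder names and paths (they are illustrative, not from this repo):

```python
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import register_coco_instances

# placeholder dataset name, json path and image root
register_coco_instances(
    "my_dataset_train", {}, "datasets/my/annotations_train.json", "datasets/my/images"
)

dicts = DatasetCatalog.get("my_dataset_train")              # triggers load_coco_json
classes = MetadataCatalog.get("my_dataset_train").thing_classes
print(len(dicts), classes[:5])
```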
-if __name__ == "__main__":
- """
- Test the COCO json dataset loader.
-
- Usage:
- python -m detectron2.data.datasets.coco \
- path/to/json path/to/image_root dataset_name
-
- "dataset_name" can be "coco_2014_minival_100", or other
- pre-registered ones
- """
- from detectron2.utils.logger import setup_logger
- from detectron2.utils.visualizer import Visualizer
- import detectron2.data.datasets # noqa # add pre-defined metadata
- import sys
-
- logger = setup_logger(name=__name__)
- assert sys.argv[3] in DatasetCatalog.list()
- meta = MetadataCatalog.get(sys.argv[3])
-
- dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
- logger.info("Done loading {} samples.".format(len(dicts)))
-
- dirname = "coco-data-vis"
- os.makedirs(dirname, exist_ok=True)
- for d in dicts:
- img = np.array(Image.open(d["file_name"]))
- visualizer = Visualizer(img, metadata=meta)
- vis = visualizer.draw_dataset_dict(d)
- fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
- vis.save(fpath)
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/transform/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/transform/__init__.py
deleted file mode 100644
index 369e1b278899b225d55bfc729514873b4259c7b9..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/transform/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-from .image import ImageResizeTransform
diff --git a/spaces/cahodk/live-ml5-facemesh-p5js/index.html b/spaces/cahodk/live-ml5-facemesh-p5js/index.html
deleted file mode 100644
index 161ad643319020a2bfeacc26ff284eebd85e0e6e..0000000000000000000000000000000000000000
--- a/spaces/cahodk/live-ml5-facemesh-p5js/index.html
+++ /dev/null
@@ -1,28 +0,0 @@
-<!-- The original 28-line index.html markup was stripped during extraction. -->
-<!-- Recoverable text content: page title and heading "PoseNet example using p5.js" -->
-<!-- and a "Loading model..." status line for the ml5/p5.js sketch. -->
\ No newline at end of file
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/tools/export_onnx.py b/spaces/chendl/compositional_test/multimodal/YOLOX/tools/export_onnx.py
deleted file mode 100644
index 8703166a4ee487d2d4b713b42c6f8c55879281db..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/tools/export_onnx.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii, Inc. and its affiliates.
-
-import argparse
-import os
-from loguru import logger
-
-import torch
-from torch import nn
-
-from yolox.exp import get_exp
-from yolox.models.network_blocks import SiLU
-from yolox.utils import replace_module
-
-
-def make_parser():
- parser = argparse.ArgumentParser("YOLOX onnx deploy")
- parser.add_argument(
- "--output-name", type=str, default="yolox.onnx", help="output name of models"
- )
- parser.add_argument(
- "--input", default="images", type=str, help="input node name of onnx model"
- )
- parser.add_argument(
- "--output", default="output", type=str, help="output node name of onnx model"
- )
- parser.add_argument(
- "-o", "--opset", default=11, type=int, help="onnx opset version"
- )
- parser.add_argument("--batch-size", type=int, default=1, help="batch size")
- parser.add_argument(
- "--dynamic", action="store_true", help="whether the input shape should be dynamic or not"
- )
- parser.add_argument("--no-onnxsim", action="store_true", help="use onnxsim or not")
- parser.add_argument(
- "-f",
- "--exp_file",
- default=None,
- type=str,
- help="experiment description file",
- )
- parser.add_argument("-expn", "--experiment-name", type=str, default=None)
- parser.add_argument("-n", "--name", type=str, default=None, help="model name")
- parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path")
- parser.add_argument(
- "opts",
- help="Modify config options using the command-line",
- default=None,
- nargs=argparse.REMAINDER,
- )
- parser.add_argument(
- "--decode_in_inference",
- action="store_true",
- help="decode in inference or not"
- )
-
- return parser
-
-
-@logger.catch
-def main():
- args = make_parser().parse_args()
- logger.info("args value: {}".format(args))
- exp = get_exp(args.exp_file, args.name)
- exp.merge(args.opts)
-
- if not args.experiment_name:
- args.experiment_name = exp.exp_name
-
- model = exp.get_model()
- if args.ckpt is None:
- file_name = os.path.join(exp.output_dir, args.experiment_name)
- ckpt_file = os.path.join(file_name, "best_ckpt.pth")
- else:
- ckpt_file = args.ckpt
-
- # load the model state dict
- ckpt = torch.load(ckpt_file, map_location="cpu")
-
- model.eval()
- if "model" in ckpt:
- ckpt = ckpt["model"]
- model.load_state_dict(ckpt)
- model = replace_module(model, nn.SiLU, SiLU)
- model.head.decode_in_inference = args.decode_in_inference
-
- logger.info("loading checkpoint done.")
- dummy_input = torch.randn(args.batch_size, 3, exp.test_size[0], exp.test_size[1])
-
- torch.onnx._export(
- model,
- dummy_input,
- args.output_name,
- input_names=[args.input],
- output_names=[args.output],
- dynamic_axes={args.input: {0: 'batch'},
- args.output: {0: 'batch'}} if args.dynamic else None,
- opset_version=args.opset,
- )
- logger.info("generated onnx model named {}".format(args.output_name))
-
- if not args.no_onnxsim:
- import onnx
- from onnxsim import simplify
-
- # use onnx-simplifier to simplify the exported model and remove redundant nodes.
- onnx_model = onnx.load(args.output_name)
- model_simp, check = simplify(onnx_model)
- assert check, "Simplified ONNX model could not be validated"
- onnx.save(model_simp, args.output_name)
- logger.info("generated simplified onnx model named {}".format(args.output_name))
-
-
-if __name__ == "__main__":
- main()
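
The exporter above calls `torch.onnx._export` with optional dynamic batch axes. A generic sketch of the same idea using the public `torch.onnx.export` API on a toy module; the module and output path are placeholders, not the YOLOX model:

```python
import torch
from torch import nn

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU()).eval()
dummy = torch.randn(1, 3, 64, 64)

torch.onnx.export(
    model,
    dummy,
    "toy.onnx",                      # placeholder output path
    input_names=["images"],
    output_names=["output"],
    # mark the batch dimension as dynamic so any batch size is accepted at runtime
    dynamic_axes={"images": {0: "batch"}, "output": {0: "batch"}},
    opset_version=11,
)
```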
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/cocoeval/cocoeval.cpp b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/cocoeval/cocoeval.cpp
deleted file mode 100644
index 2e63bc9952918060f55999ec100b283d83616b46..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/cocoeval/cocoeval.cpp
+++ /dev/null
@@ -1,502 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-#include "cocoeval.h"
-#include <time.h>
-#include <algorithm>
-#include <cstdint>
-#include <numeric>
-
-using namespace pybind11::literals;
-
-namespace COCOeval {
-
-// Sort detections from highest score to lowest, such that
-// detection_instances[detection_sorted_indices[t]] >=
-// detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match
-// original COCO API
-void SortInstancesByDetectionScore(
- const std::vector<InstanceAnnotation>& detection_instances,
- std::vector<uint64_t>* detection_sorted_indices) {
- detection_sorted_indices->resize(detection_instances.size());
- std::iota(
- detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
- std::stable_sort(
- detection_sorted_indices->begin(),
- detection_sorted_indices->end(),
- [&detection_instances](size_t j1, size_t j2) {
- return detection_instances[j1].score > detection_instances[j2].score;
- });
-}
-
-// Partition the ground truth objects based on whether or not to ignore them
-// based on area
-void SortInstancesByIgnore(
- const std::array<double, 2>& area_range,
- const std::vector<InstanceAnnotation>& ground_truth_instances,
- std::vector<uint64_t>* ground_truth_sorted_indices,
- std::vector<bool>* ignores) {
- ignores->clear();
- ignores->reserve(ground_truth_instances.size());
- for (auto o : ground_truth_instances) {
- ignores->push_back(
- o.ignore || o.area < area_range[0] || o.area > area_range[1]);
- }
-
- ground_truth_sorted_indices->resize(ground_truth_instances.size());
- std::iota(
- ground_truth_sorted_indices->begin(),
- ground_truth_sorted_indices->end(),
- 0);
- std::stable_sort(
- ground_truth_sorted_indices->begin(),
- ground_truth_sorted_indices->end(),
- [&ignores](size_t j1, size_t j2) {
- return (int)(*ignores)[j1] < (int)(*ignores)[j2];
- });
-}
-
-// For each IOU threshold, greedily match each detected instance to a ground
-// truth instance (if possible) and store the results
-void MatchDetectionsToGroundTruth(
- const std::vector<InstanceAnnotation>& detection_instances,
- const std::vector<uint64_t>& detection_sorted_indices,
- const std::vector<InstanceAnnotation>& ground_truth_instances,
- const std::vector<uint64_t>& ground_truth_sorted_indices,
- const std::vector<bool>& ignores,
- const std::vector<std::vector<double>>& ious,
- const std::vector<double>& iou_thresholds,
- const std::array<double, 2>& area_range,
- ImageEvaluation* results) {
- // Initialize memory to store return data matches and ignore
- const int num_iou_thresholds = iou_thresholds.size();
- const int num_ground_truth = ground_truth_sorted_indices.size();
- const int num_detections = detection_sorted_indices.size();
- std::vector<uint64_t> ground_truth_matches(
- num_iou_thresholds * num_ground_truth, 0);
- std::vector<uint64_t>& detection_matches = results->detection_matches;
- std::vector<bool>& detection_ignores = results->detection_ignores;
- std::vector<bool>& ground_truth_ignores = results->ground_truth_ignores;
- detection_matches.resize(num_iou_thresholds * num_detections, 0);
- detection_ignores.resize(num_iou_thresholds * num_detections, false);
- ground_truth_ignores.resize(num_ground_truth);
- for (auto g = 0; g < num_ground_truth; ++g) {
- ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]];
- }
-
- for (auto t = 0; t < num_iou_thresholds; ++t) {
- for (auto d = 0; d < num_detections; ++d) {
- // information about best match so far (match=-1 -> unmatched)
- double best_iou = std::min(iou_thresholds[t], 1 - 1e-10);
- int match = -1;
- for (auto g = 0; g < num_ground_truth; ++g) {
- // if this ground truth instance is already matched and not a
- // crowd, it cannot be matched to another detection
- if (ground_truth_matches[t * num_ground_truth + g] > 0 &&
- !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) {
- continue;
- }
-
- // if detected instance matched to a regular ground truth
- // instance, we can break on the first ground truth instance
- // tagged as ignore (because they are sorted by the ignore tag)
- if (match >= 0 && !ground_truth_ignores[match] &&
- ground_truth_ignores[g]) {
- break;
- }
-
- // if IOU overlap is the best so far, store the match appropriately
- if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) {
- best_iou = ious[d][ground_truth_sorted_indices[g]];
- match = g;
- }
- }
- // if match was made, store id of match for both detection and
- // ground truth
- if (match >= 0) {
- detection_ignores[t * num_detections + d] = ground_truth_ignores[match];
- detection_matches[t * num_detections + d] =
- ground_truth_instances[ground_truth_sorted_indices[match]].id;
- ground_truth_matches[t * num_ground_truth + match] =
- detection_instances[detection_sorted_indices[d]].id;
- }
-
- // set unmatched detections outside of area range to ignore
- const InstanceAnnotation& detection =
- detection_instances[detection_sorted_indices[d]];
- detection_ignores[t * num_detections + d] =
- detection_ignores[t * num_detections + d] ||
- (detection_matches[t * num_detections + d] == 0 &&
- (detection.area < area_range[0] || detection.area > area_range[1]));
- }
- }
-
- // store detection score results
- results->detection_scores.resize(detection_sorted_indices.size());
- for (size_t d = 0; d < detection_sorted_indices.size(); ++d) {
- results->detection_scores[d] =
- detection_instances[detection_sorted_indices[d]].score;
- }
-}
-
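
The matching loop above is the standard COCO greedy assignment: for each IoU threshold, walk detections in descending score order and give each one the best still-unmatched ground truth above the threshold. A compact Python sketch of the same idea, without the crowd/ignore handling, purely for illustration:

```python
def greedy_match(ious, scores, iou_threshold=0.5):
    """ious[d][g] is the IoU between detection d and ground truth g.

    Returns a dict mapping detection index -> matched ground-truth index.
    """
    order = sorted(range(len(scores)), key=lambda d: -scores[d])
    matched_gt, matches = set(), {}
    for d in order:
        best_iou, best_g = iou_threshold, -1
        for g, iou in enumerate(ious[d]):
            if g in matched_gt:
                continue
            if iou >= best_iou:
                best_iou, best_g = iou, g
        if best_g >= 0:
            matched_gt.add(best_g)
            matches[d] = best_g
    return matches

print(greedy_match([[0.8, 0.1], [0.6, 0.4]], scores=[0.9, 0.7]))  # {0: 0}
```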
-std::vector<ImageEvaluation> EvaluateImages(
- const std::vector<std::array<double, 2>>& area_ranges,
- int max_detections,
- const std::vector<double>& iou_thresholds,
- const ImageCategoryInstances<std::vector<double>>& image_category_ious,
- const ImageCategoryInstances<InstanceAnnotation>&
- image_category_ground_truth_instances,
- const ImageCategoryInstances<InstanceAnnotation>&
- image_category_detection_instances) {
- const int num_area_ranges = area_ranges.size();
- const int num_images = image_category_ground_truth_instances.size();
- const int num_categories =
- image_category_ious.size() > 0 ? image_category_ious[0].size() : 0;
- std::vector<uint64_t> detection_sorted_indices;
- std::vector<uint64_t> ground_truth_sorted_indices;
- std::vector<bool> ignores;
- std::vector<ImageEvaluation> results_all(
- num_images * num_area_ranges * num_categories);
-
- // Store results for each image, category, and area range combination. Results
- // for each IOU threshold are packed into the same ImageEvaluation object
- for (auto i = 0; i < num_images; ++i) {
- for (auto c = 0; c < num_categories; ++c) {
- const std::vector<InstanceAnnotation>& ground_truth_instances =
- image_category_ground_truth_instances[i][c];
- const std::vector<InstanceAnnotation>& detection_instances =
- image_category_detection_instances[i][c];
-
- SortInstancesByDetectionScore(
- detection_instances, &detection_sorted_indices);
- if ((int)detection_sorted_indices.size() > max_detections) {
- detection_sorted_indices.resize(max_detections);
- }
-
- for (size_t a = 0; a < area_ranges.size(); ++a) {
- SortInstancesByIgnore(
- area_ranges[a],
- ground_truth_instances,
- &ground_truth_sorted_indices,
- &ignores);
-
- MatchDetectionsToGroundTruth(
- detection_instances,
- detection_sorted_indices,
- ground_truth_instances,
- ground_truth_sorted_indices,
- ignores,
- image_category_ious[i][c],
- iou_thresholds,
- area_ranges[a],
- &results_all
- [c * num_area_ranges * num_images + a * num_images + i]);
- }
- }
- }
-
- return results_all;
-}
-
-// Convert a python list to a vector
-template <typename T>
-std::vector<T> list_to_vec(const py::list& l) {
- std::vector<T> v(py::len(l));
- for (int i = 0; i < (int)py::len(l); ++i) {
- v[i] = l[i].cast<T>();
- }
- return v;
-}
-
-// Helper function to Accumulate()
-// Considers the evaluation results applicable to a particular category, area
-// range, and max_detections parameter setting, which begin at
-// evaluations[evaluation_index]. Extracts a sorted list of length n of all
-// applicable detection instances concatenated across all images in the dataset,
-// which are represented by the outputs evaluation_indices, detection_scores,
-// image_detection_indices, and detection_sorted_indices--all of which are
-// length n. evaluation_indices[i] stores the applicable index into
-// evaluations[] for instance i, which has detection score detection_score[i],
-// and is the image_detection_indices[i]'th of the list of detections
-// for the image containing i. detection_sorted_indices[] defines a sorted
-// permutation of the 3 other outputs
-int BuildSortedDetectionList(
- const std::vector<ImageEvaluation>& evaluations,
- const int64_t evaluation_index,
- const int64_t num_images,
- const int max_detections,
- std::vector<uint64_t>* evaluation_indices,
- std::vector<double>* detection_scores,
- std::vector<uint64_t>* detection_sorted_indices,
- std::vector<uint64_t>* image_detection_indices) {
- assert(evaluations.size() >= evaluation_index + num_images);
-
- // Extract a list of object instances of the applicable category, area
- // range, and max detections requirements such that they can be sorted
- image_detection_indices->clear();
- evaluation_indices->clear();
- detection_scores->clear();
- image_detection_indices->reserve(num_images * max_detections);
- evaluation_indices->reserve(num_images * max_detections);
- detection_scores->reserve(num_images * max_detections);
- int num_valid_ground_truth = 0;
- for (auto i = 0; i < num_images; ++i) {
- const ImageEvaluation& evaluation = evaluations[evaluation_index + i];
-
- for (int d = 0;
- d < (int)evaluation.detection_scores.size() && d < max_detections;
- ++d) { // detected instances
- evaluation_indices->push_back(evaluation_index + i);
- image_detection_indices->push_back(d);
- detection_scores->push_back(evaluation.detection_scores[d]);
- }
- for (auto ground_truth_ignore : evaluation.ground_truth_ignores) {
- if (!ground_truth_ignore) {
- ++num_valid_ground_truth;
- }
- }
- }
-
- // Sort detections by decreasing score, using stable sort to match
- // python implementation
- detection_sorted_indices->resize(detection_scores->size());
- std::iota(
- detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
- std::stable_sort(
- detection_sorted_indices->begin(),
- detection_sorted_indices->end(),
- [&detection_scores](size_t j1, size_t j2) {
- return (*detection_scores)[j1] > (*detection_scores)[j2];
- });
-
- return num_valid_ground_truth;
-}
-
-// Helper function to Accumulate()
-// Compute a precision recall curve given a sorted list of detected instances
-// encoded in evaluations, evaluation_indices, detection_scores,
-// detection_sorted_indices, image_detection_indices (see
-// BuildSortedDetectionList()). Using vectors precisions and recalls
-// as temporary storage, output the results into precisions_out, recalls_out,
-// and scores_out, which are large buffers containing many precision/recall curves
-// for all possible parameter settings, with precisions_out_index and
-// recalls_out_index defining the applicable indices to store results.
-void ComputePrecisionRecallCurve(
- const int64_t precisions_out_index,
- const int64_t precisions_out_stride,
- const int64_t recalls_out_index,
- const std::vector<double>& recall_thresholds,
- const int iou_threshold_index,
- const int num_iou_thresholds,
- const int num_valid_ground_truth,
- const std::vector<ImageEvaluation>& evaluations,
- const std::vector<uint64_t>& evaluation_indices,
- const std::vector<double>& detection_scores,
- const std::vector<uint64_t>& detection_sorted_indices,
- const std::vector<uint64_t>& image_detection_indices,
- std::vector<double>* precisions,
- std::vector<double>* recalls,
- std::vector<double>* precisions_out,
- std::vector<double>* scores_out,
- std::vector<double>* recalls_out) {
- assert(recalls_out->size() > recalls_out_index);
-
- // Compute precision/recall for each instance in the sorted list of detections
- int64_t true_positives_sum = 0, false_positives_sum = 0;
- precisions->clear();
- recalls->clear();
- precisions->reserve(detection_sorted_indices.size());
- recalls->reserve(detection_sorted_indices.size());
- assert(!evaluations.empty() || detection_sorted_indices.empty());
- for (auto detection_sorted_index : detection_sorted_indices) {
- const ImageEvaluation& evaluation =
- evaluations[evaluation_indices[detection_sorted_index]];
- const auto num_detections =
- evaluation.detection_matches.size() / num_iou_thresholds;
- const auto detection_index = iou_threshold_index * num_detections +
- image_detection_indices[detection_sorted_index];
- assert(evaluation.detection_matches.size() > detection_index);
- assert(evaluation.detection_ignores.size() > detection_index);
- const int64_t detection_match =
- evaluation.detection_matches[detection_index];
- const bool detection_ignores =
- evaluation.detection_ignores[detection_index];
- const auto true_positive = detection_match > 0 && !detection_ignores;
- const auto false_positive = detection_match == 0 && !detection_ignores;
- if (true_positive) {
- ++true_positives_sum;
- }
- if (false_positive) {
- ++false_positives_sum;
- }
-
- const double recall =
- static_cast<double>(true_positives_sum) / num_valid_ground_truth;
- recalls->push_back(recall);
- const int64_t num_valid_detections =
- true_positives_sum + false_positives_sum;
- const double precision = num_valid_detections > 0
- ? static_cast<double>(true_positives_sum) / num_valid_detections
- : 0.0;
- precisions->push_back(precision);
- }
-
- (*recalls_out)[recalls_out_index] = !recalls->empty() ? recalls->back() : 0;
-
- for (int64_t i = static_cast<int64_t>(precisions->size()) - 1; i > 0; --i) {
- if ((*precisions)[i] > (*precisions)[i - 1]) {
- (*precisions)[i - 1] = (*precisions)[i];
- }
- }
-
- // Sample the per instance precision/recall list at each recall threshold
- for (size_t r = 0; r < recall_thresholds.size(); ++r) {
- // first index in recalls >= recall_thresholds[r]
- std::vector<double>::iterator low = std::lower_bound(
- recalls->begin(), recalls->end(), recall_thresholds[r]);
- size_t precisions_index = low - recalls->begin();
-
- const auto results_ind = precisions_out_index + r * precisions_out_stride;
- assert(results_ind < precisions_out->size());
- assert(results_ind < scores_out->size());
- if (precisions_index < precisions->size()) {
- (*precisions_out)[results_ind] = (*precisions)[precisions_index];
- (*scores_out)[results_ind] =
- detection_scores[detection_sorted_indices[precisions_index]];
- } else {
- (*precisions_out)[results_ind] = 0;
- (*scores_out)[results_ind] = 0;
- }
- }
-}
-py::dict Accumulate(
- const py::object& params,
- const std::vector<ImageEvaluation>& evaluations) {
- const std::vector<double> recall_thresholds =
- list_to_vec<double>(params.attr("recThrs"));
- const std::vector<int> max_detections =
- list_to_vec<int>(params.attr("maxDets"));
- const int num_iou_thresholds = py::len(params.attr("iouThrs"));
- const int num_recall_thresholds = py::len(params.attr("recThrs"));
- const int num_categories = params.attr("useCats").cast<int>() == 1
- ? py::len(params.attr("catIds"))
- : 1;
- const int num_area_ranges = py::len(params.attr("areaRng"));
- const int num_max_detections = py::len(params.attr("maxDets"));
- const int num_images = py::len(params.attr("imgIds"));
-
- std::vector<double> precisions_out(
- num_iou_thresholds * num_recall_thresholds * num_categories *
- num_area_ranges * num_max_detections,
- -1);
- std::vector<double> recalls_out(
- num_iou_thresholds * num_categories * num_area_ranges *
- num_max_detections,
- -1);
- std::vector<double> scores_out(
- num_iou_thresholds * num_recall_thresholds * num_categories *
- num_area_ranges * num_max_detections,
- -1);
-
- // Consider the list of all detected instances in the entire dataset in one
- // large list. evaluation_indices, detection_scores,
- // image_detection_indices, and detection_sorted_indices all have the same
- // length as this list, such that each entry corresponds to one detected
- // instance
- std::vector<uint64_t> evaluation_indices; // indices into evaluations[]
- std::vector<double> detection_scores; // detection scores of each instance
- std::vector<uint64_t> detection_sorted_indices; // sorted indices of all
- // instances in the dataset
- std::vector<uint64_t>
- image_detection_indices; // indices into the list of detected instances in
- // the same image as each instance
- std::vector<double> precisions, recalls;
-
- for (auto c = 0; c < num_categories; ++c) {
- for (auto a = 0; a < num_area_ranges; ++a) {
- for (auto m = 0; m < num_max_detections; ++m) {
- // The COCO PythonAPI assumes evaluations[] (the return value of
- // COCOeval::EvaluateImages()) is one long list storing results for each
- // combination of category, area range, and image id, with categories in
- // the outermost loop and images in the innermost loop.
- const int64_t evaluations_index =
- c * num_area_ranges * num_images + a * num_images;
- int num_valid_ground_truth = BuildSortedDetectionList(
- evaluations,
- evaluations_index,
- num_images,
- max_detections[m],
- &evaluation_indices,
- &detection_scores,
- &detection_sorted_indices,
- &image_detection_indices);
-
- if (num_valid_ground_truth == 0) {
- continue;
- }
-
- for (auto t = 0; t < num_iou_thresholds; ++t) {
- // recalls_out is a flattened vector representing a
- // num_iou_thresholds X num_categories X num_area_ranges X
- // num_max_detections matrix
- const int64_t recalls_out_index =
- t * num_categories * num_area_ranges * num_max_detections +
- c * num_area_ranges * num_max_detections +
- a * num_max_detections + m;
-
- // precisions_out and scores_out are flattened vectors
- // representing a num_iou_thresholds X num_recall_thresholds X
- // num_categories X num_area_ranges X num_max_detections matrix
- const int64_t precisions_out_stride =
- num_categories * num_area_ranges * num_max_detections;
- const int64_t precisions_out_index = t * num_recall_thresholds *
- num_categories * num_area_ranges * num_max_detections +
- c * num_area_ranges * num_max_detections +
- a * num_max_detections + m;
-
- ComputePrecisionRecallCurve(
- precisions_out_index,
- precisions_out_stride,
- recalls_out_index,
- recall_thresholds,
- t,
- num_iou_thresholds,
- num_valid_ground_truth,
- evaluations,
- evaluation_indices,
- detection_scores,
- detection_sorted_indices,
- image_detection_indices,
- &precisions,
- &recalls,
- &precisions_out,
- &scores_out,
- &recalls_out);
- }
- }
- }
- }
-
- time_t rawtime;
- struct tm local_time;
- std::array<char, 200> buffer;
- time(&rawtime);
-#ifdef _WIN32
- localtime_s(&local_time, &rawtime);
-#else
- localtime_r(&rawtime, &local_time);
-#endif
- strftime(
- buffer.data(), 200, "%Y-%m-%d %H:%M:%S", &local_time);
- return py::dict(
- "params"_a = params,
- "counts"_a = std::vector({num_iou_thresholds,
- num_recall_thresholds,
- num_categories,
- num_area_ranges,
- num_max_detections}),
- "date"_a = buffer,
- "precision"_a = precisions_out,
- "recall"_a = recalls_out,
- "scores"_a = scores_out);
-}
-
-} // namespace COCOeval
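The comments in `Accumulate()` above describe `precisions_out`, `recalls_out`, and `scores_out` as flattened buffers over IoU thresholds (T), recall thresholds (R), categories (K), area ranges (A), and max-detection settings (M). A minimal sketch of that index arithmetic, not part of the deleted file and using made-up sizes:

```python
import numpy as np

# Illustrative sizes only (the real values come from params.iouThrs, params.recThrs, etc.)
T, R, K, A, M = 10, 101, 80, 4, 3
precisions_flat = np.full(T * R * K * A * M, -1.0)  # filled by the C++ Accumulate()

def flat_index(t, r, k, a, m):
    # Mirrors precisions_out_index + r * precisions_out_stride in the C++ code above
    stride = K * A * M
    base = t * R * K * A * M + k * A * M + a * M + m
    return base + r * stride

# The flat buffer reshapes to the (T, R, K, A, M) array the COCO Python API expects
precisions = precisions_flat.reshape(T, R, K, A, M)
t, r, k, a, m = 0, 50, 3, 0, 2
assert precisions[t, r, k, a, m] == precisions_flat[flat_index(t, r, k, a, m)]
```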
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/network_blocks.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/network_blocks.py
deleted file mode 100644
index 68aacfc33208eab072422e0647742006984dfdfd..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/models/network_blocks.py
+++ /dev/null
@@ -1,210 +0,0 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-# Copyright (c) Megvii Inc. All rights reserved.
-
-import torch
-import torch.nn as nn
-
-
-class SiLU(nn.Module):
- """export-friendly version of nn.SiLU()"""
-
- @staticmethod
- def forward(x):
- return x * torch.sigmoid(x)
-
-
-def get_activation(name="silu", inplace=True):
- if name == "silu":
- module = nn.SiLU(inplace=inplace)
- elif name == "relu":
- module = nn.ReLU(inplace=inplace)
- elif name == "lrelu":
- module = nn.LeakyReLU(0.1, inplace=inplace)
- else:
- raise AttributeError("Unsupported act type: {}".format(name))
- return module
-
-
-class BaseConv(nn.Module):
- """A Conv2d -> Batchnorm -> silu/leaky relu block"""
-
- def __init__(
- self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu"
- ):
- super().__init__()
- # same padding
- pad = (ksize - 1) // 2
- self.conv = nn.Conv2d(
- in_channels,
- out_channels,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- groups=groups,
- bias=bias,
- )
- self.bn = nn.BatchNorm2d(out_channels)
- self.act = get_activation(act, inplace=True)
-
- def forward(self, x):
- return self.act(self.bn(self.conv(x)))
-
- def fuseforward(self, x):
- return self.act(self.conv(x))
-
-
-class DWConv(nn.Module):
- """Depthwise Conv + Conv"""
-
- def __init__(self, in_channels, out_channels, ksize, stride=1, act="silu"):
- super().__init__()
- self.dconv = BaseConv(
- in_channels,
- in_channels,
- ksize=ksize,
- stride=stride,
- groups=in_channels,
- act=act,
- )
- self.pconv = BaseConv(
- in_channels, out_channels, ksize=1, stride=1, groups=1, act=act
- )
-
- def forward(self, x):
- x = self.dconv(x)
- return self.pconv(x)
-
-
-class Bottleneck(nn.Module):
- # Standard bottleneck
- def __init__(
- self,
- in_channels,
- out_channels,
- shortcut=True,
- expansion=0.5,
- depthwise=False,
- act="silu",
- ):
- super().__init__()
- hidden_channels = int(out_channels * expansion)
- Conv = DWConv if depthwise else BaseConv
- self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
- self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act)
- self.use_add = shortcut and in_channels == out_channels
-
- def forward(self, x):
- y = self.conv2(self.conv1(x))
- if self.use_add:
- y = y + x
- return y
-
-
-class ResLayer(nn.Module):
- "Residual layer with `in_channels` inputs."
-
- def __init__(self, in_channels: int):
- super().__init__()
- mid_channels = in_channels // 2
- self.layer1 = BaseConv(
- in_channels, mid_channels, ksize=1, stride=1, act="lrelu"
- )
- self.layer2 = BaseConv(
- mid_channels, in_channels, ksize=3, stride=1, act="lrelu"
- )
-
- def forward(self, x):
- out = self.layer2(self.layer1(x))
- return x + out
-
-
-class SPPBottleneck(nn.Module):
- """Spatial pyramid pooling layer used in YOLOv3-SPP"""
-
- def __init__(
- self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation="silu"
- ):
- super().__init__()
- hidden_channels = in_channels // 2
- self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation)
- self.m = nn.ModuleList(
- [
- nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
- for ks in kernel_sizes
- ]
- )
- conv2_channels = hidden_channels * (len(kernel_sizes) + 1)
- self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation)
-
- def forward(self, x):
- x = self.conv1(x)
- x = torch.cat([x] + [m(x) for m in self.m], dim=1)
- x = self.conv2(x)
- return x
-
-
-class CSPLayer(nn.Module):
- """C3 in yolov5, CSP Bottleneck with 3 convolutions"""
-
- def __init__(
- self,
- in_channels,
- out_channels,
- n=1,
- shortcut=True,
- expansion=0.5,
- depthwise=False,
- act="silu",
- ):
- """
- Args:
- in_channels (int): input channels.
- out_channels (int): output channels.
- n (int): number of Bottlenecks. Default value: 1.
- """
- # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__()
- hidden_channels = int(out_channels * expansion) # hidden channels
- self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
- self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
- self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)
- module_list = [
- Bottleneck(
- hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act
- )
- for _ in range(n)
- ]
- self.m = nn.Sequential(*module_list)
-
- def forward(self, x):
- x_1 = self.conv1(x)
- x_2 = self.conv2(x)
- x_1 = self.m(x_1)
- x = torch.cat((x_1, x_2), dim=1)
- return self.conv3(x)
-
-
-class Focus(nn.Module):
- """Focus width and height information into channel space."""
-
- def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="silu"):
- super().__init__()
- self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act)
-
- def forward(self, x):
- # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)
- patch_top_left = x[..., ::2, ::2]
- patch_top_right = x[..., ::2, 1::2]
- patch_bot_left = x[..., 1::2, ::2]
- patch_bot_right = x[..., 1::2, 1::2]
- x = torch.cat(
- (
- patch_top_left,
- patch_bot_left,
- patch_top_right,
- patch_bot_right,
- ),
- dim=1,
- )
- return self.conv(x)
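As the comment in `Focus.forward()` notes, the block rearranges each 2x2 spatial patch into the channel dimension before a single convolution. A small shape-check sketch, assuming the classes from the deleted file above are in scope:

```python
import torch

# (B, C, H, W) -> (B, 4C, H/2, W/2) inside Focus, then BaseConv maps 4C -> out_channels
focus = Focus(in_channels=3, out_channels=32, ksize=3, stride=1, act="silu")
x = torch.randn(2, 3, 64, 64)
y = focus(x)
print(y.shape)  # expected: torch.Size([2, 32, 32, 32])
```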
diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/minify_dataset.py b/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/minify_dataset.py
deleted file mode 100644
index e6095cecc8e99f231b80a3779b594cc29fd0ddda..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/minify_dataset.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2020 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from pathlib import Path
-
-import fire
-
-
-def minify(src_dir: str, dest_dir: str, n: int):
- """Write first n lines of each file f in src_dir to dest_dir/f"""
- src_dir = Path(src_dir)
- dest_dir = Path(dest_dir)
- dest_dir.mkdir(exist_ok=True)
- for path in src_dir.iterdir():
- new = [x.rstrip() for x in list(path.open().readlines())][:n]
- dest_path = dest_dir.joinpath(path.name)
- print(dest_path)
- dest_path.open("w").write("\n".join(new))
-
-
-if __name__ == "__main__":
- fire.Fire(minify)
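The script is normally driven through `fire` from the command line; calling the function directly is equivalent. A short usage sketch with placeholder paths:

```python
# Equivalent to: python minify_dataset.py cnn_dm cnn_dm_tiny 100
# "cnn_dm" and "cnn_dm_tiny" are placeholder directories for illustration.
minify("cnn_dm", "cnn_dm_tiny", 100)  # keeps the first 100 lines of each file
```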
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/onnx/summarization/README.md b/spaces/chendl/compositional_test/transformers/examples/research_projects/onnx/summarization/README.md
deleted file mode 100644
index c43b0450ea2c4bfacb2e9f5e2af2b6b41d6b340d..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/onnx/summarization/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-# Bart + Beam Search to ONNX
-
-Author: [@fatcat-z](https://github.com/fatcat-z)
-
-This folder contains an example of exporting Bart + Beam Search generation (`BartForConditionalGeneration`) to ONNX.
-
-Beam Search contains a for-loop workflow, so it needs to be made TorchScript-compatible before exporting to ONNX. This example shows how to make a Bart model TorchScript-compatible by wrapping it in a new model. In addition, some changes were made to the `beam_search()` function to make it TorchScript-compatible.
-
-
-## How to run the example
-
-To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
-
-```bash
-git clone https://github.com/huggingface/transformers
-cd transformers
-pip install '.[onnxruntime]'
-```
-Then cd into this example folder and run
-```bash
-pip install -r requirements.txt
-```
-
-Now you can run the example command below to get the example ONNX file:
-
-```bash
-python run_onnx_exporter.py --model_name_or_path facebook/bart-base
-```
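The wrapping idea described above can be sketched roughly as follows; the class name and arguments are illustrative, and unlike the real `run_onnx_exporter.py` this sketch does not rewrite `beam_search()` to be TorchScript-compatible:

```python
import torch
from transformers import BartForConditionalGeneration, BartTokenizer

class BartBeamSearchWrapper(torch.nn.Module):  # hypothetical name, for illustration only
    def __init__(self, model, num_beams=4, max_length=64):
        super().__init__()
        self.model = model
        self.num_beams = num_beams
        self.max_length = max_length

    def forward(self, input_ids, attention_mask):
        # Runs beam search inside forward() so the whole generation step is one module call
        return self.model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            num_beams=self.num_beams,
            max_length=self.max_length,
        )

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
wrapper = BartBeamSearchWrapper(BartForConditionalGeneration.from_pretrained("facebook/bart-base"))
inputs = tokenizer("ONNX export of beam search needs a traceable graph.", return_tensors="pt")
summary_ids = wrapper(inputs["input_ids"], inputs["attention_mask"])
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))
```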
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh b/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh
deleted file mode 100644
index 6a1bafbdc9c8c944e407bb766a1e5fe6177b0404..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-export PYTHONPATH="../":"${PYTHONPATH}"
-
-export BS=32
-export GAS=1
-
-python finetune.py \
- --learning_rate=3e-5 \
- --fp16 \
- --gpus 1 \
- --do_train \
- --do_predict \
- --val_check_interval 0.25 \
- --n_val 500 \
- --num_train_epochs 2 \
- --freeze_encoder --freeze_embeds --data_dir cnn_dm \
- --max_target_length 142 --val_max_target_length=142 \
- --train_batch_size=$BS --eval_batch_size=$BS --gradient_accumulation_steps=$GAS \
- --model_name_or_path sshleifer/student_cnn_12_6 \
- --tokenizer_name facebook/bart-large \
- --warmup_steps 500 \
- --output_dir distilbart-cnn-12-6 \
- "$@"
-
diff --git a/spaces/chenman/Meina-MeinaMix/app.py b/spaces/chenman/Meina-MeinaMix/app.py
deleted file mode 100644
index 811eafa7aa381ad173de55aa27fb9c091aa0199f..0000000000000000000000000000000000000000
--- a/spaces/chenman/Meina-MeinaMix/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/Meina/MeinaMix").launch()
\ No newline at end of file
diff --git a/spaces/chlab/interactive_kinematic_planet_detector/utils/__init__.py b/spaces/chlab/interactive_kinematic_planet_detector/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/client_ws.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/client_ws.py
deleted file mode 100644
index 9a8ba84ca5082ad6d672c3837d4810e467a8080e..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/aiohttp/client_ws.py
+++ /dev/null
@@ -1,300 +0,0 @@
-"""WebSocket client for asyncio."""
-
-import asyncio
-from typing import Any, Optional, cast
-
-import async_timeout
-
-from .client_exceptions import ClientError
-from .client_reqrep import ClientResponse
-from .helpers import call_later, set_result
-from .http import (
- WS_CLOSED_MESSAGE,
- WS_CLOSING_MESSAGE,
- WebSocketError,
- WSCloseCode,
- WSMessage,
- WSMsgType,
-)
-from .http_websocket import WebSocketWriter # WSMessage
-from .streams import EofStream, FlowControlDataQueue
-from .typedefs import (
- DEFAULT_JSON_DECODER,
- DEFAULT_JSON_ENCODER,
- JSONDecoder,
- JSONEncoder,
-)
-
-
-class ClientWebSocketResponse:
- def __init__(
- self,
- reader: "FlowControlDataQueue[WSMessage]",
- writer: WebSocketWriter,
- protocol: Optional[str],
- response: ClientResponse,
- timeout: float,
- autoclose: bool,
- autoping: bool,
- loop: asyncio.AbstractEventLoop,
- *,
- receive_timeout: Optional[float] = None,
- heartbeat: Optional[float] = None,
- compress: int = 0,
- client_notakeover: bool = False,
- ) -> None:
- self._response = response
- self._conn = response.connection
-
- self._writer = writer
- self._reader = reader
- self._protocol = protocol
- self._closed = False
- self._closing = False
- self._close_code: Optional[int] = None
- self._timeout = timeout
- self._receive_timeout = receive_timeout
- self._autoclose = autoclose
- self._autoping = autoping
- self._heartbeat = heartbeat
- self._heartbeat_cb: Optional[asyncio.TimerHandle] = None
- if heartbeat is not None:
- self._pong_heartbeat = heartbeat / 2.0
- self._pong_response_cb: Optional[asyncio.TimerHandle] = None
- self._loop = loop
- self._waiting: Optional[asyncio.Future[bool]] = None
- self._exception: Optional[BaseException] = None
- self._compress = compress
- self._client_notakeover = client_notakeover
-
- self._reset_heartbeat()
-
- def _cancel_heartbeat(self) -> None:
- if self._pong_response_cb is not None:
- self._pong_response_cb.cancel()
- self._pong_response_cb = None
-
- if self._heartbeat_cb is not None:
- self._heartbeat_cb.cancel()
- self._heartbeat_cb = None
-
- def _reset_heartbeat(self) -> None:
- self._cancel_heartbeat()
-
- if self._heartbeat is not None:
- self._heartbeat_cb = call_later(
- self._send_heartbeat, self._heartbeat, self._loop
- )
-
- def _send_heartbeat(self) -> None:
- if self._heartbeat is not None and not self._closed:
- # fire-and-forget a task is not perfect but maybe ok for
- # sending ping. Otherwise we need a long-living heartbeat
- # task in the class.
- self._loop.create_task(self._writer.ping())
-
- if self._pong_response_cb is not None:
- self._pong_response_cb.cancel()
- self._pong_response_cb = call_later(
- self._pong_not_received, self._pong_heartbeat, self._loop
- )
-
- def _pong_not_received(self) -> None:
- if not self._closed:
- self._closed = True
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._exception = asyncio.TimeoutError()
- self._response.close()
-
- @property
- def closed(self) -> bool:
- return self._closed
-
- @property
- def close_code(self) -> Optional[int]:
- return self._close_code
-
- @property
- def protocol(self) -> Optional[str]:
- return self._protocol
-
- @property
- def compress(self) -> int:
- return self._compress
-
- @property
- def client_notakeover(self) -> bool:
- return self._client_notakeover
-
- def get_extra_info(self, name: str, default: Any = None) -> Any:
- """extra info from connection transport"""
- conn = self._response.connection
- if conn is None:
- return default
- transport = conn.transport
- if transport is None:
- return default
- return transport.get_extra_info(name, default)
-
- def exception(self) -> Optional[BaseException]:
- return self._exception
-
- async def ping(self, message: bytes = b"") -> None:
- await self._writer.ping(message)
-
- async def pong(self, message: bytes = b"") -> None:
- await self._writer.pong(message)
-
- async def send_str(self, data: str, compress: Optional[int] = None) -> None:
- if not isinstance(data, str):
- raise TypeError("data argument must be str (%r)" % type(data))
- await self._writer.send(data, binary=False, compress=compress)
-
- async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None:
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError("data argument must be byte-ish (%r)" % type(data))
- await self._writer.send(data, binary=True, compress=compress)
-
- async def send_json(
- self,
- data: Any,
- compress: Optional[int] = None,
- *,
- dumps: JSONEncoder = DEFAULT_JSON_ENCODER,
- ) -> None:
- await self.send_str(dumps(data), compress=compress)
-
- async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool:
- # we need to break `receive()` cycle first,
- # `close()` may be called from different task
- if self._waiting is not None and not self._closed:
- self._reader.feed_data(WS_CLOSING_MESSAGE, 0)
- await self._waiting
-
- if not self._closed:
- self._cancel_heartbeat()
- self._closed = True
- try:
- await self._writer.close(code, message)
- except asyncio.CancelledError:
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._response.close()
- raise
- except Exception as exc:
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._exception = exc
- self._response.close()
- return True
-
- if self._closing:
- self._response.close()
- return True
-
- while True:
- try:
- async with async_timeout.timeout(self._timeout):
- msg = await self._reader.read()
- except asyncio.CancelledError:
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._response.close()
- raise
- except Exception as exc:
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._exception = exc
- self._response.close()
- return True
-
- if msg.type == WSMsgType.CLOSE:
- self._close_code = msg.data
- self._response.close()
- return True
- else:
- return False
-
- async def receive(self, timeout: Optional[float] = None) -> WSMessage:
- while True:
- if self._waiting is not None:
- raise RuntimeError("Concurrent call to receive() is not allowed")
-
- if self._closed:
- return WS_CLOSED_MESSAGE
- elif self._closing:
- await self.close()
- return WS_CLOSED_MESSAGE
-
- try:
- self._waiting = self._loop.create_future()
- try:
- async with async_timeout.timeout(timeout or self._receive_timeout):
- msg = await self._reader.read()
- self._reset_heartbeat()
- finally:
- waiter = self._waiting
- self._waiting = None
- set_result(waiter, True)
- except (asyncio.CancelledError, asyncio.TimeoutError):
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- raise
- except EofStream:
- self._close_code = WSCloseCode.OK
- await self.close()
- return WSMessage(WSMsgType.CLOSED, None, None)
- except ClientError:
- self._closed = True
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- return WS_CLOSED_MESSAGE
- except WebSocketError as exc:
- self._close_code = exc.code
- await self.close(code=exc.code)
- return WSMessage(WSMsgType.ERROR, exc, None)
- except Exception as exc:
- self._exception = exc
- self._closing = True
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- await self.close()
- return WSMessage(WSMsgType.ERROR, exc, None)
-
- if msg.type == WSMsgType.CLOSE:
- self._closing = True
- self._close_code = msg.data
- if not self._closed and self._autoclose:
- await self.close()
- elif msg.type == WSMsgType.CLOSING:
- self._closing = True
- elif msg.type == WSMsgType.PING and self._autoping:
- await self.pong(msg.data)
- continue
- elif msg.type == WSMsgType.PONG and self._autoping:
- continue
-
- return msg
-
- async def receive_str(self, *, timeout: Optional[float] = None) -> str:
- msg = await self.receive(timeout)
- if msg.type != WSMsgType.TEXT:
- raise TypeError(f"Received message {msg.type}:{msg.data!r} is not str")
- return cast(str, msg.data)
-
- async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes:
- msg = await self.receive(timeout)
- if msg.type != WSMsgType.BINARY:
- raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes")
- return cast(bytes, msg.data)
-
- async def receive_json(
- self,
- *,
- loads: JSONDecoder = DEFAULT_JSON_DECODER,
- timeout: Optional[float] = None,
- ) -> Any:
- data = await self.receive_str(timeout=timeout)
- return loads(data)
-
- def __aiter__(self) -> "ClientWebSocketResponse":
- return self
-
- async def __anext__(self) -> WSMessage:
- msg = await self.receive()
- if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED):
- raise StopAsyncIteration
- return msg
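In normal use this class is not constructed directly: `ClientSession.ws_connect()` returns a `ClientWebSocketResponse`, and the `__aiter__`/`__anext__` methods above make it iterable. A minimal usage sketch against a placeholder URL:

```python
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        # wss://echo.example.org/ws is a placeholder endpoint
        async with session.ws_connect("wss://echo.example.org/ws") as ws:
            await ws.send_str("ping")
            async for msg in ws:  # drives receive() via __anext__
                if msg.type == aiohttp.WSMsgType.TEXT:
                    print("received:", msg.data)
                    await ws.close()
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    break

asyncio.run(main())
```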
diff --git a/spaces/cihyFjudo/fairness-paper-search/Cypheros TS-Doctor 1.22 Portable PORTABLE.md b/spaces/cihyFjudo/fairness-paper-search/Cypheros TS-Doctor 1.22 Portable PORTABLE.md
deleted file mode 100644
index 4e7cd4afc66b41bdae12adc7081215e48701c9a8..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Cypheros TS-Doctor 1.22 Portable PORTABLE.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Cypheros TS-Doctor 1.22 Portable Download Zip ››››› https://tinurli.com/2uwjIP
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Download Film Il Risveglio Del Dinosauro 3 Full Movie.md b/spaces/cihyFjudo/fairness-paper-search/Download Film Il Risveglio Del Dinosauro 3 Full Movie.md
deleted file mode 100644
index de6d14cb14c35a6fe33b8671271e53bb36c7d06e..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Download Film Il Risveglio Del Dinosauro 3 Full Movie.md
+++ /dev/null
@@ -1,6 +0,0 @@
-download film Il risveglio del dinosauro 3 full movie Download Zip 🔗 https://tinurli.com/2uwkBM
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Nobel Font Free Download Mac The Ultimate Guide to Installing and Using This Classic Font.md b/spaces/cihyFjudo/fairness-paper-search/Nobel Font Free Download Mac The Ultimate Guide to Installing and Using This Classic Font.md
deleted file mode 100644
index 5056188b060c3b81cc24066ea5707589ac7ec64d..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Nobel Font Free Download Mac The Ultimate Guide to Installing and Using This Classic Font.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-Click to view font family "Nobel".Nobel BoldNobel Condensed BoldNobel Condensed RegularNobel LightNobel Regular Italic About the font Nobel RegularBe aware that the Nobel Regular font is free for personal knowledge and use only. However, you need to contact the author for commercial use or for any support.You can use the Nobel Regular to create interesting designs, covers, shop and store name and logos.Also, the Nobel Regular font is perfect for branding projects, housewares designs, product packaging, or simply as a stylish text overlay on any background image.FamilyNobelSub-familyRegularVersion001.000AuthorCompanySiteCopyrightLicenceFor personal use onlyLicence MaisFontesFor personal use onlyMost wanted:fontes gratis, baixar fontes gratis, font ttf, fontes para word gratis, fonts free Typography Nobel RegularTo evaluate the typeface, in this section there is a preview of which we select 31 special characters or with accents, 26 letters of the alphabet in upper and lower case and the numbering from 0 to 10. The letters will be the same after installed in your operating system, either for viewing or for printing. Nobel Regular font authorFurthermore, about all the content of this source, we also provide some additional information from the author and/or company. Therefore, if you need to clarify doubts about the license for personal or commercial use, please contact the author. Author not found. License informationThe Nobel Regular font provided is for typography style knowledge only. The download is completely free for personal use and the font cannot be used for commercial purposes.Therefore, if you wish to use this font for commercial purposes, you must purchase a license or contact the author for permission to use it. How to install the Nobel Regular fontYou can install the Nobel Regular font on any operating system. For safety and to ensure that there is no Malware or malicious software , downloading the source file é compressed in ZIP format. Fonts are in OTF (OpenType ) or TTF (TrueType ) format.
Click here to install the font on Microsoft Windows (all versions). Click here to install the font on MAC OS. Content related to Nobel RegularWe found new special content and prepared with all dedication! The content below is related to the source Nobel Regular . Click on the topic you want to learn more! Download Nobel FontsThe elegance of a good font can make a great visual impact to the end user. Discover Nobel fonts and download for free. Download variations of Nobel RegularAccording to the Nobel Regular font family , below, we have listed other fonts that may be useful for your project. We have made an improved selection especially for you.Random fonts: Click to load 3 other fontsNobel Bold Download this fontNobel Condensed Bold Download this fontNobel Condensed Regular Download this fontNobel Light Download this fontNobel Regular Italic Download this font Leave your feedback for the Nobel Regular fontFinally, it's very important that we know your feedback about the Nobel Regular font. Also tell us what type of project you used. Sharing your opinion and ideas will help many other participants in the MaisFontes community to improve the arts. Also take the opportunity to share on social networks or click SAVE to keep this font in your fonts panel in the User Portal. Create a free account on MaisFontes by clicking here. Cloud words: Nobel Regular Nobel Regular font download;Nobel Regular font free;Nobel Regular download;Nobel Regular Font;Nobel Regular Logotipo;free font Nobel Regular;Nobel Regular free font;Font Nobel Regular; × Nobel RegularEmail type correctly your email Cancel Send email× Click to show the lettertypenobel-regular.png Save imageDonate and help us!Continue browsing Type your comment below. Cancel CommentComentários ComentarBe the first to comment.if(typeof ez_ad_units!='undefined')ez_ad_units.push([[300,250],'maisfontes_com-medrectangle-1','ezslot_11',117,'0','0']);__ez_fad_position('div-gpt-ad-maisfontes_com-medrectangle-1-0');report this ad ©MaisFontes 2014-2023
-Nobel Font Free Download Mac DOWNLOAD ->>->>->> https://tinurli.com/2uwiTB
-There were 6 fonts were included in the font family but this font only selected for the work of Guggenheim Museum with the pairing of Bangers font. This font would provide a vintage look to your designs. This font family is also included in the Google fonts as well as Adobe fonts. You can free download this font from our website but only for personal projects. Brandon Grotesque Font and Nobel Font are the most similar fonts to the Verlag font.
-This font would provide a vintage look to your designs. This font family is also included in the Google fonts as well as Adobe fonts. You can free download this font from our website but only for personal projects. Brandon Grotesque Font and Nobel Font are the most similar fonts to the Verlag font.
-If you are looking for a free version of this typeface then we providing a freeware font and you can utilize the font in your personal projects. You just need to click on the below download button to get to your operating systems.
-If your on windows and have access to your fonts library through the control or command center you can simply find a free download of the font that you need and copy them into your fonts library. restart adobe and you should have the new fonts
-
-A total of 6 fonts are available in this font family and Guggenheim Museum used this font with the combination of Bangers font. Its light black weight is the best known for its pairing functions. This would be an ideal font to take a traditional look at your designs. Moreover, It can be freely used withing Adobe fonts and it is also available in Google fonts.
-It has wonderful 192 glyphs and providing more than 250 stylish characters that are very suitable for your titling and headlines. Brandon Grotesque Font and Nobel Font are the most similar fonts to the Verlag font. We are providing a free version of this font family on this website and you can utilize it in all your graphic designs.
-This font family is free in any type of designing project with your adobe accounts such as you can use many other typefaces in the font library. But without an Adobe account, you should talk to its designer for its license.
-NOBEL-REGULAR.OTF, Nobel Regular, nobel, Nobel Regular, nobel, Nobel-Regular.otf, Windows, OTF, font The fonts presented on this website are their authors' property, and are either freeware, shareware, demo versions or public domain.
-Nobel-Bold NOBEL-BOLD.OTF , Nobel Bold , nobel , Nobel Bold , nobel , Nobel-Bold.otf , Windows , OTF , font The fonts presented on this website are their authors' property, and are either freeware, shareware, demo versions or public domain.
-Dtl nobel font family Browse a full collection of fronts from the dtl nobel font family. This family contains 8 fonts in styles such as t bold, t condensed bold, t light, t regular, t light italic, t condensed regular, t euro and t italic.
-Fortunately, there are a lot of excellent font repositories that do go to great lengths to curate their collections. One of our favorites is Google Fonts, which offers over 900 font families in several languages, all of them free:
-The great thing about WhatsApp is that it's constantly updating its offering. When we first collated this list, WhatsApp was best for personal use as it didn't have any business options available. Since launching WhatsApp for business it can now cater to a new clientele, allowing companies to communicate and respond with customers in real-time, provided they download the free WhatsApp for business App. We'd still recommend using WhatsApp for personal use though, given that the service is only available for Android users. However, given the ever-evolving nature of WhatsApp for Business, it may still be worth giving it a go.
-Additionally, Viber offers a similar service as FaceTime and WhatsApp, in that you can call anyone internationally for free, as long as they are also using the Viber app. Granted, it's not as popular as WhatsApp or FaceTime, but it's free to download, which could make for an easy and affordable alternative.
-Viber's international calling app is probably best suited for personal use . Given the more expensive rates for international calling and the fact that most of your clients probably don't have the Viber app, it's safe to say it's not ideal to run a business on it. However, if you're trying to make personal calls, it's safe to say it would be much easier to get them to download the app to avoid the cost. Still, you can always use our free VoIP comparison tool to find something a system that's more tailored to your needs.
-All rights for the fonts given on this website reserved by their owners (authors, designers). The license given on the font page only represents received data. For detailed information, please, read the files (e.g., readme.txt) from archive or visit the website given by an author (designer) or contact with him if you have any doubt. If there is no reported author (designer) or license, it means that there is no information on the given font, but it does not mean that the font is free.
-By creating a free account at Free-Ebooks(Opens in a new window), you can download up to five free titles each month. If you want more, a $50 VIP plan grants you an unlimited number of free books for life. But the free account is a good way to get started. Just watch out for the special offers and promotions as you sign up for your account.
-Select a specific book to see customer reviews and other information. Click the Get for free button to download an ebook or audiobook. You can read or play the book directly at the website or snag the free Google Books app for iOS/iPadOS(Opens in a new window) or Android(Opens in a new window) to access the title on your mobile device.
-Yes, you can write a book in Microsoft Word, and it may even help you write your book faster and increase your writing focus, since you can save time formatting and setting up your book layout. You can write a book in Word by downloading a free book template, by using the standard book template Word offers, or by setting up your own (which is more time-consuming).
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Remo Software Activation Key 23.md b/spaces/cihyFjudo/fairness-paper-search/Remo Software Activation Key 23.md
deleted file mode 100644
index 7c4332b53170c5bbba95c52d94fdd72931ec3683..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Remo Software Activation Key 23.md
+++ /dev/null
@@ -1,33 +0,0 @@
-
-If the LAN cable is connected to a switch/hub in the network, and if the IP address is set accordingly on the TNC, then the transmission of data to and from the TNC can be started through the use of the machine's IP address with TNCremo, for example.
-There are several possibilities depending on what is needed: - TNCremo PLUS - This allows you to display a live image of your control. For more information, please refer to the following link: -shop-floor/connected-machining- TeleService - This lets you display a live image of your control and operate it.- StateMonitor - This gives you fundamental information about machine availability, whether a program is running, or whether an error is shown. For more information, please refer to the following link: -portal.com/en/software/machine-data-collection/- SRI State Reporting Interface - With its State Reporting Interface (SRI) software option, HEIDENHAIN offers an interface for easily making machine operating statuses available to a higher-level MES or ERP system.
-remo software activation key 23 Download Zip ❤ https://tinurli.com/2uwk3g
-Newer controls (TNC 4xx or later) feature a GoldCap capacitor, which ensures that the data are buffered while the battery is being changed. However, the battery should not be removed for an extended period of time. Older controls do not have a capacitor, and their data are not saved when the control is switched off.
-The software of the test version and full version is identical. It is enabled via a hardware dongle that is ordinarily built into the programming station keyboard. Whether this kind of dongle or an external dongle is connected depends on the given driver. The driver can be downloaded from our website free of charge but is dependent on the model. If you are unsure, then simply install both drivers.
-The postprocessor must always be purchased from the CAM system manufacturerer. Whether 2D, 3D (inclined machining), or 3D simultaneous machining is required depends on the machine, the NC software, and, of course, on the range of parts to be machined.
-We try to list the most commonly encountered free software license onthis page, but cannot list them all; we'll try our best to answerquestions about free software licenses whether or not they are listedhere. The licenses are more or less in alphabetical order within eachsection.
-If you have questions about free software licenses, you can emailusat .Because our resources are limited, we do not answer questions that aremeant to assist proprietary software development or distribution, andyou'll likely get an answer faster if you ask a specific question thatisn't already covered here or in ourFAQ. Wewelcome knowledgeablevolunteers who want to help answer licensing questions.
-If you are contemplating writing a new license, please also contactus at . Theproliferation of different free software licenses is a significantproblem in the free software community today, both for users anddevelopers. We will do our best to help you find an existing freesoftware license that meets your needs.
-Please note that GPLv3 is not compatible with GPLv2 by itself.However, most software released under GPLv2 allows you to use theterms of later versions of the GPL as well. When this is the case,you can use the code under GPLv3 to make the desired combination. Tolearn more about compatibility between GNU licenses,please see ourFAQ.
-
-Please note that GPLv2 is, by itself, not compatible with GPLv3.However, most software released under GPLv2 allows you to use theterms of later versions of the GPL as well. When this is the case,you can use the code under GPLv3 to make the desired combination. Tolearn more about compatibility between GNU licenses,please see ourFAQ.
-This is the latest version of the LGPL: a free software license, but nota strong copyleft license, because it permits linking with nonfreemodules. It is compatible with GPLv3. We recommend it for special circumstancesonly.
-Please note that LGPLv3 is not compatible with GPLv2 by itself.However, most software released under GPLv2 allows you to use theterms of later versions of the GPL as well. When this is the case,you can use the code under GPLv3 to make the desired combination. Tolearn more about compatibility between GNU licenses,please see ourFAQ.
-This is the previous version of the LGPL: a free software license,but not a strong copyleft license, because it permits linking withnonfree modules. It is compatible with GPLv2 and GPLv3. Wegenerally recommend the latest version of theLGPL, for specialcircumstances only. To learn more about how LGPLv2.1 iscompatible with other GNU licenses,please see ourFAQ.
-This is a free software, copyleft license. Its terms effectivelyconsist of the terms of GPLv3, with an additional paragraph in section 13to allow users who interact with the licensed software over a network toreceive the source for that program. We recommend that developers considerusing the GNU AGPL for any software which will commonly be run over anetwork.
-This is a lax, permissive free software license, compatible withthe GNU GPL, which we recommend GNU packages use for README and othersmall supporting files. All developers can feel free to use it insimilar situations.
-This is a free software license, compatible with both GPLv2 andGPLv3. It is based on the modified BSDlicense, and adds a term expressly stating it does not grant youany patent licenses. Because of this, we encourage you to be carefulabout using software under this license; you should first considerwhether the licensor might want to sue you for patent infringement.If the developer is refusing users patent licenses to set up a trapfor you, it would be wise to avoid the program.
-The eCos license version 2.0 is a GPL-compatible free softwarelicense. It consists of the GPL, plus an exception allowing linking tosoftware not under the GPL. This license has the same disadvantagesas the LGPL.
-This is a free software license, and it is compatible withGPLv3. It is based on the Apache License2.0; the scope of the patent license has changed so that whenan organization's employee works on a project, the organizationdoes not have to license all of its patents to recipients. Thispatent license and the indemnification clause in section 9 makethis license incompatible with GPLv2.
-This is a free software license, and compatible with the GNU GPL.The authors have assured us that developers who document changes asrequired by the GPL will also comply with the similar requirement inthis license.
-In the United States, these licenses are supposed to be interpretedbased on what the author seems to intend. So they probably mean whatthey appear to mean. That would make them non-copyleft free softwarelicenses and compatible with the GNU GPL. However, an unlucky choiceof wording could give it a different meaning.
-It's important to understand that the condition to distribute filesunder the MPL's terms only applies to the party that first creates anddistributes the Larger Work. If it applied to their recipients as well, itwould be a further restriction and incompatible with the GPL and AGPL.That said, when you make contributions to an existing project, we usuallyrecommend that you keep your changes under the same license,even when you're not required to do so. If you receive a work under a GNUlicense where some files are also under the MPL, you should only remove theMPL from those files when there's a strong reason to justify it.
-Software under previous versions of the MPL can be upgraded to version2.0, but any software that isn't already available under one of thelisted GNU licenses must be marked as Incompatible With SecondaryLicenses. This means that software that's only available underprevious versions of the MPL is still incompatible with the GPL and AGPL.
-Being in the public domain is not a license; rather, it means thematerial is not copyrighted and no license is needed. Practicallyspeaking, though, if a work is in the public domain, it might as wellhave an all-permissive non-copyleft free software license. Publicdomain material is compatible with the GNU GPL.
-Previous versions of the SGI Free Software License B were not freesoftware licenses, despite their name. However, they all includedclauses that allow you to upgrade to new versions of the license, if youchoose to do so. As a result, if a piece of software was released underany version of the SGI Free License B, you can use it under the terms ofthis free version.
-If you want to use files covered by this License Agreement in yourown software, that shouldn't be any problem, but we recommend thatyou also include a full copy of its text. Some of the files containalternative license terms which are nonfree, or no licensinginformation at all, so including a copy of the License Agreementwill help avoid confusion when others want to distribute yoursoftware. Of course, you'll also need to follow the conditions inthis License Agreement for distributing the files, but those arevery straightforward.
-Please do not use this License Agreement for your own software. Ifyou want to use a lax permissive license for your project, please usethe Expat license for a small program and theApache 2.0 license for a substantial program. These are far morecommon, and widely recognized in the free software community.
-This is a lax, permissive non-copyleft free software license, compatiblewith the GNU GPL. The license does provide the ability to licensepatents along with the software work, however, we still recommend theApache 2.0 license for avoiding patent treachery when choosing to putyour work under a lax license.
-The WxWidgets license is a GPL-compatible free software license. Itconsists of the GNULesser GPL 2.0 or any later version, plus an additional permissionallowing binary distributions that use the library to be licensedunder terms of the distributor's choice (including proprietary). Itis a weak copyleft, even weaker than the LGPL, so we recommendit only in specialcircumstances.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/[Korean nuclear fusion reactor achieves 100 millionC for 30 seconds](1).md b/spaces/cihyFjudo/fairness-paper-search/[Korean nuclear fusion reactor achieves 100 millionC for 30 seconds](1).md
deleted file mode 100644
index 495fd373c441a9baa84e9144738088c123427078..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/[Korean nuclear fusion reactor achieves 100 millionC for 30 seconds](1).md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-Select the 3D polyline to join: pick a 3Dpolyline Select the other 3D polyline to join: pick a 3D polylinethat has a common endpoint with the first 3 segments added to the polyline.
-Join 3d Polyline Autocad Download • https://tinurli.com/2uwhZM
-The lines are created (in a .dxf) by a GPS unit in the field, and represent tracks needing to be mapped. Typically we would drive out a track and then drive back on the same track; this gives me two 3d polylines. Because of GPS errors it better practice to log the roads/ tracks twice. The lines can have from 2000 to 10000 vertices, so manually drawing is not an option. Any ideas? Thanks
-Create a 3d object away from the two 3d objects you need to join, type SUBTRACT, select the two 3d objects as regions to subtract from, then select the created 3d object away from the two as the region to subtract to... press enter
-@James Maeding Hi. I'm using V21-2.02-01. I've drawn 2 3DPOLY that have 3 vertex each, and with different Z values, but one vertex for each one of them was the same( X,Y,Z). I used the JOIN command and it did joined them to one entity, kipping the joined entity as a 3DPOLY. I did another test, Drawing PLOYLINE (x,y and elevation) and a 3DPOLY with different z values and one joint vertex, and when using the join command the joined object turned in to 3DPOLY and kept the z values (Elevation of the 2DPOLY turned to Z).
-
-Try to implement this small trick into your work and you will see a great improvement in speed and quality of your drawings! If you want to learn more tricks concerning Polylines you can look at those: Create Polyline around the Outside of Multiple Objects! Edit Vertexes and use Linetype Polylines! Coordinates of polyline? But How and Why? Split polylines into equal segments!
-This application joins and cleans-up polylines and line entities to create closed boundaries and to close gaps not seen visibly. The app includes 9 functions; each with additional settings and sub-routines for quick and automatic joining, trimming, cleaning and closing. With 9 user-defined settings. 1. Join Polylines Automatically 2. Checks Polyline Closure 3. Trims Extended Objects 4. Trims and Joins selected lines 5. Joins Multiple Polyline Endpoints 6. Auto Link 7. Joins Broken Tapered Polyline(s) 8. Closes Opened Polygon(s) 9. Trims Lines and Polylines in/out Polygon(s)$50.00 to purchase from within the app using a Paypal account OR make a one-time purchase with a Debit or Credit Card through Paypal (no Paypal account required)Note: This app uses a custom installer (and not the standard App Store installer).
-There are various polyline types in AutoCAD®, the most common of which is usually the LWPolyline (lightweight polyline). It can be confusing because the LWPolyline and the old Polyline entity used way back in the early days of AutoCAD® are both shown as a "Polyline" in the Properties window. Chances are though, unless you're working on some nasty generated drawing, you're probably using LWPolylines.
-The third polyline type is the 3D polyline, which as the name implies is a 3D version. Both the LWPolyline and the Polyline only permit the creation of geometry on a flat plane (UCS), but the 3D polyline allows points anywhere in 3D space.
-Converting between the various types therefore has obvious difficulty, because going from 3D to 2D means you're going to have to remove some of the 3D information from the polyline, and AutoCAD® could interpret how to do this in many ways. Therefore historically there have not been native commands to convert between the polyline types.
-The FLATTEN command is an option for making the polyline flat (i.e., visibly the same as before, but drawn as a 2D line on whatever UCS you're working on). I have had troubles with FLATTEN in the past however, because I think it does some odd stuff sometimes with merging lines and approximating... so I tend to avoid it where possible.
-the tool with then ask you to select your first line segment - which if drawn using the line tool will be a single line - it will then ask to convert this segment to a polyline as shown below - say Yes
-
-
\ No newline at end of file
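Since the excerpt above ends with the discussion of flattening 3D polylines without a reliable native command, here is a minimal Python sketch of the same idea done outside AutoCAD®: read the GPS-logged .dxf with the ezdxf library, drop the Z coordinate of every 3D polyline, and write the result back as a flat LWPolyline. This is only an illustration under stated assumptions - the file names are placeholders, and the exact vertex attributes (pline.vertices, v.dxf.location) reflect my understanding of the ezdxf API rather than a tested tool.

import ezdxf

doc = ezdxf.readfile("tracks.dxf")           # GPS-logged drawing (placeholder name)
msp = doc.modelspace()

for pline in list(msp.query("POLYLINE")):    # 3D polylines are stored as POLYLINE entities
    if not pline.is_3d_polyline:
        continue
    # Keep only X/Y of every vertex: this is exactly the information FLATTEN discards.
    pts_2d = [(v.dxf.location.x, v.dxf.location.y) for v in pline.vertices]
    msp.add_lwpolyline(pts_2d, dxfattribs={"layer": pline.dxf.layer})
    msp.delete_entity(pline)                 # optionally remove the 3D original

doc.saveas("tracks_flat.dxf")

Averaging the two flattened passes of the same track (to reduce GPS error) can then be done on plain X/Y point lists before re-importing into AutoCAD®.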
diff --git a/spaces/cjayic/sovits-overwatch2/modules.py b/spaces/cjayic/sovits-overwatch2/modules.py
deleted file mode 100644
index eb2d8e5f84c94a4f578280cef3d6327ac0d6a773..0000000000000000000000000000000000000000
--- a/spaces/cjayic/sovits-overwatch2/modules.py
+++ /dev/null
@@ -1,449 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
- assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(
- nn.ReLU(),
- nn.Dropout(p_dropout))
- for _ in range(n_layers-1):
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
- Dilated and Depth-Separable Convolution
- """
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size ** i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
- groups=channels, dilation=dilation, padding=padding
- ))
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
- super(WN, self).__init__()
- assert(kernel_size % 2 == 1)
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
- for i in range(n_layers):
- dilation = dilation_rate ** i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
- dilation=dilation, padding=padding)
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(
- x_in,
- g_l,
- n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:,self.hidden_channels:,:]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2])))
- ])
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1)))
- ])
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1])))
- ])
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels,1))
- self.logs = nn.Parameter(torch.zeros(channels,1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1,2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1,2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
-
-class ConvFlow(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails='linear',
- tail_bound=self.tail_bound
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1,2])
- if not reverse:
- return x, logdet
- else:
- return x
-
-# modules from StarGANv2-VC
-
-class DownSample(nn.Module):
- def __init__(self, layer_type):
- super().__init__()
- self.layer_type = layer_type
-
- def forward(self, x):
- if self.layer_type == 'none':
- return x
- elif self.layer_type == 'timepreserve':
- return F.avg_pool2d(x, (2, 1))
- elif self.layer_type == 'half':
- return F.avg_pool2d(x, 2)
- else:
- raise RuntimeError('Got unexpected downsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
-
-class ResBlock3(nn.Module):
- def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
- normalize=False, downsample='none'):
- super().__init__()
- self.actv = actv
- self.normalize = normalize
- self.downsample = DownSample(downsample)
- self.learned_sc = dim_in != dim_out
- self._build_weights(dim_in, dim_out)
-
- def _build_weights(self, dim_in, dim_out):
- self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
- self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
- if self.normalize:
- self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
- self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
- if self.learned_sc:
- self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
-
- def _shortcut(self, x):
- if self.learned_sc:
- x = self.conv1x1(x)
- if self.downsample:
- x = self.downsample(x)
- return x
-
- def _residual(self, x):
- if self.normalize:
- x = self.norm1(x)
- x = self.actv(x)
- x = self.conv1(x)
- x = self.downsample(x)
- if self.normalize:
- x = self.norm2(x)
- x = self.actv(x)
- x = self.conv2(x)
- return x
-
- def forward(self, x):
- x = self._shortcut(x) + self._residual(x)
- return x / math.sqrt(2) # unit variance
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/transformPen.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/transformPen.py
deleted file mode 100644
index 2e572f612e6a29d0a782a0b278deaed9f98f5127..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/transformPen.py
+++ /dev/null
@@ -1,111 +0,0 @@
-from fontTools.pens.filterPen import FilterPen, FilterPointPen
-
-
-__all__ = ["TransformPen", "TransformPointPen"]
-
-
-class TransformPen(FilterPen):
-
- """Pen that transforms all coordinates using a Affine transformation,
- and passes them to another pen.
- """
-
- def __init__(self, outPen, transformation):
- """The 'outPen' argument is another pen object. It will receive the
- transformed coordinates. The 'transformation' argument can either
- be a six-tuple, or a fontTools.misc.transform.Transform object.
- """
- super(TransformPen, self).__init__(outPen)
- if not hasattr(transformation, "transformPoint"):
- from fontTools.misc.transform import Transform
-
- transformation = Transform(*transformation)
- self._transformation = transformation
- self._transformPoint = transformation.transformPoint
- self._stack = []
-
- def moveTo(self, pt):
- self._outPen.moveTo(self._transformPoint(pt))
-
- def lineTo(self, pt):
- self._outPen.lineTo(self._transformPoint(pt))
-
- def curveTo(self, *points):
- self._outPen.curveTo(*self._transformPoints(points))
-
- def qCurveTo(self, *points):
- if points[-1] is None:
- points = self._transformPoints(points[:-1]) + [None]
- else:
- points = self._transformPoints(points)
- self._outPen.qCurveTo(*points)
-
- def _transformPoints(self, points):
- transformPoint = self._transformPoint
- return [transformPoint(pt) for pt in points]
-
- def closePath(self):
- self._outPen.closePath()
-
- def endPath(self):
- self._outPen.endPath()
-
- def addComponent(self, glyphName, transformation):
- transformation = self._transformation.transform(transformation)
- self._outPen.addComponent(glyphName, transformation)
-
-
-class TransformPointPen(FilterPointPen):
- """PointPen that transforms all coordinates using a Affine transformation,
- and passes them to another PointPen.
-
- >>> from fontTools.pens.recordingPen import RecordingPointPen
- >>> rec = RecordingPointPen()
- >>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5))
- >>> v = iter(rec.value)
- >>> pen.beginPath(identifier="contour-0")
- >>> next(v)
- ('beginPath', (), {'identifier': 'contour-0'})
- >>> pen.addPoint((100, 100), "line")
- >>> next(v)
- ('addPoint', ((190, 205), 'line', False, None), {})
- >>> pen.endPath()
- >>> next(v)
- ('endPath', (), {})
- >>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0")
- >>> next(v)
- ('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'})
- """
-
- def __init__(self, outPointPen, transformation):
- """The 'outPointPen' argument is another point pen object.
- It will receive the transformed coordinates.
- The 'transformation' argument can either be a six-tuple, or a
- fontTools.misc.transform.Transform object.
- """
- super().__init__(outPointPen)
- if not hasattr(transformation, "transformPoint"):
- from fontTools.misc.transform import Transform
-
- transformation = Transform(*transformation)
- self._transformation = transformation
- self._transformPoint = transformation.transformPoint
-
- def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
- self._outPen.addPoint(
- self._transformPoint(pt), segmentType, smooth, name, **kwargs
- )
-
- def addComponent(self, baseGlyphName, transformation, **kwargs):
- transformation = self._transformation.transform(transformation)
- self._outPen.addComponent(baseGlyphName, transformation, **kwargs)
-
-
-if __name__ == "__main__":
- from fontTools.pens.basePen import _TestPen
-
- pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
- pen.moveTo((0, 0))
- pen.lineTo((0, 100))
- pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
- pen.closePath()
diff --git a/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/suncc/stdatomic.h b/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/suncc/stdatomic.h
deleted file mode 100644
index 0cf89e0f78d79d48b411f6e4a8450030aee757d7..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/suncc/stdatomic.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef COMPAT_ATOMICS_SUNCC_STDATOMIC_H
-#define COMPAT_ATOMICS_SUNCC_STDATOMIC_H
-
-#include <atomic.h>
-#include <mbarrier.h>
-#include <stdint.h>
-#include <stdbool.h>
-
-#define ATOMIC_FLAG_INIT 0
-
-#define ATOMIC_VAR_INIT(value) (value)
-
-#define atomic_init(obj, value) \
-do { \
- *(obj) = (value); \
-} while(0)
-
-#define kill_dependency(y) ((void)0)
-
-#define atomic_thread_fence(order) \
- __machine_rw_barrier();
-
-#define atomic_signal_fence(order) \
- ((void)0)
-
-#define atomic_is_lock_free(obj) 0
-
-typedef intptr_t atomic_flag;
-typedef intptr_t atomic_bool;
-typedef intptr_t atomic_char;
-typedef intptr_t atomic_schar;
-typedef intptr_t atomic_uchar;
-typedef intptr_t atomic_short;
-typedef intptr_t atomic_ushort;
-typedef intptr_t atomic_int;
-typedef intptr_t atomic_uint;
-typedef intptr_t atomic_long;
-typedef intptr_t atomic_ulong;
-typedef intptr_t atomic_llong;
-typedef intptr_t atomic_ullong;
-typedef intptr_t atomic_wchar_t;
-typedef intptr_t atomic_int_least8_t;
-typedef intptr_t atomic_uint_least8_t;
-typedef intptr_t atomic_int_least16_t;
-typedef intptr_t atomic_uint_least16_t;
-typedef intptr_t atomic_int_least32_t;
-typedef intptr_t atomic_uint_least32_t;
-typedef intptr_t atomic_int_least64_t;
-typedef intptr_t atomic_uint_least64_t;
-typedef intptr_t atomic_int_fast8_t;
-typedef intptr_t atomic_uint_fast8_t;
-typedef intptr_t atomic_int_fast16_t;
-typedef intptr_t atomic_uint_fast16_t;
-typedef intptr_t atomic_int_fast32_t;
-typedef intptr_t atomic_uint_fast32_t;
-typedef intptr_t atomic_int_fast64_t;
-typedef intptr_t atomic_uint_fast64_t;
-typedef intptr_t atomic_intptr_t;
-typedef intptr_t atomic_uintptr_t;
-typedef intptr_t atomic_size_t;
-typedef intptr_t atomic_ptrdiff_t;
-typedef intptr_t atomic_intmax_t;
-typedef intptr_t atomic_uintmax_t;
-
-static inline void atomic_store(intptr_t *object, intptr_t desired)
-{
- *object = desired;
- __machine_rw_barrier();
-}
-
-#define atomic_store_explicit(object, desired, order) \
- atomic_store(object, desired)
-
-static inline intptr_t atomic_load(intptr_t *object)
-{
- __machine_rw_barrier();
- return *object;
-}
-
-#define atomic_load_explicit(object, order) \
- atomic_load(object)
-
-#define atomic_exchange(object, desired) \
- atomic_swap_ptr(object, desired)
-
-#define atomic_exchange_explicit(object, desired, order) \
- atomic_exchange(object, desired)
-
-static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *expected,
- intptr_t desired)
-{
- intptr_t old = *expected;
- *expected = (intptr_t)atomic_cas_ptr(object, (void *)old, (void *)desired);
- return *expected == old;
-}
-
-#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \
- atomic_compare_exchange_strong(object, expected, desired)
-
-#define atomic_compare_exchange_weak(object, expected, desired) \
- atomic_compare_exchange_strong(object, expected, desired)
-
-#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \
- atomic_compare_exchange_weak(object, expected, desired)
-
-static inline intptr_t atomic_fetch_add(intptr_t *object, intptr_t operand)
-{
- return atomic_add_ptr_nv(object, operand) - operand;
-}
-
-#define atomic_fetch_sub(object, operand) \
- atomic_fetch_add(object, -(operand))
-
-static inline intptr_t atomic_fetch_or(intptr_t *object, intptr_t operand)
-{
- intptr_t old;
- do {
- old = atomic_load(object);
- } while (!atomic_compare_exchange_strong(object, &old, old | operand));
- return old;
-}
-
-static inline intptr_t atomic_fetch_xor(intptr_t *object, intptr_t operand)
-{
- intptr_t old;
- do {
- old = atomic_load(object);
- } while (!atomic_compare_exchange_strong(object, &old, old ^ operand));
- return old;
-}
-
-static inline intptr_t atomic_fetch_and(intptr_t *object, intptr_t operand)
-{
- intptr_t old;
- do {
- old = atomic_load(object);
- } while (!atomic_compare_exchange_strong(object, &old, old & operand));
- return old;
-}
-
-#define atomic_fetch_add_explicit(object, operand, order) \
- atomic_fetch_add(object, operand)
-
-#define atomic_fetch_sub_explicit(object, operand, order) \
- atomic_fetch_sub(object, operand)
-
-#define atomic_fetch_or_explicit(object, operand, order) \
- atomic_fetch_or(object, operand)
-
-#define atomic_fetch_xor_explicit(object, operand, order) \
- atomic_fetch_xor(object, operand)
-
-#define atomic_fetch_and_explicit(object, operand, order) \
- atomic_fetch_and(object, operand)
-
-#define atomic_flag_test_and_set(object) \
- atomic_exchange(object, 1)
-
-#define atomic_flag_test_and_set_explicit(object, order) \
- atomic_flag_test_and_set(object)
-
-#define atomic_flag_clear(object) \
- atomic_store(object, 0)
-
-#define atomic_flag_clear_explicit(object, order) \
- atomic_flag_clear(object)
-
-#endif /* COMPAT_ATOMICS_SUNCC_STDATOMIC_H */
diff --git a/spaces/colakin/video-generater/public/ffmpeg/doc/style.min.css b/spaces/colakin/video-generater/public/ffmpeg/doc/style.min.css
deleted file mode 100644
index 6843fda57d03d5ed37c7d952248e6b042755e2df..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/doc/style.min.css
+++ /dev/null
@@ -1,23 +0,0 @@
-/*!
-The MIT License (MIT)
-
-Copyright (c) 2014 Barbara Lepage
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
- */body{background-color:#313131;color:#e6e6e6;text-align:justify}body, h1, h2, h3, h4, h5, h6{font-family:"Lucida Grande","Lucida Sans Unicode","Lucida Sans","Helvetica Neue",Helvetica,Verdana,Tahoma,sans-serif}a{color:#4cae4c}a strong{color:#e6e6e6}a:hover{color:#7fc77f}a:hover strong{color:#4cae4c}main{width:100% ! important;min-height:600px;margin:auto}h1, h2, h3, h4{font-weight:bold;text-align:left}h1, h2, h3{color:#bebebe}h1 strong, h2 strong, h3 strong{color:#e6e6e6}h4, h5, h6{color:#3c8b3c}h1{border-bottom:4px #bebebe solid;padding:20px 2%}h3{border-bottom:2px #bebebe solid;padding:15px 1%}h4{border-bottom:1px solid #e6e6e6;padding:10px 0;margin:20px 0;color:#e6e6e6}.list-group .list-group-item{background-color:#3e3e3e;border-color:black}.list-group.list-group-big .list-group-item{padding:25px}.list-group a.list-group-item{color:#7fc77f}.list-group a.list-group-item:hover{background-color:#313131;color:#4cae4c}.well{background-color:#242424;border-color:black;color:#bebebe}.well strong{color:#e6e6e6}.well code{background-color:#313131}.well hr{border-color:#3c8b3c}.well h3{margin:5px 0 15px 0;border:0;padding:0}.well a{color:#4cae4c}.well a.btn{color:white}.well small{display:block;padding:0 10px;font-style:italic}.well.example{padding-top:40px;margin-bottom:130px}.well.example pre{margin:50px;margin-bottom:30px;font-size:1.5em}.well.example .btn{margin-right:50px;margin-bottom:20px}.well.well-with-icon{min-height:136px}.well.well-with-icon .pull-right,.well.well-with-icon .pull-left{background-color:#4cae4c;color:#e6e6e6;padding:10px;border-radius:5px;margin:5px}.well.well-with-icon .pull-right{margin-left:20px}.well.well-with-icon .pull-left{margin-right:20px}a.well{display:block}a.well:hover{text-decoration:none;opacity:0.8}.info, .warning{margin:10px;padding:10px;background-color:#3e3e3e;color:#e6e6e6}.info code, .warning code{background-color:#313131}.info{border-left:10px #4cae4c solid}.warning{border-left:10px #ae4c4c solid}.with-icon{padding:30px}.with-icon .pull-left{padding-right:30px}.with-icon .pull-right{padding-left:30px}dd{margin-left:20px}code{background-color:#242424;color:#7fc77f;display:inline-block;margin:5px}.table{margin:20px 0;border-radius:4px}.table th,.table td,.table tr{border:1px solid #171717}.table tr th{background-color:#3e3e3e;border-bottom:2px solid #e6e6e6}.table tr:nth-child(odd){background-color:#242424}#sidebar-wrapper, .navbar{background-color:#171717;overflow-x:hidden}#sidebar-wrapper .sidebar-brand img,#sidebar-wrapper .navbar-brand img, .navbar .sidebar-brand img, .navbar .navbar-brand img{opacity:0.6;margin-right:8px}#sidebar-wrapper .sidebar-brand:hover,#sidebar-wrapper .navbar-brand:hover, .navbar .sidebar-brand:hover, .navbar .navbar-brand:hover{color:#fff}#sidebar-wrapper .sidebar-brand:hover img,#sidebar-wrapper .navbar-brand:hover img, .navbar .sidebar-brand:hover img, .navbar .navbar-brand:hover img{opacity:1}#sidebar-wrapper .sidebar-nav li ul, .navbar .sidebar-nav li ul{list-style-type:none;padding:0}#sidebar-wrapper .sidebar-nav li ul li, .navbar .sidebar-nav li ul li{line-height:20px}#sidebar-wrapper .sidebar-nav li ul li a, .navbar .sidebar-nav li ul li a{padding-left:20px}.content-header{height:auto;background-color:#242424}.content-header h1{color:#e6e6e6;display:block;margin:0;margin-bottom:20px;line-height:normal;border-bottom:none}#download h4, #index h4{margin-top:180px}#download h4.first, #index h4.first{margin-top:20px}#download h4.first small, #index h4.first small{color:inherit;font-size:1em}#download 
.btn-download-wrapper, #index .btn-download-wrapper{text-align:center;margin:160px auto}#download .btn-download-wrapper .btn, #index .btn-download-wrapper .btn{font-size:3em;padding:3%;display:inline-block;margin-bottom:5px}#download .btn-download-wrapper small, #index .btn-download-wrapper small{display:block;font-size:0.4em}#download h2.description, #index h2.description{color:#e6e6e6;font-size:2em;font-weight:bold;margin:120px 50px;line-height:2em}#download h2.description .label, #index h2.description .label{font-size:0.5em}#download .btn-download-wrapper{margin:40px auto}#download .os-selector{text-align:center;color:#e6e6e6;margin:30px 0}#download .os-selector a.btn-build{color:#e6e6e6;display:block;padding:20px;border-radius:2px}#download .os-selector .btn-build[href="#build-linux"]{background-color:#e43}#download .os-selector .btn-build[href="#build-linux"]:hover{color:#e43;background-color:#e6e6e6}#download .os-selector .btn-build[href="#build-windows"]{background-color:#06a}#download .os-selector .btn-build[href="#build-windows"]:hover{color:#06a;background-color:#e6e6e6}#download .os-selector .btn-build[href="#build-mac"]{background-color:darkgrey}#download .os-selector .btn-build[href="#build-mac"]:hover{color:darkgrey;background-color:#e6e6e6}#download .os-selector .tab-content{margin-top:20px}#download .os-selector #build-linux h3{color:#e43}#download .os-selector #build-windows h3{color:#06a}#download .os-selector #build-mac h3{color:darkgrey}footer{background-color:#242424;border-top:1px #101010 solid;padding:20px 0%}footer a{display:block}footer img[alt="FFmpeg"]{width:50%;display:block;margin:auto}
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vorbisdsp_init_arm.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vorbisdsp_init_arm.c
deleted file mode 100644
index acda34f46841fffe720fb7d2f609f4256766d972..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vorbisdsp_init_arm.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * ARM NEON optimised DSP functions
- * Copyright (c) 2008 Mans Rullgard
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/attributes.h"
-#include "libavutil/cpu.h"
-#include "libavutil/arm/cpu.h"
-#include "libavcodec/vorbisdsp.h"
-
-void ff_vorbis_inverse_coupling_neon(float *mag, float *ang,
- ptrdiff_t blocksize);
-
-av_cold void ff_vorbisdsp_init_arm(VorbisDSPContext *c)
-{
- int cpu_flags = av_get_cpu_flags();
-
- if (have_neon(cpu_flags)) {
- c->vorbis_inverse_coupling = ff_vorbis_inverse_coupling_neon;
- }
-}
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cfhddsp.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cfhddsp.c
deleted file mode 100644
index a141db5246999c35e52d251f54d3125483bbb710..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cfhddsp.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2015-2016 Kieran Kunhya
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/attributes.h"
-#include "libavutil/common.h"
-
-#include "cfhddsp.h"
-
-static av_always_inline void filter(int16_t *output, ptrdiff_t out_stride,
- const int16_t *low, ptrdiff_t low_stride,
- const int16_t *high, ptrdiff_t high_stride,
- int len, int clip)
-{
- int16_t tmp;
- int i;
-
- tmp = (11*low[0*low_stride] - 4*low[1*low_stride] + low[2*low_stride] + 4) >> 3;
- output[(2*0+0)*out_stride] = (tmp + high[0*high_stride]) >> 1;
- if (clip)
- output[(2*0+0)*out_stride] = av_clip_uintp2_c(output[(2*0+0)*out_stride], clip);
-
- tmp = ( 5*low[0*low_stride] + 4*low[1*low_stride] - low[2*low_stride] + 4) >> 3;
- output[(2*0+1)*out_stride] = (tmp - high[0*high_stride]) >> 1;
- if (clip)
- output[(2*0+1)*out_stride] = av_clip_uintp2_c(output[(2*0+1)*out_stride], clip);
-
- for (i = 1; i < len - 1; i++) {
- tmp = (low[(i-1)*low_stride] - low[(i+1)*low_stride] + 4) >> 3;
- output[(2*i+0)*out_stride] = (tmp + low[i*low_stride] + high[i*high_stride]) >> 1;
- if (clip)
- output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
-
- tmp = (low[(i+1)*low_stride] - low[(i-1)*low_stride] + 4) >> 3;
- output[(2*i+1)*out_stride] = (tmp + low[i*low_stride] - high[i*high_stride]) >> 1;
- if (clip)
- output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
- }
-
- tmp = ( 5*low[i*low_stride] + 4*low[(i-1)*low_stride] - low[(i-2)*low_stride] + 4) >> 3;
- output[(2*i+0)*out_stride] = (tmp + high[i*high_stride]) >> 1;
- if (clip)
- output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);
-
- tmp = (11*low[i*low_stride] - 4*low[(i-1)*low_stride] + low[(i-2)*low_stride] + 4) >> 3;
- output[(2*i+1)*out_stride] = (tmp - high[i*high_stride]) >> 1;
- if (clip)
- output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
-}
-
-static void vert_filter(int16_t *output, ptrdiff_t out_stride,
- const int16_t *low, ptrdiff_t low_stride,
- const int16_t *high, ptrdiff_t high_stride,
- int width, int height)
-{
- for (int i = 0; i < width; i++) {
- filter(output, out_stride, low, low_stride, high, high_stride, height, 0);
- low++;
- high++;
- output++;
- }
-}
-
-static void horiz_filter(int16_t *output, ptrdiff_t ostride,
- const int16_t *low, ptrdiff_t lstride,
- const int16_t *high, ptrdiff_t hstride,
- int width, int height)
-{
- for (int i = 0; i < height; i++) {
- filter(output, 1, low, 1, high, 1, width, 0);
- low += lstride;
- high += hstride;
- output += ostride * 2;
- }
-}
-
-static void horiz_filter_clip(int16_t *output, const int16_t *low, const int16_t *high,
- int width, int clip)
-{
- filter(output, 1, low, 1, high, 1, width, clip);
-}
-
-static void horiz_filter_clip_bayer(int16_t *output, const int16_t *low, const int16_t *high,
- int width, int clip)
-{
- filter(output, 2, low, 1, high, 1, width, clip);
-}
-
-av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer)
-{
- c->horiz_filter = horiz_filter;
- c->vert_filter = vert_filter;
-
- if (bayer)
- c->horiz_filter_clip = horiz_filter_clip_bayer;
- else
- c->horiz_filter_clip = horiz_filter_clip;
-
-#if ARCH_X86
- ff_cfhddsp_init_x86(c, depth, bayer);
-#endif
-}
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/codec_par.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/codec_par.h
deleted file mode 100644
index f51d27c5908449354badf3e00cf5e3002a4f97ac..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/codec_par.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Codec parameters public API
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_CODEC_PAR_H
-#define AVCODEC_CODEC_PAR_H
-
-#include <stdint.h>
-
-#include "libavutil/avutil.h"
-#include "libavutil/channel_layout.h"
-#include "libavutil/rational.h"
-#include "libavutil/pixfmt.h"
-
-#include "codec_id.h"
-
-/**
- * @addtogroup lavc_core
- * @{
- */
-
-enum AVFieldOrder {
- AV_FIELD_UNKNOWN,
- AV_FIELD_PROGRESSIVE,
- AV_FIELD_TT, ///< Top coded first, top displayed first
- AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
- AV_FIELD_TB, ///< Top coded first, bottom displayed first
- AV_FIELD_BT, ///< Bottom coded first, top displayed first
-};
-
-/**
- * This struct describes the properties of an encoded stream.
- *
- * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must
- * be allocated with avcodec_parameters_alloc() and freed with
- * avcodec_parameters_free().
- */
-typedef struct AVCodecParameters {
- /**
- * General type of the encoded data.
- */
- enum AVMediaType codec_type;
- /**
- * Specific type of the encoded data (the codec used).
- */
- enum AVCodecID codec_id;
- /**
- * Additional information about the codec (corresponds to the AVI FOURCC).
- */
- uint32_t codec_tag;
-
- /**
- * Extra binary data needed for initializing the decoder, codec-dependent.
- *
- * Must be allocated with av_malloc() and will be freed by
- * avcodec_parameters_free(). The allocated size of extradata must be at
- * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding
- * bytes zeroed.
- */
- uint8_t *extradata;
- /**
- * Size of the extradata content in bytes.
- */
- int extradata_size;
-
- /**
- * - video: the pixel format, the value corresponds to enum AVPixelFormat.
- * - audio: the sample format, the value corresponds to enum AVSampleFormat.
- */
- int format;
-
- /**
- * The average bitrate of the encoded data (in bits per second).
- */
- int64_t bit_rate;
-
- /**
- * The number of bits per sample in the codewords.
- *
- * This is basically the bitrate per sample. It is mandatory for a bunch of
- * formats to actually decode them. It's the number of bits for one sample in
- * the actual coded bitstream.
- *
- * This could be for example 4 for ADPCM
- * For PCM formats this matches bits_per_raw_sample
- * Can be 0
- */
- int bits_per_coded_sample;
-
- /**
- * This is the number of valid bits in each output sample. If the
- * sample format has more bits, the least significant bits are additional
- * padding bits, which are always 0. Use right shifts to reduce the sample
- * to its actual size. For example, audio formats with 24 bit samples will
- * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32.
- * To get the original sample use "(int32_t)sample >> 8".
- *
- * For ADPCM this might be 12 or 16 or similar
- * Can be 0
- */
- int bits_per_raw_sample;
-
- /**
- * Codec-specific bitstream restrictions that the stream conforms to.
- */
- int profile;
- int level;
-
- /**
- * Video only. The dimensions of the video frame in pixels.
- */
- int width;
- int height;
-
- /**
- * Video only. The aspect ratio (width / height) which a single pixel
- * should have when displayed.
- *
- * When the aspect ratio is unknown / undefined, the numerator should be
- * set to 0 (the denominator may have any value).
- */
- AVRational sample_aspect_ratio;
-
- /**
- * Video only. The order of the fields in interlaced video.
- */
- enum AVFieldOrder field_order;
-
- /**
- * Video only. Additional colorspace characteristics.
- */
- enum AVColorRange color_range;
- enum AVColorPrimaries color_primaries;
- enum AVColorTransferCharacteristic color_trc;
- enum AVColorSpace color_space;
- enum AVChromaLocation chroma_location;
-
- /**
- * Video only. Number of delayed frames.
- */
- int video_delay;
-
-#if FF_API_OLD_CHANNEL_LAYOUT
- /**
- * Audio only. The channel layout bitmask. May be 0 if the channel layout is
- * unknown or unspecified, otherwise the number of bits set must be equal to
- * the channels field.
- * @deprecated use ch_layout
- */
- attribute_deprecated
- uint64_t channel_layout;
- /**
- * Audio only. The number of audio channels.
- * @deprecated use ch_layout.nb_channels
- */
- attribute_deprecated
- int channels;
-#endif
- /**
- * Audio only. The number of audio samples per second.
- */
- int sample_rate;
- /**
- * Audio only. The number of bytes per coded audio frame, required by some
- * formats.
- *
- * Corresponds to nBlockAlign in WAVEFORMATEX.
- */
- int block_align;
- /**
- * Audio only. Audio frame size, if known. Required by some formats to be static.
- */
- int frame_size;
-
- /**
- * Audio only. The amount of padding (in samples) inserted by the encoder at
- * the beginning of the audio. I.e. this number of leading decoded samples
- * must be discarded by the caller to get the original audio without leading
- * padding.
- */
- int initial_padding;
- /**
- * Audio only. The amount of padding (in samples) appended by the encoder to
- * the end of the audio. I.e. this number of decoded samples must be
- * discarded by the caller from the end of the stream to get the original
- * audio without any trailing padding.
- */
- int trailing_padding;
- /**
- * Audio only. Number of samples to skip after a discontinuity.
- */
- int seek_preroll;
-
- /**
- * Audio only. The channel layout and number of channels.
- */
- AVChannelLayout ch_layout;
-} AVCodecParameters;
-
-/**
- * Allocate a new AVCodecParameters and set its fields to default values
- * (unknown/invalid/0). The returned struct must be freed with
- * avcodec_parameters_free().
- */
-AVCodecParameters *avcodec_parameters_alloc(void);
-
-/**
- * Free an AVCodecParameters instance and everything associated with it and
- * write NULL to the supplied pointer.
- */
-void avcodec_parameters_free(AVCodecParameters **par);
-
-/**
- * Copy the contents of src to dst. Any allocated fields in dst are freed and
- * replaced with newly allocated duplicates of the corresponding fields in src.
- *
- * @return >= 0 on success, a negative AVERROR code on failure.
- */
-int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src);
-
-/**
- * This function is the same as av_get_audio_frame_duration(), except it works
- * with AVCodecParameters instead of an AVCodecContext.
- */
-int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes);
-
-/**
- * @}
- */
-
-#endif // AVCODEC_CODEC_PAR_H
diff --git a/spaces/compasspathways/Sentiment3D/app.py b/spaces/compasspathways/Sentiment3D/app.py
deleted file mode 100644
index 5e238f5e106f457a54d6a70f8dec022210072454..0000000000000000000000000000000000000000
--- a/spaces/compasspathways/Sentiment3D/app.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import gradio as gr
-import pandas as pd
-from sentiment3d import Sentiment3D
-
-s3d = Sentiment3D()
-
-TITLE = "COMPASS Pathways: 3D Sentiment Model"
-EXAMPLES = [
- "This is so awesome!",
- "You're driving me up the wall!",
- "I'm so lonely I could cry.",
- "I'm not feeling very sad at all.",
- "A day without sunshine is like, you know, night.",
- "Yes, that's how I feel [laughing].",
- "Yes, that's how I feel [sobbing].",
- "Now I hear what you're sayin' 😀",
- "Now I hear what you're sayin' 🙁",
-]
-
-
-def sentiment(text, state):
- sent = s3d(text)
- res = dict(text=text, valence=sent['valence'], arousal=sent['arousal'], confidence=sent['confidence'], words=len(text.split()))
- #if clear_history:
- # state = []
- if state is None:
- state = []
- state.append(res)
- df = pd.DataFrame(state)
- res_txt = [
- f"{r['text']}: \n valence = {r['valence']:0.3f}, arousal = {r['arousal']:0.3f}, confidence = {r['confidence']:0.3f}"
- for r in state
- ]
- return "\n".join(res_txt), df, df, df, state
-
-
-iface = gr.Interface(
- fn=sentiment,
- inputs=[gr.Textbox(lines=1, placeholder="Text for 3d sentiment..."), "state"],
- outputs=[
- gr.Textbox(lines=5, max_lines=5, label="Results"),
- gr.ScatterPlot(
- x="valence",
- y="arousal",
- tooltip="text",
- size="words",
- size_legend_position="none",
- interactive=False,
- x_lim=[-1.05, 1.05],
- y_lim=[-1.05, 1.05],
- ),
- gr.ScatterPlot(
- x="valence",
- y="confidence",
- tooltip="text",
- size="words",
- size_legend_position="none",
- interactive=False,
- x_lim=[-1.05, 1.05],
- y_lim=[-1.05, 1.05],
- ),
- gr.ScatterPlot(
- x="arousal",
- y="confidence",
- tooltip="text",
- size="words",
- size_legend_position="none",
- interactive=False,
- x_lim=[-1.05, 1.05],
- y_lim=[-1.05, 1.05],
- ),
- "state",
- ],
- title=TITLE,
- examples=EXAMPLES
-)
-iface.launch()
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Talking Tom Hero Dash Full APK - The Most Fun and Addictive Endless Runner Game.md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy Talking Tom Hero Dash Full APK - The Most Fun and Addictive Endless Runner Game.md
deleted file mode 100644
index 19dd80db7f2fd9c267f41f723fe7d338ac5d0b70..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Talking Tom Hero Dash Full APK - The Most Fun and Addictive Endless Runner Game.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-Talking Tom Hero Dash Full APK: A Fun and Action-Packed Game for Android
-Do you love running games? Do you enjoy playing with cute and funny characters? Do you want to experience an epic adventure with amazing graphics and sound effects? If you answered yes to any of these questions, then you should try Talking Tom Hero Dash , one of the most popular games on Google Play Store.
-Talking Tom Hero Dash is a fun and action-packed game that features your favorite characters from the Talking Tom series. You can play as Tom, Angela, Hank, Ginger, or Ben, each with their own superpowers and vehicles. Your mission is to run through different worlds, collect coins and power-ups, defeat raccoon bosses, save your friends, and rebuild your city.
-Talking Tom Hero Dash is more than just a running game. It also has elements of strategy, customization, humor, and social interaction. You can unlock new outfits and vehicles for your heroes, upgrade your city with cool buildings and decorations, watch funny videos of your heroes' antics, and share your achievements with your friends online.
-If you want to enjoy all these features and more, you should download Talking Tom Hero Dash Full APK , which is the full version of the game that includes all the updates and features. You can download it for free from a trusted source and install it on your Android device in a few simple steps. Here's how:
- How to Download Talking Tom Hero Dash Full APK
-Step 1: Enable Unknown Sources on Your Device
-Before you can install the APK file, you need to allow your device to install apps from sources other than Google Play Store. To do this, go to your device's settings and look for the security or privacy option. Then, find the option that says "Unknown sources" or "Allow installation of apps from unknown sources" and enable it. You may see a warning message that says installing apps from unknown sources can harm your device, but don't worry, as long as you download the APK file from a reliable source, you should be fine.
-Step 2: Download the APK File from a Trusted Source
-Now that you have enabled unknown sources, you can download the APK file from a trusted source. There are many websites that offer APK files for free, but not all of them are safe and reliable. Some of them may contain malware or viruses that can harm your device or steal your personal information. To avoid this, you should only download the APK file from a reputable website that has positive reviews and ratings from other users. One such website is APKPure.com , which is one of the most popular and trusted sources for APK files. You can download Talking Tom Hero Dash Full APK from this link: https://apkpure.com/talking-tom-hero-dash-run-game/com.outfit7.herodash/download?from=details .
-Step 3: Install and Launch the Game
-Once you have downloaded the APK file, you can install it on your device by tapping on it and following the instructions on the screen. The installation process may take a few minutes, depending on your device's speed and memory. After the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You may see a message that asks you to grant some permissions to the game, such as access to your storage, camera, microphone, or location. You can allow or deny these permissions as you wish, but keep in mind that some features of the game may not work properly if you deny them.
- How to Play Talking Tom Hero Dash
-Choose Your Hero
-When you start the game, you will see a screen that shows you the five heroes that you can play as: Tom, Angela, Hank, Ginger, or Ben. Each hero has their own superpower and vehicle that they can use in the game. For example, Tom can fly with his jetpack, Angela can use her pink scooter, Hank can shoot lasers with his glasses, Ginger can throw fireballs with his slingshot, and Ben can hack computers with his laptop. You can choose any hero that you like by tapping on their picture. You can also switch heroes anytime during the game by tapping on the hero icon at the top left corner of the screen.
-Run, Jump, and Slide Through Different Worlds
-The main gameplay of Talking Tom Hero Dash is similar to other running games such as Subway Surfers, Temple Run, or Sonic Dash. You have to control your hero's movements by swiping left or right to change lanes, swiping up to jump over obstacles, and swiping down to slide under them. You have to avoid crashing into obstacles such as cars, buses, trains, barrels, fences, or raccoons. If you crash into an obstacle three times in a row, you will lose the game and have to start over.
-The game has different worlds that you can run through, such as city streets, tropical islands, Chinese temples, snowy mountains, or space stations. Each world has its own theme and challenges that make it unique and fun. You can unlock new worlds by collecting enough stars in each level. Stars are awarded based on how far you run, how many coins you collect, how many raccoons you defeat, and how many friends you save.
-Collect Coins, Power-Ups, and Rewards
-As you run through the worlds, you will see coins scattered along the way. You can collect these coins by running over them or using power-ups such as magnets that attract them to you. Coins are used to buy new outfits and vehicles for your heroes or upgrade your city with new buildings and decorations.
-You will also see power-ups that appear randomly on the road. These power-ups can help you run faster, avoid obstacles, or defeat raccoons. Some of the power-ups are shields that protect you from crashes, rockets that launch you into the air, or helmets that make you invincible. You can activate these power-ups by tapping on them when they appear on the screen.
-Besides coins and power-ups, you will also see rewards that you can collect by completing certain tasks or achievements. These rewards include gems, chests, stickers, or cards. Gems are used to revive your hero if you lose the game or to unlock new worlds faster. Chests contain random items such as coins, gems, power-ups, or outfits. Stickers are used to decorate your city with graffiti or posters. Cards are used to unlock new heroes or upgrade their abilities.
-Defeat Raccoon Bosses and Save Your Friends
-The main villains of Talking Tom Hero Dash are the raccoons, who have invaded your city and kidnapped your friends. You have to fight against them and rescue your friends in each world. The raccoons come in different sizes and colors, and they have different weapons and attacks. Some of them throw bombs, some of them shoot lasers, and some of them ride motorcycles or helicopters. You have to dodge their attacks and hit them with your superpower or power-ups until they are defeated.
-At the end of each world, you will face a raccoon boss, who is bigger and stronger than the regular raccoons. The raccoon boss has a health bar that shows how much damage you have done to him. You have to hit him multiple times with your superpower or power-ups until his health bar is empty. Once you defeat the raccoon boss, you will free one of your friends who has been trapped in a cage. You can then play as that friend in the next world.
-Customize Your Heroes and Their Vehicles
-One of the most fun features of Talking Tom Hero Dash is that you can customize your heroes and their vehicles with different outfits and accessories. You can change their clothes, hats, glasses, shoes, masks, or capes. You can also change their vehicles' colors, wheels, stickers, or lights. You can buy these items with coins or gems, or get them from chests or cards. You can also mix and match different items to create your own unique style.
-Customizing your heroes and their vehicles not only makes them look cool, but also gives them some advantages in the game. For example, some outfits give you extra coins or gems, some vehicles give you extra speed or power-ups, and some combinations give you special effects or bonuses. You can see these benefits by tapping on the item's icon before buying it.
- How Does Talking Tom Hero Dash Compare with Other Games?
-Talking Tom Hero Dash is not the only running game available on Google Play Store. There are many other games that have similar gameplay and features, such as Subway Surfers, Temple Run, or Sonic Dash. How does Talking Tom Hero Dash compare with these games? Here is a table that shows some of the differences and similarities between them:
-
-
-| Game | Graphics | Gameplay | Ratings |
-| --- | --- | --- | --- |
-| Talking Tom Hero Dash | 3D cartoon style with bright colors and detailed animations | Run through different worlds with various themes and challenges; collect coins and power-ups; defeat raccoon bosses and save your friends; customize your heroes and their vehicles; rebuild your city | 4.5 stars out of 5 based on 2 million reviews |
-| Subway Surfers | 3D cartoon style with vibrant colors and smooth animations | Run through different cities with diverse cultures and landmarks; collect coins and power-ups; dodge trains and obstacles; unlock new characters and boards; join seasonal events | 4.4 stars out of 5 based on 36 million reviews |
-| Temple Run | 3D realistic style with dark colors and dynamic shadows | Run through ancient temples with mysterious secrets and dangers; collect coins and power-ups; avoid traps and obstacles; unlock new characters and abilities; escape from evil monkeys | 4.1 stars out of 5 based on 5 million reviews |
-| Sonic Dash | 3D cartoon style with vivid colors and fast-paced animations | Run through iconic locations from the Sonic universe; collect rings and power-ups; dash and spin to destroy enemies; unlock new characters and skills; join epic boss battles | 4.4 stars out of 5 based on 5 million reviews |
-
-
- As you can see, Talking Tom Hero Dash has some advantages and disadvantages compared to other games. It has better graphics, more gameplay options, and higher ratings than Temple Run, but it has less variety, more ads, and far fewer reviews than Subway Surfers or Sonic Dash. Ultimately, the choice of which game to play depends on your personal preference and taste. However, if you are looking for a fun and action-packed game that features your favorite Talking Tom characters, then you should definitely give Talking Tom Hero Dash a try.
- Conclusion
-Talking Tom Hero Dash is a fun and action-packed game that you can download and play on your Android device for free. It features your favorite Talking Tom characters as superheroes who run through different worlds, collect coins and power-ups, defeat raccoon bosses, save their friends, and rebuild their city. You can also customize your heroes and their vehicles with different outfits and accessories, and share your achievements with your friends online. Talking Tom Hero Dash is a game that will keep you entertained and engaged for hours.
-If you want to download Talking Tom Hero Dash Full APK, which is the full version of the game that includes all the updates and features, you can follow the simple steps that we have explained in this article. You just need to enable unknown sources on your device, download the APK file from a trusted source, install and launch the game, and enjoy. It's that easy.
-So what are you waiting for? Download Talking Tom Hero Dash Full APK today and join the adventure with your favorite Talking Tom heroes. You won't regret it.
- FAQs
-Here are some frequently asked questions that you might have about Talking Tom Hero Dash or APK files:
-
-What is an APK file?
-An APK file is an Android Package file that contains all the files and data needed to install an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac.
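Since an APK is really just a ZIP archive laid out in a specific way, you can peek inside one from Python to see what it contains. A minimal sketch (the file name is a placeholder for whatever APK you have on disk):

```python
import zipfile

# An APK is a ZIP archive: list its entries and confirm the manifest is present.
with zipfile.ZipFile("app.apk") as apk:
    names = apk.namelist()
    print(f"{len(names)} entries")
    print("has AndroidManifest.xml:", "AndroidManifest.xml" in names)
    for name in names[:10]:  # show the first few entries
        print(" ", name)
```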
-Why do I need to download an APK file instead of installing the app from Google Play Store?
-Sometimes, you might want to download an APK file instead of installing the app from Google Play Store for various reasons. For example, you might want to access an app that is not available in your region or country, or you might want to get the latest version of an app that has not been updated on Google Play Store yet, or you might want to get the full version of an app that has some features locked or restricted on Google Play Store.
-Is it safe to download and install APK files?
-It depends on where you download the APK files from. Some websites may offer APK files that are infected with malware or viruses that can harm your device or steal your personal information. To avoid this, you should only download APK files from reputable websites that have positive reviews and ratings from other users. You should also scan the APK files with antivirus software before installing them.
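If the download site publishes a checksum for the file, comparing it against the hash of what you actually downloaded is another cheap sanity check. A rough sketch in Python, with the file name and expected hash as placeholders:

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "<hash published by the download site>"
actual = sha256_of("app.apk")
print("checksum OK" if actual == expected else f"checksum mismatch: {actual}")
```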
-How do I update an app that I installed from an APK file?
-If you installed an app from an APK file, you will not receive automatic updates from Google Play Store. You will have to manually download and install the new version of the APK file from the same source that you got it from. Alternatively, you can uninstall the app and install it from Google Play Store if it is available there.
-How do I uninstall an app that I installed from an APK file?
-You can uninstall an app that you installed from an APK file in the same way that you uninstall any other app on your device. You can go to your device's settings and look for the apps or applications option. Then, find the app that you want to uninstall and tap on it. You will see an option that says "Uninstall" or "Remove". Tap on it and confirm your action.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Texas Holdem Poker Online Mod Apk Play Free Poker Games Anytime Anywhere.md b/spaces/congsaPfin/Manga-OCR/logs/Texas Holdem Poker Online Mod Apk Play Free Poker Games Anytime Anywhere.md
deleted file mode 100644
index 105961295b4c5aeef998bb3a56fa9a8ce05567cf..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Texas Holdem Poker Online Mod Apk Play Free Poker Games Anytime Anywhere.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-Texas Holdem Poker Online Mod APK: How to Play and Win
-Do you love playing poker but don't have the time or money to visit a casino? Do you want to enjoy the thrill of winning big without risking your real cash? If you answered yes to any of these questions, then you should try Texas Holdem Poker Online Mod APK. This is a modified version of the popular card game that lets you play online with other players from around the world. You can also get unlimited money, chips, and coins to bet as much as you want. In this article, we will show you how to download, install, and play Texas Holdem Poker Online Mod APK. We will also give you some tips on how to win more often and have more fun.
- Introduction
-What is Texas Holdem Poker Online Mod APK?
-Texas Holdem Poker Online Mod APK is a hacked version of the original game that gives you access to some features that are not available in the official app. For example, you can get unlimited money, chips, and coins to play with. You can also unlock all the game modes, tables, and tournaments that are otherwise restricted or require in-app purchases. You can also enjoy faster loading times, smoother gameplay, and better graphics. With Texas Holdem Poker Online Mod APK, you can experience the best of online poker without spending a dime.
- Why should you play Texas Holdem Poker Online Mod APK?
-There are many reasons why you should play Texas Holdem Poker Online Mod APK. Here are some of them:
-
-You can play online with millions of other players from different countries and regions.
-You can choose from various game modes, such as cash games, tournaments, sit and go, and more.
-You can customize your avatar, profile, and table settings according to your preferences.
-You can chat with other players, send gifts, and make friends.
-You can learn the rules and strategies of Texas Holdem Poker from tutorials, tips, and guides.
-You can improve your skills, confidence, and ranking by playing regularly.
-You can have fun, relax, and relieve stress by playing poker anytime, anywhere.
-
- How to download and install Texas Holdem Poker Online Mod APK?
-Step 1: Find a reliable source for the mod apk file
-The first step to download and install Texas Holdem Poker Online Mod APK is to find a trustworthy website that offers the mod apk file. You can use Google or any other search engine to look for one. However, be careful not to download from shady or suspicious sites that may contain viruses or malware. One of the best sources for the mod apk file is Apkloli.com, which is a website that offers many popular card games. You can get the free download of Texas Holdem Poker Online Mod APK version v123.1.18 here.
- Step 2: Enable unknown sources on your device
-The next step is to enable unknown sources on your device. This is necessary because the mod apk file is not from the official Google Play Store. To do this, go to your device's settings and look for the security or privacy option. Then, find the unknown sources option and toggle it on. This will allow you to install apps from sources other than the Google Play Store.
- Step 3: Download and install the mod apk file
-The final step is to download and install the mod apk file. To do this, go to the website where you found the mod apk file and click on the download button. Wait for the file to be downloaded to your device. Then, locate the file in your device's storage and tap on it. Follow the instructions on the screen to complete the installation process. Once the installation is done, you can launch the app and start playing Texas Holdem Poker Online Mod APK.
- How to play Texas Holdem Poker Online Mod APK?
-Step 1: Create an account or log in with Facebook
-When you open the app for the first time, you will be asked to create an account or log in with Facebook. You can choose either option depending on your preference. Creating an account will require you to enter your username, email, and password. Logging in with Facebook will require you to grant permission to access your profile information. Either way, you will be able to access all the features of Texas Holdem Poker Online Mod APK once you are logged in.
- Step 2: Choose a game mode and a table
-The next step is to choose a game mode and a table that suits your skill level and budget. You can choose from various game modes, such as cash games, tournaments, sit and go, and more. You can also filter the tables by stakes, players, speed, and type. You can join any table that has an empty seat or create your own table and invite your friends. You can also switch tables anytime you want.
- Step 3: Learn the basic rules and strategies of Texas Holdem Poker
-The last step is to learn the basic rules and strategies of Texas Holdem Poker. If you are new to poker, you can check out the tutorials, tips, and guides that are available in the app. You can also practice with free chips or play money before you bet with real money. The basic rules of Texas Holdem Poker are as follows:
-
-Each player is dealt two cards face down, called hole cards.
-A round of betting takes place, starting with the player to the left of the big blind.
-Three cards are dealt face up on the table, called the flop.
-Another round of betting takes place, starting with the player to the left of the dealer.
-A fourth card is dealt face up on the table, called the turn.
-Another round of betting takes place, starting with the player to the left of the dealer.
-A fifth card is dealt face up on the table, called the river.
-A final round of betting takes place, starting with the player to the left of the dealer.
-The remaining players show their cards and compare their best five-card poker hand.
-The player with the best hand wins the pot.
-
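To make the dealing sequence above concrete, here is a small Python sketch that deals hole cards, flop, turn, and river; it is only an illustration and does not model betting, burn cards, or hand ranking:

```python
import random

RANKS = "23456789TJQKA"
SUITS = "shdc"  # spades, hearts, diamonds, clubs

def deal_hand(num_players=4, seed=None):
    rng = random.Random(seed)
    deck = [rank + suit for rank in RANKS for suit in SUITS]
    rng.shuffle(deck)

    # Two hole cards per player, dealt face down.
    holes = [[deck.pop(), deck.pop()] for _ in range(num_players)]

    # Community cards shared by everyone.
    flop = [deck.pop() for _ in range(3)]
    turn = deck.pop()
    river = deck.pop()

    for i, hole in enumerate(holes, 1):
        print(f"Player {i} hole cards: {hole}")
    print("Flop :", flop)
    print("Turn :", turn)
    print("River:", river)

deal_hand(seed=42)
```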
-The basic strategies of Texas Holdem Poker are as follows:
-
-Know when to fold, call, raise, or bluff depending on your cards and position.
-Pay attention to your opponents' actions and patterns and try to guess their cards and intentions.
-Use probability and odds to calculate your chances of winning or losing.
-Avoid tilting or letting your emotions affect your decisions.
-Manage your bankroll wisely and don't bet more than you can afford to lose.
-
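The strategy list above mentions using probability and odds. The usual arithmetic is to compare the pot odds (the share of the final pot you must put in to call) with your estimated chance of winning the hand; a minimal sketch:

```python
def pot_odds(pot, to_call):
    """Fraction of the final pot you have to contribute to call."""
    return to_call / (pot + to_call)

def should_call(win_probability, pot, to_call):
    """Calling is break-even or better when your win probability covers the pot odds."""
    return win_probability >= pot_odds(pot, to_call)

# Example: 100 chips already in the pot, 25 chips to call -> you need to win
# at least 25 / (100 + 25) = 20% of the time for the call to be profitable.
print(pot_odds(100, 25))           # 0.2
print(should_call(0.25, 100, 25))  # True
```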
- How to win Texas Holdem Poker Online Mod APK?
-Tip 1: Use the unlimited money feature to your advantage
-One of the benefits of playing Texas Holdem Poker Online Mod APK is that you can get unlimited money, chips, and coins to play with. This means that you can bet as much as you want without worrying about running out of money. You can also use this feature to intimidate your opponents or make them think that you have a strong hand. However, be careful not to abuse this feature or make unrealistic bets that will make you look suspicious or foolish.
- Tip 2: Bluff smartly and read your opponents' tells
-Another way to win Texas Holdem Poker Online Mod APK is to bluff smartly and read your opponents' tells. Bluffing is a technique of making your opponents think that you have a better hand than you actually have. This can make them fold or call with weaker hands. However, bluffing is not always effective and can backfire if your opponents call your bluff or have a better hand. Therefore, you should bluff smartly and sparingly, and only when you have a good reason to do so. Some factors that can help you decide when to bluff are:
-
-Your position: Bluffing from a late position can be more successful than bluffing from an early position, as you have more information about your opponents' actions and cards.
-Your image: Bluffing from a tight image can be more successful than bluffing from a loose image, as your opponents will respect your bets and assume that you have a strong hand.
-Your cards: Bluffing with some outs or potential to improve can be more successful than bluffing with nothing, as you have a chance to win even if you get called.
-Your opponents: Bluffing against weak or timid opponents can be more successful than bluffing against strong or aggressive opponents, as they are more likely to fold or be scared by your bets.
-
-Reading your opponents' tells is another skill that can help you win Texas Holdem Poker Online Mod APK. Tells are clues or signs that reveal your opponents' emotions, intentions, or cards. They can be verbal or non-verbal, such as facial expressions, body language, gestures, tone of voice, chat messages, betting patterns, and so on. By reading your opponents' tells, you can gain an edge over them and make better decisions. Some examples of common tells are:
-
-A player who looks away or avoids eye contact may be bluffing or hiding something.
-A player who smiles or laughs nervously may be nervous or unsure about their hand.
-A player who shakes or trembles may be excited or holding a strong hand.
-A player who bets quickly or aggressively may be confident or trying to intimidate you.
-A player who bets slowly or hesitantly may be weak or trying to trap you.
- Tip 3: Practice regularly and learn from your mistakes
-The final tip to win Texas Holdem Poker Online Mod APK is to practice regularly and learn from your mistakes. Poker is a game of skill and luck, and the only way to improve your skill is to play more and study more. You can practice with free chips or play money before you play with real money. You can also watch videos, read books, join forums, and follow blogs of professional poker players. You can also review your own games and analyze your strengths and weaknesses. By practicing regularly and learning from your mistakes, you can become a better poker player and win more often.
- Conclusion
-Summary of the main points
-In conclusion, Texas Holdem Poker Online Mod APK is a modified version of the popular card game that lets you play online with other players from around the world. You can also get unlimited money, chips, and coins to bet as much as you want. To download and install Texas Holdem Poker Online Mod APK, you need to find a reliable source for the mod apk file, enable unknown sources on your device, and follow the instructions on the screen. To play Texas Holdem Poker Online Mod APK, you need to create an account or log in with Facebook, choose a game mode and a table, and learn the basic rules and strategies of Texas Holdem Poker. To win Texas Holdem Poker Online Mod APK, you need to use the unlimited money feature to your advantage, bluff smartly and read your opponents' tells, and practice regularly and learn from your mistakes.
- Call to action
-If you are ready to play Texas Holdem Poker Online Mod APK and have fun, then don't wait any longer. Download the app now and join the millions of poker fans who are enjoying the game every day. You will not regret it!
- FAQs
-Q: Is Texas Holdem Poker Online Mod APK safe to use?
-A: Yes, Texas Holdem Poker Online Mod APK is safe to use as long as you download it from a reputable website that offers virus-free and malware-free files. However, you should always be careful when downloading any app from unknown sources and scan it with antivirus software before installing it.
- Q: Is Texas Holdem Poker Online Mod APK legal to use?
-A: Yes, Texas Holdem Poker Online Mod APK is legal to use as long as you do not use it for illegal purposes or violate any terms of service of the original app. However, you should always check the laws and regulations of your country or region before playing online poker with real money.
- Q: How can I update Texas Holdem Poker Online Mod APK?
-A: To update Texas Holdem Poker Online Mod APK, you need to download the latest version of the mod apk file from the same website where you downloaded it before. Then, you need to uninstall the old version of the app and install the new version. You can also check the website for any updates or notifications about the mod apk file.
- Q: How can I contact the developer of Texas Holdem Poker Online Mod APK?
-A: To contact the developer of Texas Holdem Poker Online Mod APK, you can visit their official website or their social media pages. You can also send them an email or a message through the app. However, you should not expect a quick or positive response from them, as they may not support or endorse the mod apk file.
- Q: How can I get more free chips and coins in Texas Holdem Poker Online Mod APK?
-A: To get more free chips and coins in Texas Holdem Poker Online Mod APK, you can use the unlimited money feature that comes with the mod apk file. You can also claim daily bonuses, complete achievements, invite friends, watch ads, and participate in events and promotions. You can also buy more chips and coins with real money if you want to support the original app.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Benefits of Downloading My Clash Royale Matches and Sharing Them with Friends.md b/spaces/congsaPfin/Manga-OCR/logs/The Benefits of Downloading My Clash Royale Matches and Sharing Them with Friends.md
deleted file mode 100644
index a7cc03057325194995707ce3c4f023ba5751becf..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/The Benefits of Downloading My Clash Royale Matches and Sharing Them with Friends.md
+++ /dev/null
@@ -1,161 +0,0 @@
-
-How to Download Your Clash Royale Matches
-Clash Royale is a popular real-time strategy game that pits you against other players from around the world in fast-paced card battles. You can collect and upgrade dozens of cards featuring your favorite Clash characters, spells, and defenses, as well as new ones like the Royales. You can also join or form a clan with other players to share cards and battle in clan wars for big rewards.
-If you are a fan of Clash Royale, you might want to download your matches and watch them later. Maybe you want to analyze your mistakes and improve your skills, or maybe you want to relive your epic victories and show them off to your friends. Whatever your reason, downloading your matches is not as hard as you might think. In this article, we will show you two methods to download your Clash Royale matches easily and quickly.
-Before we start, you need to know how to find your player tag and access your battle log. Your player tag is a unique code that identifies your account in Clash Royale. You can find it by tapping on your name in the top left corner of the screen. Your battle log is where you can see all your recent battles, whether they are wins, losses, draws, or challenges. You can access it by tapping on the battle button in the bottom right corner of the screen.
-Methods to Download Your Matches
-Using a Screen Recorder App
-A screen recorder app is an app that allows you to record everything that happens on your device's screen. You can use it to record your Clash Royale matches by simply launching the app, starting the recording, opening Clash Royale, and playing or watching your match. When you are done, you can stop the recording and save it to your device's storage.
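If you also have a computer nearby, a different route (not one of the apps discussed here) is Android's built-in `screenrecord` command driven over `adb`. A rough sketch, assuming `adb` is installed and USB debugging is enabled, and keeping in mind that `screenrecord` caps a single clip at three minutes:

```python
import subprocess

def record_screen(output="match.mp4", seconds=120):
    """Record the connected device's screen with adb, then copy the file to the computer."""
    remote = "/sdcard/match.mp4"
    subprocess.run(
        ["adb", "shell", "screenrecord", "--time-limit", str(seconds), remote],
        check=True,
    )
    subprocess.run(["adb", "pull", remote, output], check=True)
    subprocess.run(["adb", "shell", "rm", remote], check=True)

record_screen()
```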
-Some of the pros of using a screen recorder app are:
-
-You can record any match you want, whether it is yours or someone else's.
-You can record in high quality and resolution.
-You can record with sound and voice commentary.
-You can edit your recordings within the app or with another app.
-
-Some of the cons of using a screen recorder app are:
-
-You need to have enough storage space on your device to save your recordings.
-You need to have a compatible device and operating system to run the app.
-You might experience some lag or performance issues while recording.
-You might need to pay for some features or remove ads from the app.
-
-Some examples of screen recorder apps for Android and iOS devices are:
-
-
-| App Name | Price | Rating | Features |
-| --- | --- | --- | --- |
-| AZ Screen Recorder | Free with in-app purchases | 4.5/5 stars on Google Play Store | Record in HD and Full HD quality; record with internal or external sound; record with facecam; edit videos with trimming, cutting, merging, adding text, etc.; live stream to YouTube, Facebook, Twitch, etc. |
-| DU Recorder | Free with in-app purchases | 4.3/5 stars on Google Play Store | Record in 1080p, 12Mbps, 60FPS quality; record with internal or external sound; record with facecam; edit videos with trimming, cropping, adding music, etc.; live stream to YouTube, Facebook, Twitch, etc. |
-| Screen Recorder & Video Editor | Free with in-app purchases | 4.7/5 stars on App Store | Record in 1080p quality; record with microphone sound; record with facecam; edit videos with trimming, splitting, adding filters, etc.; share videos to YouTube, Instagram, TikTok, etc. |
-| TechSmith Capture | Free | 4.6/5 stars on App Store | Record in high quality; record with microphone sound; import videos from your device or cloud storage; share videos to Camtasia or Snagit for further editing |
-
- Using a Third-Party Website
- A third-party website is a website that allows you to download your Clash Royale matches by entering your player tag or the URL of the match you want to download. You can find the URL of the match by tapping on the share button in the battle log and then copying it to your clipboard. You can then paste it into the website and download the match as a video file.
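Under the hood, the last step on such a site is simply an HTTP download of a video file. Purely as an illustration (the URL below is a placeholder, not an endpoint of any of the sites listed later), a streamed download in Python looks like this:

```python
import requests

def download_video(url, out_path="replay.mp4"):
    """Stream a video file to disk in chunks so large files do not sit in memory."""
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(out_path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=1 << 16):
                f.write(chunk)
    return out_path

download_video("https://example.com/replay.mp4")
```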
- Some of the pros of using a third-party website are:
-
- You do not need to install any app on your device or use any storage space.
- You can download any match you want, whether it is yours or someone else's.
- You can download in different formats and resolutions.
- You can download multiple matches at once.
-
- Some of the cons of using a third-party website are:
-
- You need to have a stable internet connection to access the website and download the matches.
- You might encounter some ads or pop-ups on the website.
- You might need to wait for some time for the match to be processed and downloaded.
- You might not be able to download some matches due to technical issues or legal restrictions.
-
- Some examples of third-party websites for downloading Clash Royale matches are:
-
-
-| Website Name | Price | Rating | Features |
-| --- | --- | --- | --- |
-| RoyaleAPI Replay Downloader | Free | N/A | Download matches from RoyaleAPI.com by entering your player tag or the match URL; download in MP4 format and 720p resolution; download multiple matches at once by entering multiple URLs separated by commas; view match details and statistics on RoyaleAPI.com |
-| RoyaleReplay.com | Free with ads | N/A | Download matches from Clash Royale by entering your player tag or the match URL; download in MP4 format and 480p resolution; download one match at a time; view match details and statistics on RoyaleReplay.com |
-| RoyaleTube.net | Free with ads | N/A | Download matches from Clash Royale by entering your player tag or the match URL; download in MP4 format and 360p resolution; download one match at a time |
-
-
- Conclusion
- Downloading your Clash Royale matches can be a fun and useful way to enjoy the game and improve your skills. You can use either a screen recorder app or a third-party website to download your matches easily and quickly. However, you should also be aware of the pros and cons of each method and choose the one that suits your needs and preferences best.
- Here are some tips and tricks for improving your gameplay and enjoying your matches:
-
- Watch your own replays and learn from your mistakes and successes.
- Watch other players' replays and learn from their strategies and techniques.
- Experiment with different decks and cards and find the ones that work best for you.
- Join or form a clan and participate in clan wars and friendly battles.
- Have fun and don't get too frustrated or angry when you lose.
-
- FAQs
- Q1: Can I download my matches directly from the Clash Royale app?
-A1: No, you cannot download your matches directly from the Clash Royale app. You need to use a screen recorder app or a third-party website to do so.
-Q2: How long are my matches stored in the battle log?
-A2: Your matches are stored in the battle log for up to 25 days. After that, they will be deleted and you will not be able to download them anymore.
-Q3: Can I download other players' matches from the leaderboards or tournaments?
-A3: Yes, you can download other players' matches from the leaderboards or tournaments by tapping on their names and then on the replay button. You can use the same methods as described above to download their matches.
-Q4: Can I edit or share my downloaded matches with others?
-A4: Yes, you can edit or share your downloaded matches with others using any video editing or sharing app of your choice. You can also upload them to YouTube, Facebook, Instagram, or other social media platforms.
-Q5: Can I get in trouble for downloading my matches or other players' matches?
-A5: No, you cannot get in trouble for downloading your matches or other players' matches. However, you should respect the privacy and intellectual property rights of others and not use their matches for any malicious or illegal purposes.
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/CVE-2020-2122 (brakeman) A Detailed Analysis of the JSON File Handling Flaw in Jenkins Plugin.md b/spaces/contluForse/HuggingGPT/assets/CVE-2020-2122 (brakeman) A Detailed Analysis of the JSON File Handling Flaw in Jenkins Plugin.md
deleted file mode 100644
index 5875a9cc35ef6c21ab8ba9c433f75f59a5b56b89..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/CVE-2020-2122 (brakeman) A Detailed Analysis of the JSON File Handling Flaw in Jenkins Plugin.md
+++ /dev/null
@@ -1,6 +0,0 @@
-CVE-2020-2122 (brakeman)
-
-
-
-
diff --git a/spaces/contluForse/HuggingGPT/assets/Download South Indian Recipe Book Pdf A Must-Have for Every Food Lover Who Wants to Try Something New and Exotic.md b/spaces/contluForse/HuggingGPT/assets/Download South Indian Recipe Book Pdf A Must-Have for Every Food Lover Who Wants to Try Something New and Exotic.md
deleted file mode 100644
index 91a542e219279b4d2cb3e5f956ba7dbb447781b8..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Download South Indian Recipe Book Pdf A Must-Have for Every Food Lover Who Wants to Try Something New and Exotic.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-These recipe books feature just some of the food that you will be able to cook in your Air fryer. From French fries to spring rolls to even souffles, the possibilities are limitless! The Air fryer lets you fry, bake, grill and steam healthier, faster and more conveniently. We hope that you will enjoy using the Air fryer as many others have around the world, and that the recipes inside inspire you to cook healthy, well-balanced meals for you and your family.
-The gowise air fryer recipes pdf will show you how to make the most of your gowise fryer. This gowise air fryer cookbook contains some delicious and creative recipes to get you started. From Chipotle tuna melt to creamy cheesecake, there are some amazing gowise air fryer recipes to help you please your family and guests.
-This philips air fryer recipe book contains numerous recipes for quick and healthy snacks and meals. You get over 30 philips air fryer recipes for side dishes, appetisers, entrees, desserts and kids meals. Philips airfryer recipe booklet will surely inspire you to cook wholesome meals for your loved ones.
-Philips airfryer recipe booklet is full of inspiring recipes for healthy, low fat food. There are airfryer philips recipes that help you make full use of your favourite appliance for frying, grilling, baking and roasting. Just look into this amazing philips air fryer recipe book and prepare great tasting food for your family and friends.
-Philips hd9220 air fryer recipe book is a collection of 25 healthy, quick and delicious recipes. In this philips airfryer recipe booklet, you will find recipes for both vegetarian and non-vegetarian appetizers. These Indian inspired Philips recipes will let you treat your family and friends to some lip-smacking delicacies.
-If you have bought a Tefal actifry, your next step is to download the Tefal Actifry recipe book. Tefal airfryer recipe book is probably one of the most comprehensive recipe books, complete with meal plans, health information, cooking tips and a whole lot of recipes. It is a collection of some of the best air fryer recipes, and you will find everything from basic everyday meal recipes to gourmet food recipes.
-Looking for free air fryer recipes? Here is the best free air fryer book for Cooks Companion owners. This recipe book lets you explore your taste preferences with best recipes from around the world. So, get ready to impress your family and friends with food that has less oil and more taste.
-This free air fryer recipes pdf lets you do exactly what it claims: cooking meals in a healthier way. With this philips airfryer recipe book download, you get access to more than 30 amazing recipes from around the world pdf. Famous Chef, restaurateur, author and television presenter Stacie Stewart has contributed 10 recipes for this book.
-With this free air fryer recipe book, you can prepare some amazing dishes such as Matcha cheesecake, Pumpkin cookies and even Croissants in your Cucina air fryer. It is a collection of 12 best air fryer recipes that help you create complete four course meals in a much healthier way.
-This one is not just an air fryer recipes pdf, but a complete operation and safety manual. It includes some basic recipes and cooking times and tips for specific foods. It can get you started with air frying, but for more recipes you will have to check out some other airfryer recipes book.
-
-This air fryer recipes pdf will get you started with your Glen air fryer. It is a collection of 9 simple air fryer recipes that give your favourite foods such as fish fingers and potato wedges a healthy makeover. The cook book is very well illustrated with high resolution pictures.
-This air fryer cookbook pdf lets you prepare the most popular potato and meat dishes in your Habor Air Fryer. It also contains 4 dessert recipes. What's more, there is a comprehensive guide to cooking times and temperatures, so you can prepare all your favourite foods with ease, in a much healthier way.
-South Indian masala vada or parippu vada, also known as dal vada, goes low oil here, and it is still as crunchy, tasty and super delicious! If you are looking for air fryer South Indian recipes, then this is the one to try out.
-We also have a similar blog post for beginner instant pot recipes. It has been downloaded by millions of instant pot readers and is perfect for instant pot inspiration. You can access it by clicking here.
-
-
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/utils/misc.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/utils/misc.py
deleted file mode 100644
index eb862a82bd47c8624db3dd5c6fb6ad8a03b62466..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/core/utils/misc.py
+++ /dev/null
@@ -1,17 +0,0 @@
-def add_prefix(inputs, prefix):
- """Add prefix for dict.
-
- Args:
- inputs (dict): The input dict with str keys.
- prefix (str): The prefix to add.
-
- Returns:
-
- dict: The dict with keys updated with ``prefix``.
- """
-
- outputs = dict()
- for name, value in inputs.items():
- outputs[f'{prefix}.{name}'] = value
-
- return outputs
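For reference, the `add_prefix` helper removed above just namespaces dictionary keys. A standalone sketch of equivalent behaviour with an illustrative call (the key names here are made up for the example):

```python
def add_prefix(inputs, prefix):
    """Return a copy of `inputs` with every key prefixed as `<prefix>.<key>`."""
    return {f"{prefix}.{name}": value for name, value in inputs.items()}

print(add_prefix({"loss_seg": 0.25, "acc_seg": 0.91}, "decode"))
# {'decode.loss_seg': 0.25, 'decode.acc_seg': 0.91}
```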
diff --git a/spaces/crashedice/signify/SOURCE/yolo_files/utils/wandb_logging/__init__.py b/spaces/crashedice/signify/SOURCE/yolo_files/utils/wandb_logging/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_api.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_api.py
deleted file mode 100644
index 7754f084fc7b656a44dfb4e2a0b6d0a10f112eaf..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/filelock/_api.py
+++ /dev/null
@@ -1,281 +0,0 @@
-from __future__ import annotations
-
-import contextlib
-import logging
-import os
-import time
-import warnings
-from abc import ABC, abstractmethod
-from dataclasses import dataclass
-from threading import local
-from typing import TYPE_CHECKING, Any
-
-from ._error import Timeout
-
-if TYPE_CHECKING:
- from types import TracebackType
-
-_LOGGER = logging.getLogger("filelock")
-
-
-# This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__
-# is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired
-# again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak)
-class AcquireReturnProxy:
- """A context aware object that will release the lock file when exiting."""
-
- def __init__(self, lock: BaseFileLock) -> None:
- self.lock = lock
-
- def __enter__(self) -> BaseFileLock:
- return self.lock
-
- def __exit__(
- self,
- exc_type: type[BaseException] | None,
- exc_value: BaseException | None,
- traceback: TracebackType | None,
- ) -> None:
- self.lock.release()
-
-
-@dataclass
-class FileLockContext:
- """A dataclass which holds the context for a ``BaseFileLock`` object."""
-
- # The context is held in a separate class to allow optional use of thread local storage via the
- # ThreadLocalFileContext class.
-
- #: The path to the lock file.
- lock_file: str
-
- #: The default timeout value.
- timeout: float
-
- #: The mode for the lock files
- mode: int
-
- #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held
- lock_file_fd: int | None = None
-
- #: The lock counter is used for implementing the nested locking mechanism.
- lock_counter: int = 0 # When the lock is acquired is increased and the lock is only released, when this value is 0
-
-
-class ThreadLocalFileContext(FileLockContext, local):
- """A thread local version of the ``FileLockContext`` class."""
-
-
-class BaseFileLock(ABC, contextlib.ContextDecorator):
- """Abstract base class for a file lock object."""
-
- def __init__(
- self,
- lock_file: str | os.PathLike[Any],
- timeout: float = -1,
- mode: int = 0o644,
- thread_local: bool = True, # noqa: FBT001, FBT002
- ) -> None:
- """
- Create a new lock object.
-
- :param lock_file: path to the file
- :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in
- the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it
- to a negative value. A timeout of 0 means, that there is exactly one attempt to acquire the file lock.
- :param mode: file permissions for the lockfile.
- :param thread_local: Whether this object's internal context should be thread local or not.
- If this is set to ``False`` then the lock will be reentrant across threads.
- """
- self._is_thread_local = thread_local
-
- # Create the context. Note that external code should not work with the context directly and should instead use
- # properties of this class.
- kwargs: dict[str, Any] = {
- "lock_file": os.fspath(lock_file),
- "timeout": timeout,
- "mode": mode,
- }
- self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs)
-
- def is_thread_local(self) -> bool:
- """:return: a flag indicating if this lock is thread local or not"""
- return self._is_thread_local
-
- @property
- def lock_file(self) -> str:
- """:return: path to the lock file"""
- return self._context.lock_file
-
- @property
- def timeout(self) -> float:
- """
- :return: the default timeout value, in seconds
-
- .. versionadded:: 2.0.0
- """
- return self._context.timeout
-
- @timeout.setter
- def timeout(self, value: float | str) -> None:
- """
- Change the default timeout value.
-
- :param value: the new value, in seconds
- """
- self._context.timeout = float(value)
-
- @abstractmethod
- def _acquire(self) -> None:
- """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file."""
- raise NotImplementedError
-
- @abstractmethod
- def _release(self) -> None:
- """Releases the lock and sets self._context.lock_file_fd to None."""
- raise NotImplementedError
-
- @property
- def is_locked(self) -> bool:
- """
-
- :return: A boolean indicating if the lock file is holding the lock currently.
-
- .. versionchanged:: 2.0.0
-
- This was previously a method and is now a property.
- """
- return self._context.lock_file_fd is not None
-
- @property
- def lock_counter(self) -> int:
- """:return: The number of times this lock has been acquired (but not yet released)."""
- return self._context.lock_counter
-
- def acquire(
- self,
- timeout: float | None = None,
- poll_interval: float = 0.05,
- *,
- poll_intervall: float | None = None,
- blocking: bool = True,
- ) -> AcquireReturnProxy:
- """
- Try to acquire the file lock.
-
- :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and
- if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired
- :param poll_interval: interval of trying to acquire the lock file
- :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead
- :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the
- first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired.
- :raises Timeout: if fails to acquire lock within the timeout period
- :return: a context object that will unlock the file when the context is exited
-
- .. code-block:: python
-
- # You can use this method in the context manager (recommended)
- with lock.acquire():
- pass
-
- # Or use an equivalent try-finally construct:
- lock.acquire()
- try:
- pass
- finally:
- lock.release()
-
- .. versionchanged:: 2.0.0
-
- This method returns now a *proxy* object instead of *self*,
- so that it can be used in a with statement without side effects.
-
- """
- # Use the default timeout, if no timeout is provided.
- if timeout is None:
- timeout = self._context.timeout
-
- if poll_intervall is not None:
- msg = "use poll_interval instead of poll_intervall"
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
- poll_interval = poll_intervall
-
- # Increment the number right at the beginning. We can still undo it, if something fails.
- self._context.lock_counter += 1
-
- lock_id = id(self)
- lock_filename = self.lock_file
- start_time = time.perf_counter()
- try:
- while True:
- if not self.is_locked:
- _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename)
- self._acquire()
- if self.is_locked:
- _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename)
- break
- if blocking is False:
- _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename)
- raise Timeout(lock_filename) # noqa: TRY301
- if 0 <= timeout < time.perf_counter() - start_time:
- _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename)
- raise Timeout(lock_filename) # noqa: TRY301
- msg = "Lock %s not acquired on %s, waiting %s seconds ..."
- _LOGGER.debug(msg, lock_id, lock_filename, poll_interval)
- time.sleep(poll_interval)
- except BaseException: # Something did go wrong, so decrement the counter.
- self._context.lock_counter = max(0, self._context.lock_counter - 1)
- raise
- return AcquireReturnProxy(lock=self)
-
- def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002
- """
- Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. Also
- note, that the lock file itself is not automatically deleted.
-
- :param force: If true, the lock counter is ignored and the lock is released in every case/
- """
- if self.is_locked:
- self._context.lock_counter -= 1
-
- if self._context.lock_counter == 0 or force:
- lock_id, lock_filename = id(self), self.lock_file
-
- _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename)
- self._release()
- self._context.lock_counter = 0
- _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename)
-
- def __enter__(self) -> BaseFileLock:
- """
- Acquire the lock.
-
- :return: the lock object
- """
- self.acquire()
- return self
-
- def __exit__(
- self,
- exc_type: type[BaseException] | None,
- exc_value: BaseException | None,
- traceback: TracebackType | None,
- ) -> None:
- """
- Release the lock.
-
- :param exc_type: the exception type if raised
- :param exc_value: the exception value if raised
- :param traceback: the exception traceback if raised
- """
- self.release()
-
- def __del__(self) -> None:
- """Called when the lock object is deleted."""
- self.release(force=True)
-
-
-__all__ = [
- "BaseFileLock",
- "AcquireReturnProxy",
-]
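The module deleted above is a vendored copy of the `filelock` package's base lock API. For context, typical use of that API through the package's concrete `FileLock` class looks roughly like this (the lock-file path is arbitrary):

```python
from filelock import FileLock, Timeout

lock = FileLock("shared_resource.txt.lock", timeout=5)

try:
    # acquire() returns a proxy object, so the with-block releases the lock on exit.
    with lock.acquire():
        with open("shared_resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("another process held the lock for more than 5 seconds")
```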
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/tests/abstract/get.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/tests/abstract/get.py
deleted file mode 100644
index baa9aa4a915f618369b53a28fd106e000aed7b9c..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/tests/abstract/get.py
+++ /dev/null
@@ -1,377 +0,0 @@
-class AbstractGetTests:
- def test_get_file_to_existing_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 1a
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
- assert local_fs.isdir(target)
-
- target_file2 = local_join(target, "file2")
- target_subfile1 = local_join(target, "subfile1")
-
- # Copy from source directory
- fs.get(fs_join(source, "file2"), target)
- assert local_fs.isfile(target_file2)
-
- # Copy from sub directory
- fs.get(fs_join(source, "subdir", "subfile1"), target)
- assert local_fs.isfile(target_subfile1)
-
- # Remove copied files
- local_fs.rm([target_file2, target_subfile1])
- assert not local_fs.exists(target_file2)
- assert not local_fs.exists(target_subfile1)
-
- # Repeat with trailing slash on target
- fs.get(fs_join(source, "file2"), target + "/")
- assert local_fs.isdir(target)
- assert local_fs.isfile(target_file2)
-
- fs.get(fs_join(source, "subdir", "subfile1"), target + "/")
- assert local_fs.isfile(target_subfile1)
-
- def test_get_file_to_new_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 1b
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- fs.get(
- fs_join(source, "subdir", "subfile1"), local_join(target, "newdir/")
- ) # Note trailing slash
-
- assert local_fs.isdir(target)
- assert local_fs.isdir(local_join(target, "newdir"))
- assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
-
- def test_get_file_to_file_in_existing_directory(
- self,
- fs,
- fs_join,
- fs_path,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 1c
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- fs.get(fs_join(source, "subdir", "subfile1"), local_join(target, "newfile"))
- assert local_fs.isfile(local_join(target, "newfile"))
-
- def test_get_file_to_file_in_new_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 1d
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- fs.get(
- fs_join(source, "subdir", "subfile1"),
- local_join(target, "newdir", "newfile"),
- )
- assert local_fs.isdir(local_join(target, "newdir"))
- assert local_fs.isfile(local_join(target, "newdir", "newfile"))
-
- def test_get_directory_to_existing_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 1e
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- for source_slash, target_slash in zip([False, True], [False, True]):
- s = fs_join(source, "subdir")
- if source_slash:
- s += "/"
- t = target + "/" if target_slash else target
-
- # Without recursive does nothing
- # ERROR: erroneously creates new directory
- # fs.get(s, t)
- # assert fs.ls(target) == []
-
- # With recursive
- fs.get(s, t, recursive=True)
- if source_slash:
- assert local_fs.isfile(local_join(target, "subfile1"))
- assert local_fs.isfile(local_join(target, "subfile2"))
- assert local_fs.isdir(local_join(target, "nesteddir"))
- assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile"))
-
- local_fs.rm(
- [
- local_join(target, "subfile1"),
- local_join(target, "subfile2"),
- local_join(target, "nesteddir"),
- ],
- recursive=True,
- )
- else:
- assert local_fs.isdir(local_join(target, "subdir"))
- assert local_fs.isfile(local_join(target, "subdir", "subfile1"))
- assert local_fs.isfile(local_join(target, "subdir", "subfile2"))
- assert local_fs.isdir(local_join(target, "subdir", "nesteddir"))
- assert local_fs.isfile(
- local_join(target, "subdir", "nesteddir", "nestedfile")
- )
-
- local_fs.rm(local_join(target, "subdir"), recursive=True)
- assert local_fs.ls(target) == []
-
- # Limit by maxdepth
- # ERROR: maxdepth ignored here
-
- def test_get_directory_to_new_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 1f
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- for source_slash, target_slash in zip([False, True], [False, True]):
- s = fs_join(source, "subdir")
- if source_slash:
- s += "/"
- t = local_join(target, "newdir")
- if target_slash:
- t += "/"
-
- # Without recursive does nothing
- # ERROR: erroneously creates new directory
- # fs.get(s, t)
- # assert fs.ls(target) == []
-
- # With recursive
- fs.get(s, t, recursive=True)
- assert local_fs.isdir(local_join(target, "newdir"))
- assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
- assert local_fs.isfile(local_join(target, "newdir", "subfile2"))
- assert local_fs.isdir(local_join(target, "newdir", "nesteddir"))
- assert local_fs.isfile(
- local_join(target, "newdir", "nesteddir", "nestedfile")
- )
-
- local_fs.rm(local_join(target, "newdir"), recursive=True)
- assert local_fs.ls(target) == []
-
- # Limit by maxdepth
- # ERROR: maxdepth ignored here
-
- def test_get_glob_to_existing_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 1g
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- # for target_slash in [False, True]:
- for target_slash in [False]:
- t = target + "/" if target_slash else target
-
- # Without recursive
- fs.get(fs_join(source, "subdir", "*"), t)
- assert local_fs.isfile(local_join(target, "subfile1"))
- assert local_fs.isfile(local_join(target, "subfile2"))
- # assert not local_fs.isdir(local_join(target, "nesteddir")) # ERROR
- assert not local_fs.isdir(local_join(target, "subdir"))
-
- # With recursive
-
- # Limit by maxdepth
-
- def test_get_glob_to_new_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 1h
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- for target_slash in [False, True]:
- t = fs_join(target, "newdir")
- if target_slash:
- t += "/"
-
- # Without recursive
- fs.get(fs_join(source, "subdir", "*"), t)
- assert local_fs.isdir(local_join(target, "newdir"))
- assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
- assert local_fs.isfile(local_join(target, "newdir", "subfile2"))
- # ERROR - do not copy empty directory
- # assert not local_fs.exists(local_join(target, "newdir", "nesteddir"))
-
- local_fs.rm(local_join(target, "newdir"), recursive=True)
- assert local_fs.ls(target) == []
-
- # With recursive
- fs.get(fs_join(source, "subdir", "*"), t, recursive=True)
- assert local_fs.isdir(local_join(target, "newdir"))
- assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
- assert local_fs.isfile(local_join(target, "newdir", "subfile2"))
- assert local_fs.isdir(local_join(target, "newdir", "nesteddir"))
- assert local_fs.isfile(
- local_join(target, "newdir", "nesteddir", "nestedfile")
- )
-
- local_fs.rm(local_join(target, "newdir"), recursive=True)
- assert local_fs.ls(target) == []
-
- # Limit by maxdepth
- # ERROR: this is not correct
-
- def test_get_list_of_files_to_existing_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 2a
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- source_files = [
- fs_join(source, "file1"),
- fs_join(source, "file2"),
- fs_join(source, "subdir", "subfile1"),
- ]
-
- for target_slash in [False, True]:
- t = target + "/" if target_slash else target
-
- fs.get(source_files, t)
- assert local_fs.isfile(local_join(target, "file1"))
- assert local_fs.isfile(local_join(target, "file2"))
- assert local_fs.isfile(local_join(target, "subfile1"))
-
- local_fs.rm(local_fs.find(target))
- assert local_fs.ls(target) == []
-
- def test_get_list_of_files_to_new_directory(
- self,
- fs,
- fs_join,
- fs_bulk_operations_scenario_0,
- local_fs,
- local_join,
- local_target,
- ):
- # Copy scenario 2b
- source = fs_bulk_operations_scenario_0
-
- target = local_target
- local_fs.mkdir(target)
-
- source_files = [
- fs_join(source, "file1"),
- fs_join(source, "file2"),
- fs_join(source, "subdir", "subfile1"),
- ]
-
- fs.get(source_files, local_join(target, "newdir") + "/") # Note trailing slash
- assert local_fs.isdir(local_join(target, "newdir"))
- assert local_fs.isfile(local_join(target, "newdir", "file1"))
- assert local_fs.isfile(local_join(target, "newdir", "file2"))
- assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
-
- def test_get_directory_recursive(
- self, fs, fs_join, fs_path, local_fs, local_join, local_target
- ):
- # https://github.com/fsspec/filesystem_spec/issues/1062
- # Recursive cp/get/put of source directory into non-existent target directory.
- src = fs_join(fs_path, "src")
- src_file = fs_join(src, "file")
- fs.mkdir(src)
- fs.touch(src_file)
-
- target = local_target
-
- # get without slash
- assert not local_fs.exists(target)
- for loop in range(2):
- fs.get(src, target, recursive=True)
- assert local_fs.isdir(target)
-
- if loop == 0:
- assert local_fs.isfile(local_join(target, "file"))
- assert not local_fs.exists(local_join(target, "src"))
- else:
- assert local_fs.isfile(local_join(target, "file"))
- assert local_fs.isdir(local_join(target, "src"))
- assert local_fs.isfile(local_join(target, "src", "file"))
-
- local_fs.rm(target, recursive=True)
-
- # get with slash
- assert not local_fs.exists(target)
- for loop in range(2):
- fs.get(src + "/", target, recursive=True)
- assert local_fs.isdir(target)
- assert local_fs.isfile(local_join(target, "file"))
- assert not local_fs.exists(local_join(target, "src"))
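The tests above pin down `fs.get()`'s trailing-slash, glob, and recursion semantics. A minimal sketch of the "glob into a new directory" case, assuming `fsspec` is installed and using an in-memory filesystem purely for illustration:

```python
# Illustrative only: mirrors the "glob to new directory" behaviour asserted above.
import os
import tempfile

import fsspec

fs = fsspec.filesystem("memory")
fs.makedirs("/src/subdir", exist_ok=True)
fs.pipe("/src/subdir/subfile1", b"data")
fs.pipe("/src/subdir/subfile2", b"data")

local_fs = fsspec.filesystem("file")
target = tempfile.mkdtemp()

# Globbing into a path that does not exist yet creates the directory and
# places the matched files inside it (with or without a trailing slash).
fs.get("/src/subdir/*", os.path.join(target, "newdir") + "/")
assert local_fs.isdir(os.path.join(target, "newdir"))
assert local_fs.isfile(os.path.join(target, "newdir", "subfile1"))
```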
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/_space_api.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/_space_api.py
deleted file mode 100644
index 2384ef5829d0d2f4f6fdbfccd69ea7d3d50f9da9..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/_space_api.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# coding=utf-8
-# Copyright 2019-present, the HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from dataclasses import dataclass
-from enum import Enum
-from typing import Dict, Optional
-
-
-class SpaceStage(str, Enum):
- """
-    Enumeration of the possible stages of a Space on the Hub.
-
- Value can be compared to a string:
- ```py
- assert SpaceStage.BUILDING == "BUILDING"
- ```
-
- Taken from https://github.com/huggingface/moon-landing/blob/main/server/repo_types/SpaceInfo.ts#L61 (private url).
- """
-
- # Copied from moon-landing > server > repo_types > SpaceInfo.ts (private repo)
- NO_APP_FILE = "NO_APP_FILE"
- CONFIG_ERROR = "CONFIG_ERROR"
- BUILDING = "BUILDING"
- BUILD_ERROR = "BUILD_ERROR"
- RUNNING = "RUNNING"
- RUNNING_BUILDING = "RUNNING_BUILDING"
- RUNTIME_ERROR = "RUNTIME_ERROR"
- DELETING = "DELETING"
- STOPPED = "STOPPED"
- PAUSED = "PAUSED"
-
-
-class SpaceHardware(str, Enum):
- """
-    Enumeration of hardware options available to run your Space on the Hub.
-
- Value can be compared to a string:
- ```py
- assert SpaceHardware.CPU_BASIC == "cpu-basic"
- ```
-
- Taken from https://github.com/huggingface/moon-landing/blob/main/server/repo_types/SpaceInfo.ts#L73 (private url).
- """
-
- CPU_BASIC = "cpu-basic"
- CPU_UPGRADE = "cpu-upgrade"
- T4_SMALL = "t4-small"
- T4_MEDIUM = "t4-medium"
- A10G_SMALL = "a10g-small"
- A10G_LARGE = "a10g-large"
- A100_LARGE = "a100-large"
-
-
-@dataclass
-class SpaceRuntime:
- """
- Contains information about the current runtime of a Space.
-
- Args:
- stage (`str`):
- Current stage of the space. Example: RUNNING.
- hardware (`str` or `None`):
- Current hardware of the space. Example: "cpu-basic". Can be `None` if Space
- is `BUILDING` for the first time.
- requested_hardware (`str` or `None`):
-            Requested hardware. Can be different from `hardware`, especially if the request
- has just been made. Example: "t4-medium". Can be `None` if no hardware has
- been requested yet.
- sleep_time (`int` or `None`):
- Number of seconds the Space will be kept alive after the last request. By default (if value is `None`), the
-            Space will never go to sleep if it's running on upgraded hardware, while it will go to sleep after 48
- hours on a free 'cpu-basic' hardware. For more details, see https://huggingface.co/docs/hub/spaces-gpus#sleep-time.
- raw (`dict`):
- Raw response from the server. Contains more information about the Space
-            runtime, such as the number of replicas, number of CPUs, memory size, etc.
- """
-
- stage: SpaceStage
- hardware: Optional[SpaceHardware]
- requested_hardware: Optional[SpaceHardware]
- sleep_time: Optional[int]
- raw: Dict
-
- def __init__(self, data: Dict) -> None:
- self.stage = data["stage"]
- self.hardware = data["hardware"]["current"]
- self.requested_hardware = data["hardware"]["requested"]
- self.sleep_time = data["gcTimeout"]
- self.raw = data
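For reference, a hedged sketch of how the dataclass above consumes the raw server payload; the payload shape below is inferred from `__init__` and is illustrative only:

```python
# Illustrative payload in the shape SpaceRuntime.__init__ reads above.
payload = {
    "stage": "RUNNING",
    "hardware": {"current": "cpu-basic", "requested": "t4-small"},
    "gcTimeout": 3600,
}

runtime = SpaceRuntime(payload)
assert runtime.stage == SpaceStage.RUNNING            # str-based enums compare to plain strings
assert runtime.hardware == SpaceHardware.CPU_BASIC
assert runtime.requested_hardware == SpaceHardware.T4_SMALL
assert runtime.sleep_time == 3600
```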
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py
deleted file mode 100644
index f8c40889bce7ec9b9645011b5e2ee8db37464b6a..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from . import _base
-from ._axes import *
-
-# Backcompat.
-from ._axes import Axes as Subplot
-
-
-class _SubplotBaseMeta(type):
- def __instancecheck__(self, obj):
- return (isinstance(obj, _base._AxesBase)
- and obj.get_subplotspec() is not None)
-
-
-class SubplotBase(metaclass=_SubplotBaseMeta):
- pass
-
-
-def subplot_class_factory(cls): return cls
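The metaclass above keeps `isinstance(obj, SubplotBase)` meaningful after Subplot was folded into `Axes`: the check passes only for Axes created from a subplot spec. A hedged sketch, assuming matplotlib is installed:

```python
# Back-compat check sketched from the metaclass above (illustrative).
import matplotlib.pyplot as plt
from matplotlib.axes import SubplotBase

fig, ax = plt.subplots()                      # created from a SubplotSpec
print(isinstance(ax, SubplotBase))            # True: ax.get_subplotspec() is not None

free_ax = fig.add_axes((0.1, 0.1, 0.8, 0.8))  # free-floating Axes, no SubplotSpec
print(isinstance(free_ax, SubplotBase))       # False
```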
diff --git a/spaces/ddiddi/bhasha.dev/share_btn.py b/spaces/ddiddi/bhasha.dev/share_btn.py
deleted file mode 100644
index a85c7c3c16e0bbeae3a8d880baf401903bca7337..0000000000000000000000000000000000000000
--- a/spaces/ddiddi/bhasha.dev/share_btn.py
+++ /dev/null
@@ -1,60 +0,0 @@
-community_icon_html = """
-
-
- """
-
-loading_icon_html = """ """
-
-share_js = """async () => {
- async function uploadFile(file){
- const UPLOAD_URL = 'https://huggingface.co/uploads';
- const response = await fetch(UPLOAD_URL, {
- method: 'POST',
- headers: {
- 'Content-Type': file.type,
- 'X-Requested-With': 'XMLHttpRequest',
- },
- body: file, /// <- File inherits from Blob
- });
- const url = await response.text();
- return url;
- }
- const gradioEl = document.querySelector('body > gradio-app');
- const imgEls = gradioEl.querySelectorAll('#gallery img');
- const promptTxt = gradioEl.querySelector('#prompt-text-input input').value;
- const shareBtnEl = gradioEl.querySelector('#share-btn');
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
- if(!imgEls.length){
- return;
- };
- shareBtnEl.style.pointerEvents = 'none';
- shareIconEl.style.display = 'none';
- loadingIconEl.style.removeProperty('display');
- const files = await Promise.all(
- [...imgEls].map(async (imgEl) => {
- const res = await fetch(imgEl.src);
- const blob = await res.blob();
- const imgId = Date.now() % 200;
- const fileName = `diffuse-the-rest-${{imgId}}.jpg`;
- return new File([blob], fileName, { type: 'image/jpeg' });
- })
- );
- const urls = await Promise.all(files.map((f) => uploadFile(f)));
- const htmlImgs = urls.map(url => ` `);
- const descriptionMd = `
-${htmlImgs.join(`\n`)}
-
-`;
- const params = new URLSearchParams({
- title: promptTxt,
- description: descriptionMd,
- });
- const paramsStr = params.toString();
- window.open(`https://huggingface.co/spaces/stabilityai/stable-diffusion/discussions/new?${paramsStr}`, '_blank');
- shareBtnEl.style.removeProperty('pointer-events');
- shareIconEl.style.removeProperty('display');
- loadingIconEl.style.display = 'none';
-}"""
diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_ddim_flax.py b/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_ddim_flax.py
deleted file mode 100644
index db248c33077bf502e31cb2ab97141744b828b514..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_ddim_flax.py
+++ /dev/null
@@ -1,305 +0,0 @@
-# Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
-# and https://github.com/hojonathanho/diffusion
-
-from dataclasses import dataclass
-from typing import Optional, Tuple, Union
-
-import flax
-import jax.numpy as jnp
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from .scheduling_utils_flax import (
- CommonSchedulerState,
- FlaxKarrasDiffusionSchedulers,
- FlaxSchedulerMixin,
- FlaxSchedulerOutput,
- add_noise_common,
- get_velocity_common,
-)
-
-
-@flax.struct.dataclass
-class DDIMSchedulerState:
- common: CommonSchedulerState
- final_alpha_cumprod: jnp.ndarray
-
- # setable values
- init_noise_sigma: jnp.ndarray
- timesteps: jnp.ndarray
- num_inference_steps: Optional[int] = None
-
- @classmethod
- def create(
- cls,
- common: CommonSchedulerState,
- final_alpha_cumprod: jnp.ndarray,
- init_noise_sigma: jnp.ndarray,
- timesteps: jnp.ndarray,
- ):
- return cls(
- common=common,
- final_alpha_cumprod=final_alpha_cumprod,
- init_noise_sigma=init_noise_sigma,
- timesteps=timesteps,
- )
-
-
-@dataclass
-class FlaxDDIMSchedulerOutput(FlaxSchedulerOutput):
- state: DDIMSchedulerState
-
-
-class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin):
- """
-    Denoising Diffusion Implicit Models (DDIM) is a scheduler that extends the denoising procedure introduced in denoising
- diffusion probabilistic models (DDPMs) with non-Markovian guidance.
-
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
- [`~SchedulerMixin.from_pretrained`] functions.
-
- For more details, see the original paper: https://arxiv.org/abs/2010.02502
-
- Args:
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
- beta_start (`float`): the starting `beta` value of inference.
- beta_end (`float`): the final `beta` value.
- beta_schedule (`str`):
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- trained_betas (`jnp.ndarray`, optional):
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
- clip_sample (`bool`, default `True`):
- option to clip predicted sample between -1 and 1 for numerical stability.
- set_alpha_to_one (`bool`, default `True`):
- each diffusion step uses the value of alphas product at that step and at the previous one. For the final
- step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
- otherwise it uses the value of alpha at step 0.
- steps_offset (`int`, default `0`):
- an offset added to the inference steps. You can use a combination of `offset=1` and
- `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
- stable diffusion.
- prediction_type (`str`, default `epsilon`):
- indicates whether the model predicts the noise (epsilon), or the samples. One of `epsilon`, `sample`.
- `v-prediction` is not supported for this scheduler.
- dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
- the `dtype` used for params and computation.
- """
-
- _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
-
- dtype: jnp.dtype
-
- @property
- def has_state(self):
- return True
-
- @register_to_config
- def __init__(
- self,
- num_train_timesteps: int = 1000,
- beta_start: float = 0.0001,
- beta_end: float = 0.02,
- beta_schedule: str = "linear",
- trained_betas: Optional[jnp.ndarray] = None,
- set_alpha_to_one: bool = True,
- steps_offset: int = 0,
- prediction_type: str = "epsilon",
- dtype: jnp.dtype = jnp.float32,
- ):
- self.dtype = dtype
-
- def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDIMSchedulerState:
- if common is None:
- common = CommonSchedulerState.create(self)
-
- # At every step in ddim, we are looking into the previous alphas_cumprod
- # For the final step, there is no previous alphas_cumprod because we are already at 0
- # `set_alpha_to_one` decides whether we set this parameter simply to one or
- # whether we use the final alpha of the "non-previous" one.
- final_alpha_cumprod = (
- jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0]
- )
-
- # standard deviation of the initial noise distribution
- init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
-
- timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
-
- return DDIMSchedulerState.create(
- common=common,
- final_alpha_cumprod=final_alpha_cumprod,
- init_noise_sigma=init_noise_sigma,
- timesteps=timesteps,
- )
-
- def scale_model_input(
- self, state: DDIMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
- ) -> jnp.ndarray:
- """
- Args:
-            state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance.
- sample (`jnp.ndarray`): input sample
- timestep (`int`, optional): current timestep
-
- Returns:
- `jnp.ndarray`: scaled input sample
- """
- return sample
-
- def set_timesteps(
- self, state: DDIMSchedulerState, num_inference_steps: int, shape: Tuple = ()
- ) -> DDIMSchedulerState:
- """
- Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
-
- Args:
- state (`DDIMSchedulerState`):
- the `FlaxDDIMScheduler` state data class instance.
- num_inference_steps (`int`):
- the number of diffusion steps used when generating samples with a pre-trained model.
- """
- step_ratio = self.config.num_train_timesteps // num_inference_steps
- # creates integer timesteps by multiplying by ratio
- # rounding to avoid issues when num_inference_step is power of 3
- timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] + self.config.steps_offset
-
- return state.replace(
- num_inference_steps=num_inference_steps,
- timesteps=timesteps,
- )
-
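For concreteness (numbers assumed for illustration): with `num_train_timesteps=1000`, `num_inference_steps=50` and `steps_offset=0`, `set_timesteps` above yields an evenly strided, descending schedule:

```python
# Illustrative reproduction of the schedule computed by set_timesteps above.
import numpy as np

step_ratio = 1000 // 50                                   # 20
timesteps = (np.arange(0, 50) * step_ratio).round()[::-1] + 0
print(timesteps[:3], timesteps[-3:])                      # [980 960 940] ... [40 20 0]
```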
- def _get_variance(self, state: DDIMSchedulerState, timestep, prev_timestep):
- alpha_prod_t = state.common.alphas_cumprod[timestep]
- alpha_prod_t_prev = jnp.where(
- prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod
- )
- beta_prod_t = 1 - alpha_prod_t
- beta_prod_t_prev = 1 - alpha_prod_t_prev
-
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
-
- return variance
-
- def step(
- self,
- state: DDIMSchedulerState,
- model_output: jnp.ndarray,
- timestep: int,
- sample: jnp.ndarray,
- eta: float = 0.0,
- return_dict: bool = True,
- ) -> Union[FlaxDDIMSchedulerOutput, Tuple]:
- """
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
- process from the learned model outputs (most often the predicted noise).
-
- Args:
- state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance.
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- sample (`jnp.ndarray`):
- current instance of sample being created by diffusion process.
- return_dict (`bool`): option for returning tuple rather than FlaxDDIMSchedulerOutput class
-
- Returns:
- [`FlaxDDIMSchedulerOutput`] or `tuple`: [`FlaxDDIMSchedulerOutput`] if `return_dict` is True, otherwise a
- `tuple`. When returning a tuple, the first element is the sample tensor.
-
- """
- if state.num_inference_steps is None:
- raise ValueError(
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
- )
-
- # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
-        # Ideally, read the DDIM paper in detail for a full understanding
-
-        # Notation (<variable name> -> <name in paper>
- # - pred_noise_t -> e_theta(x_t, t)
- # - pred_original_sample -> f_theta(x_t, t) or x_0
- # - std_dev_t -> sigma_t
- # - eta -> η
- # - pred_sample_direction -> "direction pointing to x_t"
- # - pred_prev_sample -> "x_t-1"
-
- # 1. get previous step value (=t-1)
- prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps
-
- alphas_cumprod = state.common.alphas_cumprod
- final_alpha_cumprod = state.final_alpha_cumprod
-
- # 2. compute alphas, betas
- alpha_prod_t = alphas_cumprod[timestep]
- alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], final_alpha_cumprod)
-
- beta_prod_t = 1 - alpha_prod_t
-
- # 3. compute predicted original sample from predicted noise also called
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- if self.config.prediction_type == "epsilon":
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
- pred_epsilon = model_output
- elif self.config.prediction_type == "sample":
- pred_original_sample = model_output
- pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
- elif self.config.prediction_type == "v_prediction":
- pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
- pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
- else:
- raise ValueError(
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
- " `v_prediction`"
- )
-
- # 4. compute variance: "sigma_t(η)" -> see formula (16)
- # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
- variance = self._get_variance(state, timestep, prev_timestep)
- std_dev_t = eta * variance ** (0.5)
-
- # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon
-
- # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
-
- if not return_dict:
- return (prev_sample, state)
-
- return FlaxDDIMSchedulerOutput(prev_sample=prev_sample, state=state)
-
- def add_noise(
- self,
- state: DDIMSchedulerState,
- original_samples: jnp.ndarray,
- noise: jnp.ndarray,
- timesteps: jnp.ndarray,
- ) -> jnp.ndarray:
- return add_noise_common(state.common, original_samples, noise, timesteps)
-
- def get_velocity(
- self,
- state: DDIMSchedulerState,
- sample: jnp.ndarray,
- noise: jnp.ndarray,
- timesteps: jnp.ndarray,
- ) -> jnp.ndarray:
- return get_velocity_common(state.common, sample, noise, timesteps)
-
- def __len__(self):
- return self.config.num_train_timesteps
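Written out, the `step()` method above implements Eq. (12) of the DDIM paper, with ᾱ denoting `alphas_cumprod`; note that this Flax port computes `std_dev_t` but never adds random noise, so with the default `eta=0.0` the update is fully deterministic:

$$
x_{t-1} \;=\; \sqrt{\bar\alpha_{t-1}}\,\hat{x}_0 \;+\; \sqrt{1-\bar\alpha_{t-1}-\sigma_t^{2}}\;\hat{\epsilon},
\qquad
\sigma_t \;=\; \eta\,\sqrt{\frac{1-\bar\alpha_{t-1}}{1-\bar\alpha_t}}\,\sqrt{1-\frac{\bar\alpha_t}{\bar\alpha_{t-1}}},
$$

where, for `prediction_type="epsilon"`, $\hat{x}_0 = (x_t - \sqrt{1-\bar\alpha_t}\,\hat{\epsilon}) / \sqrt{\bar\alpha_t}$.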
diff --git a/spaces/deepghs/auto_image_censor/visual.py b/spaces/deepghs/auto_image_censor/visual.py
deleted file mode 100644
index 8450323f15c1d0d8badd572825a894e60c161b13..0000000000000000000000000000000000000000
--- a/spaces/deepghs/auto_image_censor/visual.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from functools import lru_cache
-from typing import List
-
-import matplotlib.pyplot as plt
-from PIL import Image
-from hbutils.color import rnd_colors
-
-
-@lru_cache()
-def _get_complete_classes():
- from nudenet import open_model_session
- _, classes = open_model_session()
- return classes
-
-
-@lru_cache()
-def _get_color_map():
- _all_classes = _get_complete_classes()
- colors = rnd_colors(len(_get_complete_classes()), rnd=0)
- return {cls_: (str(c),) for c, cls_ in zip(colors, _all_classes)}
-
-
-CLS_MAP = {
- 'EXPOSED_BREAST_F': 'nipple',
- 'EXPOSED_GENITALIA_F': 'pussy',
- 'EXPOSED_GENITALIA_M': 'penis',
- 'EXPOSED_ANUS': 'anus',
-}
-
-
-def plot_detection(pil_img: Image.Image, detection: List):
- plt.tight_layout()
- plt.imshow(pil_img)
- ax = plt.gca()
- _color_map = _get_color_map()
- for item in detection:
- score = item['score']
- xmin, ymin, xmax, ymax = item['box']
- class_ = item['label']
-
- box_color, = _color_map[class_]
- ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color=box_color, linewidth=3))
- text = f'{CLS_MAP.get(class_, class_)}: {score * 100:.2f}%'
- ax.text(xmin, ymin, text, fontsize=8, bbox=dict(facecolor=box_color, alpha=0.5))
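`plot_detection` above draws one rectangle plus a score label per detection dict. A self-contained sketch of the same matplotlib pattern (the detection values are made up for illustration):

```python
# Standalone sketch of the box-drawing pattern used in plot_detection above.
import matplotlib.pyplot as plt
import numpy as np

detections = [{"box": (40, 30, 120, 150), "score": 0.87, "label": "person"}]
image = np.zeros((200, 200, 3), dtype=np.uint8)

plt.imshow(image)
ax = plt.gca()
for det in detections:
    xmin, ymin, xmax, ymax = det["box"]
    ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                               fill=False, color="red", linewidth=3))
    ax.text(xmin, ymin, f"{det['label']}: {det['score'] * 100:.2f}%",
            fontsize=8, bbox=dict(facecolor="red", alpha=0.5))
plt.savefig("detections.png")
```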
diff --git a/spaces/dejinlee/art/README.md b/spaces/dejinlee/art/README.md
deleted file mode 100644
index 86f26f16fcfab57969dd892758832488cf30ffeb..0000000000000000000000000000000000000000
--- a/spaces/dejinlee/art/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Art
-emoji: 🌖
-colorFrom: indigo
-colorTo: purple
-sdk: gradio
-sdk_version: 3.9
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/diacanFperku/AutoGPT/Karizma Album 12x30 Background Psd Files Free 18.md b/spaces/diacanFperku/AutoGPT/Karizma Album 12x30 Background Psd Files Free 18.md
deleted file mode 100644
index a51d5741c9629ee7f19b8f89c469cca381d23e78..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Karizma Album 12x30 Background Psd Files Free 18.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Karizma Album 12x30 Background Psd Files Free 18 Download ->>->>->> https://gohhs.com/2uFV1j
-
-Modern Marriage Photo Album Design 12x30 PSD Templates Free Download ... Karizma Design PSD File Links 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ... Templates - Karizma Album Designs Download Karizzma PSD Background ... 1fdad05405
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/La-Gran-Conexion-Arnie-Warren-PDF-2021.md b/spaces/diacanFperku/AutoGPT/La-Gran-Conexion-Arnie-Warren-PDF-2021.md
deleted file mode 100644
index 17541befba90ba301c13ae127c20e1315b657644..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/La-Gran-Conexion-Arnie-Warren-PDF-2021.md
+++ /dev/null
@@ -1,70 +0,0 @@
-## La gran conexion arnie warren PDF
-
-
-
-
-
- ![La Gran Conexion Arnie Warren PDF \[2021\]](https://ceylonmediweb.com/wp-content/uploads/2019/12/1566108581098.jpg)
-
-
-
-
-
-**Download ---> [https://urluso.com/2txxx2](https://urluso.com/2txxx2)**
-
-
-
-
-
-
-
-
-
-
-
- Here is a possible title and article with HTML formatting for the keyword "La gran conexion arnie warren PDF":
-
-# La gran conexion: A book review
-
-
-
-La gran conexion (The Great Connection) is a book by Arnie Warren that aims to help readers discover the key to successful professional relationships. The book is written as a story that follows the protagonist, Tom, as he learns about the four different styles of behavior that people tend to exhibit: analytical, driver, amiable and expressive. By understanding his own style and those of others, Tom is able to improve his communication, leadership and teamwork skills.
-
-
-
-The book is based on the DISC model of personality assessment, which was developed by William Moulton Marston in the 1920s. The model divides people into four quadrants based on their levels of dominance and sociability. The book explains how each style has its strengths and weaknesses, and how to adapt to different situations and people. The book also provides practical tips and exercises to help readers apply the concepts to their own lives.
-
-
-
-La gran conexion is a useful and engaging book for anyone who wants to improve their interpersonal skills and achieve more success in their professional and personal lives. The book is written in a simple and clear language, and uses examples and anecdotes to illustrate the points. The book is available in Spanish as a paperback or as a PDF file that can be downloaded from various online sources[^1^] [^2^] [^3^] [^4^].
-
-Here is a possible continuation of the article with HTML formatting for the keyword "La gran conexion arnie warren PDF":
-
-The book is divided into three parts. The first part introduces the four styles of behavior and how they affect Tom's life and career. The second part shows how Tom learns to identify and connect with each style through the guidance of his mentor, Frank. The third part reveals how Tom applies his new skills to improve his relationships with his family, friends, colleagues and clients.
-
-
-
-The book is not only informative, but also entertaining and inspiring. The story is full of humor, drama and emotion, and the characters are realistic and relatable. The book also includes a self-assessment test that helps readers identify their own style and those of others. The book also offers a summary of the main points and a list of action steps at the end of each chapter.
-
-
-
-La gran conexion is a book that can change your life for the better. It can help you understand yourself and others better, communicate more effectively, build trust and rapport, resolve conflicts, motivate and influence others, and achieve your goals. It can also help you enjoy your life more by connecting with your true self and others on a deeper level.
-
-Here are a few more paragraphs for the article with HTML formatting for the keyword "La gran conexion arnie warren PDF":
-
-La gran conexion is not only a book for professionals, but also for anyone who wants to improve their personal relationships. The book can help you connect with your spouse, children, parents, friends and neighbors. It can also help you deal with difficult people and situations. The book can teach you how to appreciate and respect the differences and similarities among people, and how to create harmony and synergy.
-
-
-
-La gran conexion is also a book for personal growth and development. The book can help you discover your strengths and weaknesses, your values and beliefs, your passions and goals. It can also help you overcome your fears and doubts, your limiting beliefs and habits, your negative emotions and thoughts. The book can inspire you to be more confident, creative, positive and happy.
-
-
-
-La gran conexion is a book that can transform your life. It can help you connect with yourself and others in a meaningful and fulfilling way. It can help you live a life of purpose and joy. It can help you make a difference in the world. It can help you achieve the great connection.
-
- dfd1c89656
-
-
-
-
-
diff --git a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/train_ms.py b/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/train_ms.py
deleted file mode 100644
index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/train_ms.py
+++ /dev/null
@@ -1,402 +0,0 @@
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-import shutil
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-from tqdm import tqdm
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-import commons
-import utils
-from data_utils import (
- TextAudioSpeakerLoader,
- TextAudioSpeakerCollate,
- DistributedBucketSampler
-)
-from models import (
- SynthesizerTrn,
- MultiPeriodDiscriminator,
- DurationDiscriminator,
-)
-from losses import (
- generator_loss,
- discriminator_loss,
- feature_loss,
- kl_loss
-)
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-from text.symbols import symbols
-
-torch.backends.cudnn.benchmark = True
-torch.backends.cuda.matmul.allow_tf32 = True
-torch.backends.cudnn.allow_tf32 = True
-torch.set_float32_matmul_precision('medium')
-global_step = 0
-
-
-def main():
- """Assume Single Node Multi GPUs Training Only"""
- assert torch.cuda.is_available(), "CPU training is not allowed."
-
- n_gpus = torch.cuda.device_count()
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = '65280'
-
- hps = utils.get_hparams()
- if not hps.cont:
- shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth')
- shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth')
- shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth')
- mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
- global global_step
- if rank == 0:
- logger = utils.get_logger(hps.model_dir)
- logger.info(hps)
- utils.check_git_hash(hps.model_dir)
- writer = SummaryWriter(log_dir=hps.model_dir)
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
- dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
- torch.manual_seed(hps.train.seed)
- torch.cuda.set_device(rank)
-
- train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
- train_sampler = DistributedBucketSampler(
- train_dataset,
- hps.train.batch_size,
- [32, 300, 400, 500, 600, 700, 800, 900, 1000],
- num_replicas=n_gpus,
- rank=rank,
- shuffle=True)
- collate_fn = TextAudioSpeakerCollate()
- train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True,
- collate_fn=collate_fn, batch_sampler=train_sampler)
- if rank == 0:
- eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
- eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
- batch_size=1, pin_memory=True,
- drop_last=False, collate_fn=collate_fn)
- if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True:
- print("Using noise scaled MAS for VITS2")
- use_noise_scaled_mas = True
- mas_noise_scale_initial = 0.01
- noise_scale_delta = 2e-6
- else:
- print("Using normal MAS for VITS1")
- use_noise_scaled_mas = False
- mas_noise_scale_initial = 0.0
- noise_scale_delta = 0.0
- if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True:
- print("Using duration discriminator for VITS2")
- use_duration_discriminator = True
- net_dur_disc = DurationDiscriminator(
- hps.model.hidden_channels,
- hps.model.hidden_channels,
- 3,
- 0.1,
- gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
- ).cuda(rank)
- if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True:
- if hps.data.n_speakers == 0:
- raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model")
- use_spk_conditioned_encoder = True
- else:
- print("Using normal encoder for VITS1")
- use_spk_conditioned_encoder = False
-
- net_g = SynthesizerTrn(
- len(symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- n_speakers=hps.data.n_speakers,
- mas_noise_scale_initial = mas_noise_scale_initial,
- noise_scale_delta = noise_scale_delta,
- **hps.model).cuda(rank)
-
- freeze_enc = getattr(hps.model, "freeze_enc", False)
- if freeze_enc:
- print("freeze encoder !!!")
- for param in net_g.enc_p.parameters():
- param.requires_grad = False
-
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
- optim_g = torch.optim.AdamW(
- filter(lambda p: p.requires_grad, net_g.parameters()),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- optim_d = torch.optim.AdamW(
- net_d.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- if net_dur_disc is not None:
- optim_dur_disc = torch.optim.AdamW(
- net_dur_disc.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- else:
- optim_dur_disc = None
- net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
- net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
- if net_dur_disc is not None:
- net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True)
-
- pretrain_dir = None
- if pretrain_dir is None:
- try:
- if net_dur_disc is not None:
- _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont)
- _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
- optim_g, skip_optimizer=not hps.cont)
- _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
- optim_d, skip_optimizer=not hps.cont)
-
- epoch_str = max(epoch_str, 1)
- global_step = (epoch_str - 1) * len(train_loader)
- except Exception as e:
- print(e)
- epoch_str = 1
- global_step = 0
- else:
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g,
- optim_g, True)
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d,
- optim_d, True)
-
-
-
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
- if net_dur_disc is not None:
- scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
- else:
- scheduler_dur_disc = None
- scaler = GradScaler(enabled=hps.train.fp16_run)
-
- for epoch in range(epoch_str, hps.train.epochs + 1):
- if rank == 0:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
- else:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None)
- scheduler_g.step()
- scheduler_d.step()
- if net_dur_disc is not None:
- scheduler_dur_disc.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
- net_g, net_d, net_dur_disc = nets
- optim_g, optim_d, optim_dur_disc = optims
- scheduler_g, scheduler_d, scheduler_dur_disc = schedulers
- train_loader, eval_loader = loaders
- if writers is not None:
- writer, writer_eval = writers
-
- train_loader.batch_sampler.set_epoch(epoch)
- global global_step
-
- net_g.train()
- net_d.train()
- if net_dur_disc is not None:
- net_dur_disc.train()
- for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)):
- if net_g.module.use_noise_scaled_mas:
- current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step
- net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
- x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
- spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
- y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
- speakers = speakers.cuda(rank, non_blocking=True)
- tone = tone.cuda(rank, non_blocking=True)
- language = language.cuda(rank, non_blocking=True)
- bert = bert.cuda(rank, non_blocking=True)
-
- with autocast(enabled=hps.train.fp16_run):
- y_hat, l_length, attn, ids_slice, x_mask, z_mask, \
- (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert)
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
-
- y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
- # Discriminator
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
- with autocast(enabled=False):
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
- loss_disc_all = loss_disc
- if net_dur_disc is not None:
- y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach())
- with autocast(enabled=False):
-                    # TODO: this should probably be averaged using the mask; for now, just average over everything
- loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g)
- loss_dur_disc_all = loss_dur_disc
- optim_dur_disc.zero_grad()
- scaler.scale(loss_dur_disc_all).backward()
- scaler.unscale_(optim_dur_disc)
- grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None)
- scaler.step(optim_dur_disc)
-
- optim_d.zero_grad()
- scaler.scale(loss_disc_all).backward()
- scaler.unscale_(optim_d)
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
- scaler.step(optim_d)
-
- with autocast(enabled=hps.train.fp16_run):
- # Generator
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
- if net_dur_disc is not None:
- y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_)
- with autocast(enabled=False):
- loss_dur = torch.sum(l_length.float())
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
- loss_fm = feature_loss(fmap_r, fmap_g)
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
- if net_dur_disc is not None:
- loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g)
- loss_gen_all += loss_dur_gen
- optim_g.zero_grad()
- scaler.scale(loss_gen_all).backward()
- scaler.unscale_(optim_g)
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
- scaler.step(optim_g)
- scaler.update()
-
- if rank == 0:
- if global_step % hps.train.log_interval == 0:
- lr = optim_g.param_groups[0]['lr']
- losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
- logger.info('Train Epoch: {} [{:.0f}%]'.format(
- epoch,
- 100. * batch_idx / len(train_loader)))
- logger.info([x.item() for x in losses] + [global_step, lr])
-
- scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
- "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
- scalar_dict.update(
- {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
- scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
- scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
- scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
-
- image_dict = {
- "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
- "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
- "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy())
- }
- utils.summarize(
- writer=writer,
- global_step=global_step,
- images=image_dict,
- scalars=scalar_dict)
-
- if global_step % hps.train.eval_interval == 0:
- evaluate(hps, net_g, eval_loader, writer_eval)
- utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
- utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
- if net_dur_disc is not None:
- utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step)))
- keep_ckpts = getattr(hps.train, 'keep_ckpts', 5)
- if keep_ckpts > 0:
- utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True)
-
-
- global_step += 1
-
- if rank == 0:
- logger.info('====> Epoch: {}'.format(epoch))
-
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
- generator.eval()
- image_dict = {}
- audio_dict = {}
- print("Evaluating ...")
- with torch.no_grad():
- for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader):
- x, x_lengths = x.cuda(), x_lengths.cuda()
- spec, spec_lengths = spec.cuda(), spec_lengths.cuda()
- y, y_lengths = y.cuda(), y_lengths.cuda()
- speakers = speakers.cuda()
- bert = bert.cuda()
- tone = tone.cuda()
- language = language.cuda()
- for use_sdp in [True, False]:
- y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0)
- y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length
-
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1).float(),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
- image_dict.update({
- f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
- })
- audio_dict.update({
- f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]]
- })
- image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
- audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]})
-
- utils.summarize(
- writer=writer_eval,
- global_step=global_step,
- images=image_dict,
- audios=audio_dict,
- audio_sampling_rate=hps.data.sampling_rate
- )
- generator.train()
-
-if __name__ == "__main__":
- main()
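Each optimizer in the loop above follows the standard `torch.cuda.amp` recipe: forward under `autocast`, backward on the scaled loss, unscale, clip, step. A minimal sketch of that recipe in isolation (`model`, `optimizer` and `compute_loss` are placeholders, not names from the file):

```python
# Minimal AMP update pattern, mirroring how optim_d / optim_g are driven above.
import torch
from torch.cuda.amp import autocast, GradScaler

scaler = GradScaler(enabled=True)

def amp_step(model, optimizer, compute_loss):
    with autocast(enabled=True):
        loss = compute_loss()
    optimizer.zero_grad()
    scaler.scale(loss).backward()        # backprop through the scaled loss
    scaler.unscale_(optimizer)           # unscale so clipping sees true gradient values
    torch.nn.utils.clip_grad_value_(model.parameters(), 5.0)
    scaler.step(optimizer)               # skipped automatically if inf/NaN grads are found
    scaler.update()
    return loss.item()
```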
diff --git a/spaces/digitalxingtong/Miiu-Bert-Vits2/text/japanese.py b/spaces/digitalxingtong/Miiu-Bert-Vits2/text/japanese.py
deleted file mode 100644
index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Miiu-Bert-Vits2/text/japanese.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py
-import re
-import sys
-
-import pyopenjtalk
-
-from text import symbols
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(
- r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(
- r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (symbol, Japanese) pairs for marks:
-_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('%', 'パーセント')
-]]
-
-
-# List of (consonant, sokuon) pairs:
-_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
- (r'Q([↑↓]*[kg])', r'k#\1'),
- (r'Q([↑↓]*[tdjʧ])', r't#\1'),
- (r'Q([↑↓]*[sʃ])', r's\1'),
- (r'Q([↑↓]*[pb])', r'p#\1')
-]]
-
-# List of (consonant, hatsuon) pairs:
-_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
- (r'N([↑↓]*[pbm])', r'm\1'),
- (r'N([↑↓]*[ʧʥj])', r'n^\1'),
- (r'N([↑↓]*[tdn])', r'n\1'),
- (r'N([↑↓]*[kg])', r'ŋ\1')
-]]
-
-
-
-def post_replace_ph(ph):
- rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- 'v': "V"
- }
- if ph in rep_map.keys():
- ph = rep_map[ph]
- if ph in symbols:
- return ph
- if ph not in symbols:
- ph = 'UNK'
- return ph
-
-def symbols_to_japanese(text):
- for regex, replacement in _symbols_to_japanese:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def preprocess_jap(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- text = symbols_to_japanese(text)
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = []
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- p = pyopenjtalk.g2p(sentence)
- text += p.split(" ")
-
- if i < len(marks):
- text += [marks[i].replace(' ', '')]
- return text
-
-def text_normalize(text):
-    # TODO: Japanese text normalization
- return text
-
-def g2p(norm_text):
- phones = preprocess_jap(norm_text)
- phones = [post_replace_ph(i) for i in phones]
- # todo: implement tones and word2ph
- tones = [0 for i in phones]
- word2ph = [1 for i in phones]
- return phones, tones, word2ph
-
-
-if __name__ == '__main__':
- for line in open("../../../Downloads/transcript_utf8.txt").readlines():
- text = line.split(":")[1]
- phones, tones, word2ph = g2p(text)
- for p in phones:
- if p == "z":
- print(text, phones)
- sys.exit(0)
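A hedged usage sketch of the pipeline above (requires `pyopenjtalk`; the exact phoneme strings depend on the installed dictionary, so the output shown is only indicative):

```python
# Illustrative call into the g2p pipeline defined above.
phones, tones, word2ph = g2p("こんにちは、世界。")
print(phones)   # e.g. ['k', 'o', 'N', 'n', 'i', 'ch', 'i', 'w', 'a', ',', 's', 'e', 'k', 'a', 'i', '.']
print(tones)    # all zeros: tones are not implemented yet (see the TODO above)
```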
diff --git a/spaces/digitalxingtong/Nailv-Bert-Vits2/README_zh.md b/spaces/digitalxingtong/Nailv-Bert-Vits2/README_zh.md
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Nailv-Bert-Vits2/README_zh.md
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/spaces/dineshreddy/WALT/mmdet/version.py b/spaces/dineshreddy/WALT/mmdet/version.py
deleted file mode 100644
index a3b741aed16212ad1dee277d519b259ae3184b19..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/version.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) Open-MMLab. All rights reserved.
-
-__version__ = '2.11.0'
-short_version = __version__
-
-
-def parse_version_info(version_str):
- version_info = []
- for x in version_str.split('.'):
- if x.isdigit():
- version_info.append(int(x))
- elif x.find('rc') != -1:
- patch_version = x.split('rc')
- version_info.append(int(patch_version[0]))
- version_info.append(f'rc{patch_version[1]}')
- return tuple(version_info)
-
-
-version_info = parse_version_info(__version__)
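The parser above splits on dots and peels release-candidate suffixes apart; its behaviour follows directly from the code:

```python
# Quick check of parse_version_info (values follow from the code above).
print(parse_version_info("2.11.0"))      # (2, 11, 0)
print(parse_version_info("2.12.0rc1"))   # (2, 12, 0, 'rc1')
```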
diff --git a/spaces/dmeck/RVC-Speakers/vits/text/cleaners.py b/spaces/dmeck/RVC-Speakers/vits/text/cleaners.py
deleted file mode 100644
index 0482fd38c1f85a3df9e23746b82da4b8013e23c2..0000000000000000000000000000000000000000
--- a/spaces/dmeck/RVC-Speakers/vits/text/cleaners.py
+++ /dev/null
@@ -1,475 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Cleaners are transformations that run over the input text at both training and eval time.
-
-Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
-hyperparameter. Some cleaners are English-specific. You'll typically want to use:
- 1. "english_cleaners" for English text
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
- the symbols in symbols.py to match your data).
-'''
-
-import re
-from unidecode import unidecode
-import pyopenjtalk
-from jamo import h2j, j2hcj
-from pypinyin import lazy_pinyin, BOPOMOFO
-import jieba, cn2an
-
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
-# Regular expression matching whitespace:
-_whitespace_re = re.compile(r'\s+')
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
- ('mrs', 'misess'),
- ('mr', 'mister'),
- ('dr', 'doctor'),
- ('st', 'saint'),
- ('co', 'company'),
- ('jr', 'junior'),
- ('maj', 'major'),
- ('gen', 'general'),
- ('drs', 'doctors'),
- ('rev', 'reverend'),
- ('lt', 'lieutenant'),
- ('hon', 'honorable'),
- ('sgt', 'sergeant'),
- ('capt', 'captain'),
- ('esq', 'esquire'),
- ('ltd', 'limited'),
- ('col', 'colonel'),
- ('ft', 'fort'),
-]]
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄳ', 'ㄱㅅ'),
- ('ㄵ', 'ㄴㅈ'),
- ('ㄶ', 'ㄴㅎ'),
- ('ㄺ', 'ㄹㄱ'),
- ('ㄻ', 'ㄹㅁ'),
- ('ㄼ', 'ㄹㅂ'),
- ('ㄽ', 'ㄹㅅ'),
- ('ㄾ', 'ㄹㅌ'),
- ('ㄿ', 'ㄹㅍ'),
- ('ㅀ', 'ㄹㅎ'),
- ('ㅄ', 'ㅂㅅ'),
- ('ㅘ', 'ㅗㅏ'),
- ('ㅙ', 'ㅗㅐ'),
- ('ㅚ', 'ㅗㅣ'),
- ('ㅝ', 'ㅜㅓ'),
- ('ㅞ', 'ㅜㅔ'),
- ('ㅟ', 'ㅜㅣ'),
- ('ㅢ', 'ㅡㅣ'),
- ('ㅑ', 'ㅣㅏ'),
- ('ㅒ', 'ㅣㅐ'),
- ('ㅕ', 'ㅣㅓ'),
- ('ㅖ', 'ㅣㅔ'),
- ('ㅛ', 'ㅣㅗ'),
- ('ㅠ', 'ㅣㅜ')
-]]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', '에이'),
- ('b', '비'),
- ('c', '시'),
- ('d', '디'),
- ('e', '이'),
- ('f', '에프'),
- ('g', '지'),
- ('h', '에이치'),
- ('i', '아이'),
- ('j', '제이'),
- ('k', '케이'),
- ('l', '엘'),
- ('m', '엠'),
- ('n', '엔'),
- ('o', '오'),
- ('p', '피'),
- ('q', '큐'),
- ('r', '아르'),
- ('s', '에스'),
- ('t', '티'),
- ('u', '유'),
- ('v', '브이'),
- ('w', '더블유'),
- ('x', '엑스'),
- ('y', '와이'),
- ('z', '제트')
-]]
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-
-def expand_abbreviations(text):
- for regex, replacement in _abbreviations:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def lowercase(text):
- return text.lower()
-
-
-def collapse_whitespace(text):
- return re.sub(_whitespace_re, ' ', text)
-
-
-def convert_to_ascii(text):
- return unidecode(text)
-
-
-def japanese_to_romaji_with_accent(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = ''
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- if text!='':
- text+=' '
- labels = pyopenjtalk.extract_fullcontext(sentence)
- for n, label in enumerate(labels):
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
- if phoneme not in ['sil','pau']:
- text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
- else:
- continue
- n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
- a2_next=-1
- else:
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
- # Accent phrase boundary
- if a3 == 1 and a2_next == 1:
- text += ' '
- # Falling
- elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
- text += '↓'
- # Rising
- elif a2 == 1 and a2_next == 2:
- text += '↑'
- if i < len(marks):
- text += unidecode(marks[i]).replace(' ', '')
- if len(text) > 0 and re.match('[A-Za-zɯɹəɥ→↓↑]', text[-1]):
- text += '.'
- return text
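The cleaners above are plain regex substitution passes. A small illustrative run of the English-oriented helpers (the input sentence is made up):

```python
# Illustrative pass over English text using the helpers defined above.
text = "Dr.  Müller lives on   St. Kilda Rd."
text = convert_to_ascii(text)        # 'Dr.  Muller lives on   St. Kilda Rd.'
text = lowercase(text)
text = expand_abbreviations(text)    # 'doctor  muller lives on   saint kilda rd.'
text = collapse_whitespace(text)
print(text)                          # 'doctor muller lives on saint kilda rd.'
```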
diff --git a/spaces/dmeck/RVC-Speakers/vits/utils.py b/spaces/dmeck/RVC-Speakers/vits/utils.py
deleted file mode 100644
index 1de37df268eb85dcbf636db950b4e1692245bddc..0000000000000000000000000000000000000000
--- a/spaces/dmeck/RVC-Speakers/vits/utils.py
+++ /dev/null
@@ -1,436 +0,0 @@
-import os
-import glob
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-import regex as re
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-zh_pattern = re.compile(r'[\u4e00-\u9fa5]')
-en_pattern = re.compile(r'[a-zA-Z]')
-jp_pattern = re.compile(r'[\u3040-\u30ff\u31f0-\u31ff]')
-kr_pattern = re.compile(r'[\uac00-\ud7af\u1100-\u11ff\u3130-\u318f\ua960-\ua97f]')
-num_pattern = re.compile(r'[0-9]')
-comma = r"(?<=[.。!!??;;,,、::'\"‘“”’()()《》「」~——])" # lookbehind; must be fixed length
-tags = {'ZH': '[ZH]', 'EN': '[EN]', 'JP': '[JA]', 'KR': '[KR]'}
-
-
-def tag_cjke(text):
- '''Tag Chinese/English/Japanese/Korean text: Chinese and Japanese cannot be told apart by regex alone, so split into sentences first, separate Japanese from Chinese, then tag each piece; this handles most cases.'''
- sentences = re.split(r"([.。!!??;;,,、::'\"‘“”’()()【】《》「」~——]+ *(?![0-9]))", text) # split into sentences, excluding decimal points
- sentences.append("")
- sentences = ["".join(i) for i in zip(sentences[0::2], sentences[1::2])]
- # print(sentences)
- prev_lang = None
- tagged_text = ""
- for s in sentences:
- # skip sentences that consist only of symbols
- nu = re.sub(r'[\s\p{P}]+', '', s, flags=re.U).strip()
- if len(nu) == 0:
- continue
- s = re.sub(r'[()()《》「」【】‘“”’]+', '', s)
- jp = re.findall(jp_pattern, s)
- # if the sentence contains Japanese characters, treat it as Japanese
- if len(jp) > 0:
- prev_lang, tagged_jke = tag_jke(s, prev_lang)
- tagged_text += tagged_jke
- else:
- prev_lang, tagged_cke = tag_cke(s, prev_lang)
- tagged_text += tagged_cke
- return tagged_text
-
-
-def tag_jke(text, prev_sentence=None):
- '''Tag English/Japanese/Korean text'''
- # initialize tagging state
- tagged_text = ""
- prev_lang = None
- tagged = 0
- # iterate over the text
- for char in text:
- # determine which language the current character belongs to
- if jp_pattern.match(char):
- lang = "JP"
- elif zh_pattern.match(char):
- lang = "JP"
- elif kr_pattern.match(char):
- lang = "KR"
- elif en_pattern.match(char):
- lang = "EN"
- # elif num_pattern.match(char):
- # lang = prev_sentence
- else:
- lang = None
- tagged_text += char
- continue
- # if the language differs from the previous one, insert tags
- if lang != prev_lang:
- tagged = 1
- if prev_lang == None: # start of the text
- tagged_text = tags[lang] + tagged_text
- else:
- tagged_text = tagged_text + tags[prev_lang] + tags[lang]
-
- # update the previous-language marker
- prev_lang = lang
-
- # append the current character to the tagged text
- tagged_text += char
-
- # close the tag for the last language
- if prev_lang:
- tagged_text += tags[prev_lang]
- if not tagged:
- prev_lang = prev_sentence
- tagged_text = tags[prev_lang] + tagged_text + tags[prev_lang]
-
- return prev_lang, tagged_text
-
-
-def tag_cke(text, prev_sentence=None):
- '''Tag Chinese/English/Korean text'''
- # initialize tagging state
- tagged_text = ""
- prev_lang = None
- # whether every character was skipped and nothing got tagged
- tagged = 0
-
- # iterate over the text
- for char in text:
- # determine which language the current character belongs to
- if zh_pattern.match(char):
- lang = "ZH"
- elif kr_pattern.match(char):
- lang = "KR"
- elif en_pattern.match(char):
- lang = "EN"
- # elif num_pattern.match(char):
- # lang = prev_sentence
- else:
- # skip this character
- lang = None
- tagged_text += char
- continue
-
- # if the language differs from the previous one, insert tags
- if lang != prev_lang:
- tagged = 1
- if prev_lang == None: # start of the text
- tagged_text = tags[lang] + tagged_text
- else:
- tagged_text = tagged_text + tags[prev_lang] + tags[lang]
-
- # update the previous-language marker
- prev_lang = lang
-
- # append the current character to the tagged text
- tagged_text += char
-
- # close the tag for the last language
- if prev_lang:
- tagged_text += tags[prev_lang]
- # if nothing was tagged, inherit the previous sentence's language
- if tagged == 0:
- prev_lang = prev_sentence
- tagged_text = tags[prev_lang] + tagged_text + tags[prev_lang]
- return prev_lang, tagged_text
-
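A rough illustration of what the tagging helpers above produce; the output string was traced by hand from the code, so treat it as approximate:

```python
# Illustrative call into tag_cjke defined above.
print(tag_cjke("你好hello"))   # roughly: '[ZH]你好[ZH][EN]hello[EN]'
```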
-
-def load_checkpoint(checkpoint_path, model, optimizer=None, drop_speaker_emb=False):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict = {}
- for k, v in state_dict.items():
- try:
- if k == 'emb_g.weight':
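- # speaker embedding table: keep the freshly initialised rows when drop_speaker_emb is set, otherwise copy the pretrained rows into the (possibly larger) new table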
- if drop_speaker_emb:
- new_state_dict[k] = v
- continue
- v[:saved_state_dict[k].shape[0], :] = saved_state_dict[k]
- new_state_dict[k] = v
- else:
- new_state_dict[k] = saved_state_dict[k]
- except Exception:
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})".format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
- iteration, checkpoint_path))
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- torch.save({'model': state_dict,
- 'iteration': iteration,
- 'optimizer': optimizer.state_dict() if optimizer is not None else None,
- 'learning_rate': learning_rate}, checkpoint_path)
-
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
- for k, v in scalars.items():
- writer.add_scalar(k, v, global_step)
- for k, v in histograms.items():
- writer.add_histogram(k, v, global_step)
- for k, v in images.items():
- writer.add_image(k, v, global_step, dataformats='HWC')
- for k, v in audios.items():
- writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def extract_digits(f):
- digits = "".join(filter(str.isdigit, f))
- return int(digits) if digits else -1
-
-
-def latest_checkpoint_path(dir_path, regex="G_[0-9]*.pth"):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: extract_digits(f))
- x = f_list[-1]
- print(f"latest_checkpoint_path:{x}")
- return x
-
-
-def oldest_checkpoint_path(dir_path, regex="G_[0-9]*.pth", preserved=4):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: extract_digits(f))
- if len(f_list) > preserved:
- x = f_list[0]
- print(f"oldest_checkpoint_path:{x}")
- return x
- return ""
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def str2bool(v):
- if isinstance(v, bool):
- return v
- if v.lower() in ('yes', 'true', 't', 'y', '1'):
- return True
- elif v.lower() in ('no', 'false', 'f', 'n', '0'):
- return False
- else:
- raise argparse.ArgumentTypeError('Boolean value expected.')
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/modified_finetune_speaker.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, default="pretrained_models",
- help='Model name')
- parser.add_argument('-n', '--max_epochs', type=int, default=50,
- help='finetune epochs')
- parser.add_argument('--cont', type=str2bool, default=False,
- help='whether to continue training on the latest checkpoint')
- parser.add_argument('--drop_speaker_embed', type=str2bool, default=False,
- help='whether to drop existing characters')
- parser.add_argument('--train_with_pretrained_model', type=str2bool, default=True,
- help='whether to train with pretrained model')
- parser.add_argument('--preserved', type=int, default=4,
- help='Number of preserved models')
-
- args = parser.parse_args()
- model_dir = os.path.join("./", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- hparams.max_epochs = args.max_epochs
- hparams.cont = args.cont
- hparams.drop_speaker_embed = args.drop_speaker_embed
- hparams.train_with_pretrained_model = args.train_with_pretrained_model
- hparams.preserved = args.preserved
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r", encoding="utf-8") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
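-
-# Illustrative usage: HParams(**{"train": {"batch_size": 16}}).train.batch_size == 16,
-# since nested dict values are recursively wrapped in HParams instances.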
diff --git a/spaces/dotmet/Real-ESRGAN-Enhanced-Anime-Diffusion/interface.py b/spaces/dotmet/Real-ESRGAN-Enhanced-Anime-Diffusion/interface.py
deleted file mode 100644
index 6e279a2c1e37f066669920b229665aba68ea8017..0000000000000000000000000000000000000000
--- a/spaces/dotmet/Real-ESRGAN-Enhanced-Anime-Diffusion/interface.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import cv2
-from PIL import Image
-import glob
-import os
-from basicsr.archs.rrdbnet_arch import RRDBNet
-from basicsr.utils.download_util import load_file_from_url
-
-from realesrgan import RealESRGANer
-from realesrgan.archs.srvgg_arch import SRVGGNetCompact
-
-def realEsrgan(model_name="RealESRGAN_x4plus_anime_6B",
- model_path = None,
- input_dir = 'inputs',
- output_dir = 'results',
- denoise_strength = 0.5,
- outscale = 4,
- suffix = 'out',
- tile = 200,
- tile_pad = 10,
- pre_pad = 0,
- face_enhance = True,
- alpha_upsampler = 'realesrgan',
- out_ext = 'auto',
- fp32 = True,
- gpu_id = None,
- ):
-
- # determine models according to model names
- model_name = model_name.split('.')[0]
- if model_name == 'RealESRGAN_x4plus': # x4 RRDBNet model
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
- netscale = 4
- file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth']
- elif model_name == 'RealESRNet_x4plus': # x4 RRDBNet model
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
- netscale = 4
- file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']
- elif model_name == 'RealESRGAN_x4plus_anime_6B': # x4 RRDBNet model with 6 blocks
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
- netscale = 4
- file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']
- elif model_name == 'RealESRGAN_x2plus': # x2 RRDBNet model
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
- netscale = 2
- file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth']
- elif model_name == 'realesr-animevideov3': # x4 VGG-style model (XS size)
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
- netscale = 4
- file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth']
- elif model_name == 'realesr-general-x4v3': # x4 VGG-style model (S size)
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
- netscale = 4
- file_url = [
- 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth',
- 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
- ]
-
- # determine model paths
- if model_path is None:
- model_path = os.path.join('weights', model_name + '.pth')
- if not os.path.isfile(model_path):
- ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
- for url in file_url:
- # model_path will be updated
- model_path = load_file_from_url(
- url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)
-
- # use dni to control the denoise strength
- dni_weight = None
- if model_name == 'realesr-general-x4v3' and denoise_strength != 1:
- wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3')
- model_path = [model_path, wdn_model_path]
- dni_weight = [denoise_strength, 1 - denoise_strength]
-
- # restorer
- upsampler = RealESRGANer(
- scale=netscale,
- model_path=model_path,
- dni_weight=dni_weight,
- model=model,
- tile=tile,
- tile_pad=tile_pad,
- pre_pad=pre_pad,
- half=not fp32,
- gpu_id=gpu_id)
-
- if face_enhance: # Use GFPGAN for face enhancement
- from gfpgan import GFPGANer
- face_enhancer = GFPGANer(
- model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
- upscale=outscale,
- arch='clean',
- channel_multiplier=2,
- bg_upsampler=upsampler)
- os.makedirs(output_dir, exist_ok=True)
-
- if os.path.isfile(input_dir):
- paths = [input_dir]
- else:
- paths = sorted(glob.glob(os.path.join(input_dir, '*')))
-
- Imgs = []
- for idx, path in enumerate(paths):
- imgname, extension = os.path.splitext(os.path.basename(path))
- print(f'Scaling x{outscale}:', path)
-
- img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
- if len(img.shape) == 3 and img.shape[2] == 4:
- img_mode = 'RGBA'
- else:
- img_mode = None
-
- try:
- if face_enhance:
- _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
- else:
- output, _ = upsampler.enhance(img, outscale=outscale)
- except RuntimeError as error:
- print('Error', error)
- print('If you encounter CUDA or RAM out of memory, try to set --tile with a smaller number.')
- else:
- if out_ext == 'auto':
- extension = extension[1:]
- else:
- extension = out_ext
- if img_mode == 'RGBA': # RGBA images should be saved in png format
- extension = 'png'
- if suffix == '':
- save_path = os.path.join(output_dir, f'{imgname}.{extension}')
- else:
- save_path = os.path.join(output_dir, f'{imgname}_{suffix}.{extension}')
-
- cv2.imwrite(save_path, output)
-
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- img = Image.fromarray(img)
- Imgs.append(img)
-
- return Imgs
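-
-# Illustrative usage (paths are placeholders): realEsrgan(input_dir='inputs', output_dir='results', outscale=4)
-# writes the upscaled files into output_dir; as written, the returned list holds the input images converted to RGB PIL.Image objects.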
-
diff --git a/spaces/duong11111/ChatGPT4.0/app.py b/spaces/duong11111/ChatGPT4.0/app.py
deleted file mode 100644
index 7e09e57ef928fd2451fd0ed1295d0994ca75d026..0000000000000000000000000000000000000000
--- a/spaces/duong11111/ChatGPT4.0/app.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import gradio as gr
-import os
-import json
-import requests
-
-#Streaming endpoint
-API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
-
-#Huggingface provided GPT4 OpenAI API Key
-OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-
-#Inference function
-def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
-
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {OPENAI_API_KEY}"
- }
- print(f"system message is ^^ {system_msg}")
- if system_msg.strip() == '':
- initial_message = [{"role": "user", "content": f"{inputs}"},]
- multi_turn_message = []
- else:
- initial_message= [{"role": "system", "content": system_msg},
- {"role": "user", "content": f"{inputs}"},]
- multi_turn_message = [{"role": "system", "content": system_msg},]
-
- if chat_counter == 0 :
- payload = {
- "model": "gpt-4",
- "messages": initial_message ,
- "temperature" : 1.0,
- "top_p":1.0,
- "n" : 1,
- "stream": True,
- "presence_penalty":0,
- "frequency_penalty":0,
- }
- print(f"chat_counter - {chat_counter}")
- else: #if chat_counter != 0 :
- messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},]
- for data in chatbot:
- user = {}
- user["role"] = "user"
- user["content"] = data[0]
- assistant = {}
- assistant["role"] = "assistant"
- assistant["content"] = data[1]
- messages.append(user)
- messages.append(assistant)
- temp = {}
- temp["role"] = "user"
- temp["content"] = inputs
- messages.append(temp)
- #messages
- payload = {
- "model": "gpt-4",
- "messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}],
- "temperature" : temperature, #1.0,
- "top_p": top_p, #1.0,
- "n" : 1,
- "stream": True,
- "presence_penalty":0,
- "frequency_penalty":0,}
-
- chat_counter+=1
-
- history.append(inputs)
- print(f"Logging : payload is - {payload}")
- # make a POST request to the API endpoint using the requests.post method, passing in stream=True
- response = requests.post(API_URL, headers=headers, json=payload, stream=True)
- print(f"Logging : response code - {response}")
- token_counter = 0
- partial_words = ""
-
- counter=0
- for chunk in response.iter_lines():
- #Skipping first chunk
- if counter == 0:
- counter+=1
- continue
- # check whether each line is non-empty
- if chunk.decode() :
- chunk = chunk.decode()
- # decode each line as response data is in bytes
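- # streamed lines have the SSE form 'data: {...}', so chunk[6:] drops the 'data: ' prefix before JSON parsing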
- if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
- partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
- if token_counter == 0:
- history.append(" " + partial_words)
- else:
- history[-1] = partial_words
- chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ]  # convert to list of (user, assistant) tuples
- token_counter+=1
- yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history}
-
-#Resetting to blank
-def reset_textbox():
- return gr.update(value='')
-
-#to set a component as visible=False
-def set_visible_false():
- return gr.update(visible=False)
-
-#to set a component as visible=True
-def set_visible_true():
- return gr.update(visible=True)
-
-title = """🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming """
-
-#display message for themes feature
-theme_addon_msg = """🌟 Discover Gradio Themes with this Demo, featuring v3.22.0! Gradio v3.23.0 also enables seamless Theme sharing. You can develop or modify a theme, and send it to the hub using simple theme.push_to_hub()
.
- 🏆Participate in Gradio's Theme Building Hackathon to exhibit your creative flair and win fabulous rewards! Join here - Gradio-Themes-Party🎨 🏆
-"""
-
-#Using info to add additional information about System message in GPT4
-system_msg_info = """A conversation could begin with a system message to gently instruct the assistant.
-System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'"""
-
-#Modifying existing Gradio Theme
-theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green",
- text_size=gr.themes.sizes.text_lg)
-
-with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""",
- theme=theme) as demo:
- gr.HTML(title)
- gr.HTML("""🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌""")
- gr.HTML(theme_addon_msg)
- gr.HTML(''' Duplicate the Space and run securely with your OpenAI API Key ''')
-
- with gr.Column(elem_id = "col_container"):
- #GPT4 API Key is provided by Huggingface
- with gr.Accordion(label="System message:", open=False):
- system_msg = gr.Textbox(label="Instruct the AI Assistant to set its behaviour", info = system_msg_info, value="")
- accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False)
- chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot")
- inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter")
- state = gr.State([])
- with gr.Row():
- with gr.Column(scale=7):
- b1 = gr.Button().style(full_width=True)
- with gr.Column(scale=3):
- server_status_code = gr.Textbox(label="Status code from OpenAI server", )
-
- #top_p, temperature
- with gr.Accordion("Parameters", open=False):
- top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
- temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
- chat_counter = gr.Number(value=0, visible=False, precision=0)
-
- #Event handling
- inputs.submit( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
- b1.click( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
-
- inputs.submit(set_visible_false, [], [system_msg])
- b1.click(set_visible_false, [], [system_msg])
- inputs.submit(set_visible_true, [], [accordion_msg])
- b1.click(set_visible_true, [], [accordion_msg])
-
- b1.click(reset_textbox, [], [inputs])
- inputs.submit(reset_textbox, [], [inputs])
-
- #Examples
- with gr.Accordion(label="Examples for System message:", open=False):
- gr.Examples(
- examples = [["""You are an AI programming assistant.
-
- - Follow the user's requirements carefully and to the letter.
- - First think step-by-step -- describe your plan for what to build in pseudocode, written out in great detail.
- - Then output the code in a single code block.
- - Minimize any other prose."""], ["""You are ComedianGPT who is a helpful assistant. You answer everything with a joke and witty replies."""],
- ["You are ChefGPT, a helpful assistant who answers questions with culinary expertise and a pinch of humor."],
- ["You are FitnessGuruGPT, a fitness expert who shares workout tips and motivation with a playful twist."],
- ["You are SciFiGPT, an AI assistant who discusses science fiction topics with a blend of knowledge and wit."],
- ["You are PhilosopherGPT, a thoughtful assistant who responds to inquiries with philosophical insights and a touch of humor."],
- ["You are EcoWarriorGPT, a helpful assistant who shares environment-friendly advice with a lighthearted approach."],
- ["You are MusicMaestroGPT, a knowledgeable AI who discusses music and its history with a mix of facts and playful banter."],
- ["You are SportsFanGPT, an enthusiastic assistant who talks about sports and shares amusing anecdotes."],
- ["You are TechWhizGPT, a tech-savvy AI who can help users troubleshoot issues and answer questions with a dash of humor."],
- ["You are FashionistaGPT, an AI fashion expert who shares style advice and trends with a sprinkle of wit."],
- ["You are ArtConnoisseurGPT, an AI assistant who discusses art and its history with a blend of knowledge and playful commentary."],
- ["You are a helpful assistant that provides detailed and accurate information."],
- ["You are an assistant that speaks like Shakespeare."],
- ["You are a friendly assistant who uses casual language and humor."],
- ["You are a financial advisor who gives expert advice on investments and budgeting."],
- ["You are a health and fitness expert who provides advice on nutrition and exercise."],
- ["You are a travel consultant who offers recommendations for destinations, accommodations, and attractions."],
- ["You are a movie critic who shares insightful opinions on films and their themes."],
- ["You are a history enthusiast who loves to discuss historical events and figures."],
- ["You are a tech-savvy assistant who can help users troubleshoot issues and answer questions about gadgets and software."],
- ["You are an AI poet who can compose creative and evocative poems on any given topic."],],
- inputs = system_msg,)
-
-demo.queue(max_size=99, concurrency_count=20).launch(debug=True)
\ No newline at end of file
diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py
deleted file mode 100644
index af04e614c8f1ac43faf363b1a9f6bfd667fbde21..0000000000000000000000000000000000000000
--- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import commons
-import models
-
-import math
-from torch import nn
-from torch.nn import functional as F
-
-import modules
-import attentions
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- emotion_embedding):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emotion_embedding = emotion_embedding
-
- if self.n_vocab != 0:
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- if emotion_embedding:
- self.emo_proj = nn.Linear(1024, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, emotion_embedding=None):
- if self.n_vocab != 0:
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- if emotion_embedding is not None:
- print("emotion added")
- x = x + self.emo_proj(emotion_embedding.unsqueeze(1))
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class SynthesizerTrn(models.SynthesizerTrn):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- use_sdp=True,
- emotion_embedding=False,
- ONNX_dir="./ONNX_net/",
- **kwargs):
-
- super().__init__(
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=n_speakers,
- gin_channels=gin_channels,
- use_sdp=use_sdp,
- **kwargs
- )
- self.ONNX_dir = ONNX_dir
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- emotion_embedding)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
-
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None,
- emotion_embedding=None):
- from ONNXVITS_utils import runonnx
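- # the text encoder runs in PyTorch below, while the duration predictor (dp.onnx), flow (flow.onnx) and decoder (dec.onnx) are executed as exported ONNX graphs via runonnx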
- with torch.no_grad():
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
-
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- # logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
- logw = runonnx(f"{self.ONNX_dir}dp.onnx", x=x.numpy(), x_mask=x_mask.numpy(), g=g.numpy())
- logw = torch.from_numpy(logw[0])
-
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,
- 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
-
- # z = self.flow(z_p, y_mask, g=g, reverse=True)
- z = runonnx(f"{self.ONNX_dir}flow.onnx", z_p=z_p.numpy(), y_mask=y_mask.numpy(), g=g.numpy())
- z = torch.from_numpy(z[0])
-
- # o = self.dec((z * y_mask)[:,:,:max_len], g=g)
- o = runonnx(f"{self.ONNX_dir}dec.onnx", z_in=(z * y_mask)[:, :, :max_len].numpy(), g=g.numpy())
- o = torch.from_numpy(o[0])
-
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
\ No newline at end of file
diff --git a/spaces/editing-images/project/static/js/bulma-carousel.min.js b/spaces/editing-images/project/static/js/bulma-carousel.min.js
deleted file mode 100644
index 5fff0695f00cf9da60dd87aa72c51367b00e92ff..0000000000000000000000000000000000000000
--- a/spaces/editing-images/project/static/js/bulma-carousel.min.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaCarousel=e():t.bulmaCarousel=e()}("undefined"!=typeof self?self:this,function(){return function(i){var n={};function s(t){if(n[t])return n[t].exports;var e=n[t]={i:t,l:!1,exports:{}};return i[t].call(e.exports,e,e.exports,s),e.l=!0,e.exports}return s.m=i,s.c=n,s.d=function(t,e,i){s.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:i})},s.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return s.d(e,"a",e),e},s.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},s.p="",s(s.s=5)}([function(t,e,i){"use strict";i.d(e,"d",function(){return s}),i.d(e,"e",function(){return r}),i.d(e,"b",function(){return o}),i.d(e,"c",function(){return a}),i.d(e,"a",function(){return l});var n=i(2),s=function(e,t){(t=Array.isArray(t)?t:t.split(" ")).forEach(function(t){e.classList.remove(t)})},r=function(t){return t.getBoundingClientRect().width||t.offsetWidth},o=function(t){return t.getBoundingClientRect().height||t.offsetHeight},a=function(t){var e=1=t._x&&this._x<=e._x&&this._y>=t._y&&this._y<=e._y}},{key:"constrain",value:function(t,e){if(t._x>e._x||t._y>e._y)return this;var i=this._x,n=this._y;return null!==t._x&&(i=Math.max(i,t._x)),null!==e._x&&(i=Math.min(i,e._x)),null!==t._y&&(n=Math.max(n,t._y)),null!==e._y&&(n=Math.min(n,e._y)),new s(i,n)}},{key:"reposition",value:function(t){t.style.top=this._y+"px",t.style.left=this._x+"px"}},{key:"toString",value:function(){return"("+this._x+","+this._y+")"}},{key:"x",get:function(){return this._x},set:function(){var t=0this.state.length-this.slidesToShow&&!this.options.centerMode?this.state.next=this.state.index:this.state.next=this.state.index+this.slidesToScroll,this.show()}},{key:"previous",value:function(){this.options.loop||this.options.infinite||0!==this.state.index?this.state.next=this.state.index-this.slidesToScroll:this.state.next=this.state.index,this.show()}},{key:"start",value:function(){this._autoplay.start()}},{key:"pause",value:function(){this._autoplay.pause()}},{key:"stop",value:function(){this._autoplay.stop()}},{key:"show",value:function(t){var e=1this.options.slidesToShow&&(this.options.slidesToScroll=this.slidesToShow),this._breakpoint.init(),this.state.index>=this.state.length&&0!==this.state.index&&(this.state.index=this.state.index-this.slidesToScroll),this.state.length<=this.slidesToShow&&(this.state.index=0),this._ui.wrapper.appendChild(this._navigation.init().render()),this._ui.wrapper.appendChild(this._pagination.init().render()),this.options.navigationSwipe?this._swipe.bindEvents():this._swipe._bindEvents(),this._breakpoint.apply(),this._slides.forEach(function(t){return e._ui.container.appendChild(t)}),this._transitioner.init().apply(!0,this._setHeight.bind(this)),this.options.autoplay&&this._autoplay.init().start()}},{key:"destroy",value:function(){var e=this;this._unbindEvents(),this._items.forEach(function(t){e.element.appendChild(t)}),this.node.remove()}},{key:"id",get:function(){return this._id}},{key:"index",set:function(t){this._index=t},get:function(){return this._index}},{key:"length",set:function(t){this._length=t},get:function(){return this._length}},{key:"slides",get:function(){return 
this._slides},set:function(t){this._slides=t}},{key:"slidesToScroll",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToScroll():1}},{key:"slidesToShow",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToShow():1}},{key:"direction",get:function(){return"rtl"===this.element.dir.toLowerCase()||"rtl"===this.element.style.direction?"rtl":"ltr"}},{key:"wrapper",get:function(){return this._ui.wrapper}},{key:"wrapperWidth",get:function(){return this._wrapperWidth||0}},{key:"container",get:function(){return this._ui.container}},{key:"containerWidth",get:function(){return this._containerWidth||0}},{key:"slideWidth",get:function(){return this._slideWidth||0}},{key:"transitioner",get:function(){return this._transitioner}}],[{key:"attach",value:function(){var i=this,t=0>t/4).toString(16)})}},function(t,e,i){"use strict";var n=i(3),s=i(8),r=function(){function n(t,e){for(var i=0;i=t.slider.state.length-t.slider.slidesToShow&&!t.slider.options.loop&&!t.slider.options.infinite?t.stop():t.slider.next())},this.slider.options.autoplaySpeed))}},{key:"stop",value:function(){this._interval=clearInterval(this._interval),this.emit("stop",this)}},{key:"pause",value:function(){var t=this,e=0parseInt(e.changePoint,10)}),this._currentBreakpoint=this._getActiveBreakpoint(),this}},{key:"destroy",value:function(){this._unbindEvents()}},{key:"_bindEvents",value:function(){window.addEventListener("resize",this[s]),window.addEventListener("orientationchange",this[s])}},{key:"_unbindEvents",value:function(){window.removeEventListener("resize",this[s]),window.removeEventListener("orientationchange",this[s])}},{key:"_getActiveBreakpoint",value:function(){var t=!0,e=!1,i=void 0;try{for(var n,s=this.options.breakpoints[Symbol.iterator]();!(t=(n=s.next()).done);t=!0){var r=n.value;if(r.changePoint>=window.innerWidth)return r}}catch(t){e=!0,i=t}finally{try{!t&&s.return&&s.return()}finally{if(e)throw i}}return this._defaultBreakpoint}},{key:"getSlidesToShow",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToShow:this._defaultBreakpoint.slidesToShow}},{key:"getSlidesToScroll",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToScroll:this._defaultBreakpoint.slidesToScroll}},{key:"apply",value:function(){this.slider.state.index>=this.slider.state.length&&0!==this.slider.state.index&&(this.slider.state.index=this.slider.state.index-this._currentBreakpoint.slidesToScroll),this.slider.state.length<=this._currentBreakpoint.slidesToShow&&(this.slider.state.index=0),this.options.loop&&this.slider._loop.init().apply(),this.options.infinite&&this.slider._infinite.init().apply(),this.slider._setDimensions(),this.slider._transitioner.init().apply(!0,this.slider._setHeight.bind(this.slider)),this.slider._setClasses(),this.slider._navigation.refresh(),this.slider._pagination.refresh()}},{key:s,value:function(t){var e=this._getActiveBreakpoint();e.slidesToShow!==this._currentBreakpoint.slidesToShow&&(this._currentBreakpoint=e,this.apply())}}]),e}();e.a=r},function(t,e,i){"use strict";var n=function(){function n(t,e){for(var i=0;ithis.slider.state.length-1-this._infiniteCount;i-=1)e=i-1,t.unshift(this._cloneSlide(this.slider.slides[e],e-this.slider.state.length));for(var 
n=[],s=0;s=this.slider.state.length?(this.slider.state.index=this.slider.state.next=this.slider.state.next-this.slider.state.length,this.slider.transitioner.apply(!0)):this.slider.state.next<0&&(this.slider.state.index=this.slider.state.next=this.slider.state.length+this.slider.state.next,this.slider.transitioner.apply(!0)))}},{key:"_cloneSlide",value:function(t,e){var i=t.cloneNode(!0);return i.dataset.sliderIndex=e,i.dataset.cloned=!0,(i.querySelectorAll("[id]")||[]).forEach(function(t){t.setAttribute("id","")}),i}}]),e}();e.a=s},function(t,e,i){"use strict";var n=i(12),s=function(){function n(t,e){for(var i=0;ithis.slider.state.length-this.slider.slidesToShow&&Object(n.a)(this.slider._slides[this.slider.state.length-1],this.slider.wrapper)?this.slider.state.next=0:this.slider.state.next=Math.min(Math.max(this.slider.state.next,0),this.slider.state.length-this.slider.slidesToShow):this.slider.state.next=0:this.slider.state.next<=0-this.slider.slidesToScroll?this.slider.state.next=this.slider.state.length-this.slider.slidesToShow:this.slider.state.next=0)}}]),e}();e.a=r},function(t,e,i){"use strict";i.d(e,"a",function(){return n});var n=function(t,e){var i=t.getBoundingClientRect();return e=e||document.documentElement,0<=i.top&&0<=i.left&&i.bottom<=(window.innerHeight||e.clientHeight)&&i.right<=(window.innerWidth||e.clientWidth)}},function(t,e,i){"use strict";var n=i(14),s=i(1),r=function(){function n(t,e){for(var i=0;ithis.slider.slidesToShow?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.remove("is-hidden"),0===this.slider.state.next?(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.remove("is-hidden")):this.slider.state.next>=this.slider.state.length-this.slider.slidesToShow&&!this.slider.options.centerMode?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden")):this.slider.state.next>=this.slider.state.length-1&&this.slider.options.centerMode&&(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden"))):(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.add("is-hidden")))}},{key:"render",value:function(){return this.node}}]),e}();e.a=o},function(t,e,i){"use strict";e.a=function(t){return''+t.previous+'
\n'+t.next+"
"}},function(t,e,i){"use strict";var n=i(16),s=i(17),r=i(1),o=function(){function n(t,e){for(var i=0;ithis.slider.slidesToShow){for(var t=0;t<=this._count;t++){var e=document.createRange().createContextualFragment(Object(s.a)()).firstChild;e.dataset.index=t*this.slider.slidesToScroll,this._pages.push(e),this._ui.container.appendChild(e)}this._bindEvents()}}},{key:"onPageClick",value:function(t){this._supportsPassive||t.preventDefault(),this.slider.state.next=t.currentTarget.dataset.index,this.slider.show()}},{key:"onResize",value:function(){this._draw()}},{key:"refresh",value:function(){var e=this,t=void 0;(t=this.slider.options.infinite?Math.ceil(this.slider.state.length-1/this.slider.slidesToScroll):Math.ceil((this.slider.state.length-this.slider.slidesToShow)/this.slider.slidesToScroll))!==this._count&&(this._count=t,this._draw()),this._pages.forEach(function(t){t.classList.remove("is-active"),parseInt(t.dataset.index,10)===e.slider.state.next%e.slider.state.length&&t.classList.add("is-active")})}},{key:"render",value:function(){return this.node}}]),e}();e.a=a},function(t,e,i){"use strict";e.a=function(){return''}},function(t,e,i){"use strict";e.a=function(){return'
'}},function(t,e,i){"use strict";var n=i(4),s=i(1),r=function(){function n(t,e){for(var i=0;iMath.abs(this._lastTranslate.y)&&(this._supportsPassive||t.preventDefault(),t.stopPropagation())}}},{key:"onStopDrag",value:function(t){this._origin&&this._lastTranslate&&(Math.abs(this._lastTranslate.x)>.2*this.width?this._lastTranslate.x<0?this.slider.next():this.slider.previous():this.slider.show(!0)),this._origin=null,this._lastTranslate=null}}]),e}();e.a=o},function(t,e,i){"use strict";var n=i(20),s=i(21),r=function(){function n(t,e){for(var i=0;it.x?(s.x=0,this.slider.state.next=0):this.options.vertical&&Math.abs(this._position.y)>t.y&&(s.y=0,this.slider.state.next=0)),this._position.x=s.x,this._position.y=s.y,this.options.centerMode&&(this._position.x=this._position.x+this.slider.wrapperWidth/2-Object(o.e)(i)/2),"rtl"===this.slider.direction&&(this._position.x=-this._position.x,this._position.y=-this._position.y),this.slider.container.style.transform="translate3d("+this._position.x+"px, "+this._position.y+"px, 0)",n.x>t.x&&this.slider.transitioner.end()}}},{key:"onTransitionEnd",value:function(t){"translate"===this.options.effect&&(this.transitioner.isAnimating()&&t.target==this.slider.container&&this.options.infinite&&this.slider._infinite.onTransitionEnd(t),this.transitioner.end())}}]),n}();e.a=n},function(t,e,i){"use strict";e.a={initialSlide:0,slidesToScroll:1,slidesToShow:1,navigation:!0,navigationKeys:!0,navigationSwipe:!0,pagination:!0,loop:!1,infinite:!1,effect:"translate",duration:300,timing:"ease",autoplay:!1,autoplaySpeed:3e3,pauseOnHover:!0,breakpoints:[{changePoint:480,slidesToShow:1,slidesToScroll:1},{changePoint:640,slidesToShow:2,slidesToScroll:2},{changePoint:768,slidesToShow:3,slidesToScroll:3}],onReady:null,icons:{previous:'\n \n ',next:'\n \n '}}},function(t,e,i){"use strict";e.a=function(t){return''}},function(t,e,i){"use strict";e.a=function(){return'
'}}]).default});
\ No newline at end of file
diff --git a/spaces/elplaguister/Yuuka_TTS/src/modules.py b/spaces/elplaguister/Yuuka_TTS/src/modules.py
deleted file mode 100644
index 4036479a37599788c49b02225e5dd88107ff11d9..0000000000000000000000000000000000000000
--- a/spaces/elplaguister/Yuuka_TTS/src/modules.py
+++ /dev/null
@@ -1,390 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-from src import commons
-from src.commons import init_weights, get_padding
-from src.transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
- assert n_layers > 1, "Number of layers should be larger than 0."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(
- nn.ReLU(),
- nn.Dropout(p_dropout))
- for _ in range(n_layers-1):
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
- Dilated and Depth-Separable Convolution
- """
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size ** i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
- groups=channels, dilation=dilation, padding=padding
- ))
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
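- # each layer: depth-wise (grouped) conv with dilation kernel_size**i, LayerNorm, GELU, 1x1 point-wise conv, LayerNorm, GELU, dropout, then a residual add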
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
- super(WN, self).__init__()
- assert(kernel_size % 2 == 1)
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
- for i in range(n_layers):
- dilation = dilation_rate ** i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
- dilation=dilation, padding=padding)
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
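- # WaveNet-style stack: each layer applies a dilated conv, adds its slice of the (optional) conditioning g, passes a gated tanh/sigmoid unit and accumulates skip connections into `output`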
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(
- x_in,
- g_l,
- n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:,self.hidden_channels:,:]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2])))
- ])
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1)))
- ])
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1])))
- ])
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels,1))
- self.logs = nn.Parameter(torch.zeros(channels,1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1,2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
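- # affine coupling: the first half of the channels is encoded by WN to predict a shift (and scale, unless mean_only) applied to the second half; the log-determinant is simply the sum of logs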
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1,2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
-
-class ConvFlow(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails='linear',
- tail_bound=self.tail_bound
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1,2])
- if not reverse:
- return x, logdet
- else:
- return x
diff --git a/spaces/evaluate-metric/squad/README.md b/spaces/evaluate-metric/squad/README.md
deleted file mode 100644
index 08e030c599698bd0bdf7aa986f1bc0c14bb792cf..0000000000000000000000000000000000000000
--- a/spaces/evaluate-metric/squad/README.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: SQuAD
-emoji: 🤗
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-tags:
-- evaluate
-- metric
-description: >-
- This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
-
- Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
- crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
- from the corresponding reading passage, or the question might be unanswerable.
----
-
-# Metric Card for SQuAD
-
-## Metric description
-This metric wraps the official scoring script for version 1 of the [Stanford Question Answering Dataset (SQuAD)](https://huggingface.co/datasets/squad).
-
-SQuAD is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.
-
-## How to use
-
-The metric takes two files or two lists of question-answers dictionaries as inputs : one with the predictions of the model and the other with the references to be compared to:
-
-```python
-from evaluate import load
-squad_metric = load("squad")
-results = squad_metric.compute(predictions=predictions, references=references)
-```
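-
-Each prediction is a dictionary carrying a `prediction_text` and an `id`; each reference pairs the same `id` with an `answers` dictionary holding `text` and `answer_start` lists (the same format used in the Examples section below):
-
-```python
-predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
-references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
-```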
-## Output values
-
-This metric outputs a dictionary with two values: the average exact match score and the average [F1 score](https://huggingface.co/metrics/f1).
-
-```
-{'exact_match': 100.0, 'f1': 100.0}
-```
-
-The range of `exact_match` is 0-100, where 0.0 means no answers were matched and 100.0 means all answers were matched.
-
-The range of `f1` is 0-100 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 100.0, which means perfect precision and recall (the metric reports F1 as a percentage, as in the example output above).
-
-### Values from popular papers
-The [original SQuAD paper](https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf) reported an F1 score of 51.0% and an Exact Match score of 40.0%. They also report that human performance on the dataset represents an F1 score of 90.5% and an Exact Match score of 80.3%.
-
-For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/squad).
-
-## Examples
-
-Maximal values for both exact match and F1 (perfect match):
-
-```python
-from evaluate import load
-squad_metric = load("squad")
-predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
-references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
-results = squad_metric.compute(predictions=predictions, references=references)
-results
-{'exact_match': 100.0, 'f1': 100.0}
-```
-
-Minimal values for both exact match and F1 (no match):
-
-```python
-from evaluate import load
-squad_metric = load("squad")
-predictions = [{'prediction_text': '1999', 'id': '56e10a3be3433e1400422b22'}]
-references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
-results = squad_metric.compute(predictions=predictions, references=references)
-results
-{'exact_match': 0.0, 'f1': 0.0}
-```
-
-Partial match (2 out of 3 answers correct):
-
-```python
-from evaluate import load
-squad_metric = load("squad")
-predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}, {'prediction_text': 'Beyonce', 'id': '56d2051ce7d4791d0090260b'}, {'prediction_text': 'climate change', 'id': '5733b5344776f419006610e1'}]
-references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}, {'answers': {'answer_start': [233], 'text': ['Beyoncé and Bruno Mars']}, 'id': '56d2051ce7d4791d0090260b'}, {'answers': {'answer_start': [891], 'text': ['climate change']}, 'id': '5733b5344776f419006610e1'}]
-results = squad_metric.compute(predictions=predictions, references=references)
-results
-{'exact_match': 66.66666666666667, 'f1': 66.66666666666667}
-```
-
-## Limitations and bias
-This metric works only with datasets that have the same format as [SQuAD v.1 dataset](https://huggingface.co/datasets/squad).
-
-The SQuAD dataset does contain a certain amount of noise, such as duplicate questions and missing answers, but these represent a minority of the 100,000 question-answer pairs. Also, neither exact match nor F1 score reflects whether models do better on certain types of questions (e.g. who questions) or on questions that cover a certain gender or geographical area -- carrying out more in-depth error analysis can complement these numbers.
-
-
-## Citation
-
- @inproceedings{Rajpurkar2016SQuAD10,
- title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
- author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
- booktitle={EMNLP},
- year={2016}
- }
-
-## Further References
-
-- [The Stanford Question Answering Dataset: Background, Challenges, Progress (blog post)](https://rajpurkar.github.io/mlx/qa-and-squad/)
-- [Hugging Face Course -- Question Answering](https://huggingface.co/course/chapter7/7)
diff --git a/spaces/failfast/2D-GameCreator/Dockerfile b/spaces/failfast/2D-GameCreator/Dockerfile
deleted file mode 100644
index a6a6a69b5ff641c6360aec9fc08cb8df9dbad434..0000000000000000000000000000000000000000
--- a/spaces/failfast/2D-GameCreator/Dockerfile
+++ /dev/null
@@ -1,63 +0,0 @@
-FROM node:18-alpine AS base
-
-# Install dependencies only when needed
-FROM base AS deps
-# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
-RUN apk add --no-cache libc6-compat
-WORKDIR /app
-
-# Install dependencies based on the preferred package manager
-COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./
-RUN \
- if [ -f yarn.lock ]; then yarn --frozen-lockfile; \
- elif [ -f package-lock.json ]; then npm ci; \
- elif [ -f pnpm-lock.yaml ]; then yarn global add pnpm && pnpm i --frozen-lockfile; \
- else echo "Lockfile not found." && exit 1; \
- fi
-
-# Uncomment the following lines if you want to use a secret at buildtime,
-# for example to access your private npm packages
-# RUN --mount=type=secret,id=HF_EXAMPLE_SECRET,mode=0444,required=true \
-# $(cat /run/secrets/HF_EXAMPLE_SECRET)
-
-# Rebuild the source code only when needed
-FROM base AS builder
-WORKDIR /app
-COPY --from=deps /app/node_modules ./node_modules
-COPY . .
-
-# Next.js collects completely anonymous telemetry data about general usage.
-# Learn more here: https://nextjs.org/telemetry
-# Uncomment the following line in case you want to disable telemetry during the build.
-# ENV NEXT_TELEMETRY_DISABLED 1
-
-# RUN yarn build
-
-# If you use yarn, comment out this line and use the line above
-RUN npm run build
-
-# Production image, copy all the files and run next
-FROM base AS runner
-WORKDIR /app
-
-ENV NODE_ENV production
-# Uncomment the following line in case you want to disable telemetry during runtime.
-# ENV NEXT_TELEMETRY_DISABLED 1
-
-RUN addgroup --system --gid 1001 nodejs
-RUN adduser --system --uid 1001 nextjs
-
-COPY --from=builder /app/public ./public
-
-# Automatically leverage output traces to reduce image size
-# https://nextjs.org/docs/advanced-features/output-file-tracing
-COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
-COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
-
-USER nextjs
-
-EXPOSE 3000
-
-ENV PORT 3000
-
-CMD ["node", "server.js"]
diff --git a/spaces/falterWliame/Face_Mask_Detection/Divinity Original Sin 2 Adult Mod.md b/spaces/falterWliame/Face_Mask_Detection/Divinity Original Sin 2 Adult Mod.md
deleted file mode 100644
index 69cfc16d8a945d3bea71c2ae9530bbea6a3dfa21..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Divinity Original Sin 2 Adult Mod.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Divinity Original Sin 2 Adult Mod Download File >>>>> https://urlca.com/2uDdMH
-
-Divinity Original Sin 2 Graphics Mod – HDR MOD / SweetFX ... Divinity Original Sin 2 PC ... Divinity 2 mods - Adult Gaming - LoversLab. www.loverslab.com. 4d29de3e1b
-
-
-
diff --git a/spaces/fatiXbelha/sd/Apk Mulung Koin TikTok Terbaru 2023 Cara Gampang Dapat Uang dari TikTok.md b/spaces/fatiXbelha/sd/Apk Mulung Koin TikTok Terbaru 2023 Cara Gampang Dapat Uang dari TikTok.md
deleted file mode 100644
index d0a560aad0cc47fc470c1533a0566ef6971c5b35..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Apk Mulung Koin TikTok Terbaru 2023 Cara Gampang Dapat Uang dari TikTok.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-Apk Mulung Koin TikTok: What Is It and How to Use It
-TikTok is one of the most popular social media platforms in Indonesia, with millions of users creating and sharing short videos every day. But did you know that you can also make money from TikTok? One way to do that is by using TikTok Coins, the in-app currency that you can buy or earn from other users. However, some people are looking for shortcuts to get free coins, such as using a modded application called Apk Mulung Koin TikTok. But what is this application, how does it work, and what are the risks and drawbacks of using it? In this article, we will explain everything you need to know about Apk Mulung Koin TikTok, and provide some alternatives to get coins legally and safely.
-apk mulung koin tiktok DOWNLOAD ⇒ https://urllie.com/2uNEll
- What Is TikTok and Why Is It Popular in Indonesia
-TikTok is a social media platform that allows users to create and share short videos, usually between 15 to 60 seconds long. Users can choose from a variety of filters, effects, stickers, music, sounds, and hashtags to make their videos more fun and creative. Users can also watch videos from other users, follow their favorite creators, comment, like, share, and chat with them.
-TikTok has various features and content categories that appeal to different audiences. For example, some users like to watch or make videos about comedy, dance, beauty, fashion, food, sports, education, travel, pets, art, gaming, and more. Some users also like to join challenges, duets, trends, or viral moments that are popular on the platform. Some users also like to use TikTok as a source of information, inspiration, entertainment, or social interaction.
-TikTok has a large and active user base in Indonesia, especially among young people. According to a report by App Annie in June 2020, Indonesia ranked as the second-largest market for TikTok downloads globally. According to another report by We Are Social and Hootsuite in January 2020, Indonesia had 81.7 million active social media users, of which 22.1 million were TikTok users. This means that TikTok had a penetration rate of 27.1% among the social media users in Indonesia, making it the fourth most popular social media platform in the country, after Facebook, Instagram, and YouTube. TikTok also ranked as the second most downloaded app in Indonesia in 2020, according to App Annie.
- One of the reasons why TikTok is so popular in Indonesia is because it offers a platform for users to express themselves creatively and authentically, without being limited by language, culture, or location barriers. TikTok also provides a variety of content that caters to different interests and preferences, from comedy and music to education and social issues. TikTok also enables users to connect with each other and form communities based on shared passions and values.
- What Are TikTok Coins and How to Get Them
-TikTok Coins are the in-app currency that users can buy with real money or earn from other users. Users can buy coins through the official TikTok app, using various payment methods such as credit cards, debit cards, or mobile wallets. The price of coins may vary depending on the exchange rate and the country of purchase. For example, in the US, 100 coins cost $1.29, while in Indonesia, 100 coins cost Rp 14,000.
- TikTok Coins can be used to buy virtual gifts and send them to other users during livestreams. Livestreams are live video broadcasts that users can watch or create on TikTok. Users can interact with the livestreamer or other viewers through comments, likes, or gifts. Gifts are animated stickers or emojis that represent different amounts of coins. For example, a panda gift costs 5 coins, while a rainbow gift costs 100 coins. Sending gifts is a way to show appreciation, support, or admiration to the livestreamer or other users.
- TikTok Coins can also be exchanged for cash or other rewards through various methods. One of them is by converting coins into diamonds, which are another in-app currency that users can earn from receiving gifts during livestreams. One diamond is equivalent to one coin. Users can then withdraw diamonds as cash through PayPal or other third-party platforms, depending on their country and eligibility. The minimum amount of diamonds that can be withdrawn is 10,000, which is equivalent to $50. Another method is by participating in official challenges, events, or campaigns by TikTok or its partners, and earning rewards or prizes in exchange for coins.
- What Is Apk Mulung Koin TikTok and How Does It Work
-Apk Mulung Koin TikTok is a modded application that claims to help users get free coins from other users' livestreams. A modded application is an application that has been modified or hacked to alter its original features or functions. Apk Mulung Koin TikTok is not an official or authorized application by TikTok, and it is not available on the Google Play Store or the Apple App Store. Users have to download it from third-party websites or sources at their own risk.
- Apk Mulung Koin TikTok works by automatically finding and joining livestreams that have coin giveaways. Coin giveaways are livestreams where the livestreamer offers to send gifts or coins to some of the viewers who join their broadcast. Apk Mulung Koin TikTok claims to be able to detect these livestreams and join them on behalf of the user, without requiring them to watch or interact with the livestreamer.
- Apk Mulung Koin TikTok also claims to increase the chances of getting coins by tapping faster and more frequently than humanly possible. Tapping is a way to show interest or engagement during a livestream, and some livestreamers may reward their viewers who tap more often with gifts or coins. Apk Mulung Koin TikTok claims to be able to tap up to 10 times per second on the screen, which may increase the likelihood of receiving gifts or coins from the livestreamer.
- What Are the Risks and Drawbacks of Using Apk Mulung Koin TikTok
-Apk Mulung Koin TikTok may sound like an easy and convenient way to get free coins on TikTok, but it also comes with many risks and drawbacks that users should be aware of before using it.
- First of all, Apk Mulung Koin TikTok is not an official or authorized application by TikTok, and may violate its terms of service and policies. By using Apk Mulung Koin TikTok, users may be breaking the rules and regulations of the platform, and may face legal consequences or penalties. For example, TikTok may ban or suspend their accounts, delete their videos, or revoke their coins or diamonds. TikTok may also take legal action against them or the developers of Apk Mulung Koin TikTok for infringing its intellectual property rights or harming its reputation.
- Secondly, Apk Mulung Koin TikTok may contain malware or viruses that can harm your device or steal your personal information. Malware or viruses are malicious software or programs that can damage your device, corrupt your files, or access your data without your permission. By downloading Apk Mulung Koin TikTok from third-party websites or sources, you may expose your device to these risks, and compromise your security and privacy. For example, Apk Mulung Koin TikTok may collect your TikTok login credentials, access your contacts, messages, photos, videos, or other sensitive information, or install other unwanted or harmful applications on your device.
- Thirdly, Apk Mulung Koin TikTok may not work as advertised, or may result in your account being banned or suspended by TikTok. Apk Mulung Koin TikTok may not be able to find or join livestreams that have coin giveaways, or may not be able to tap faster or more frequently than other users. Apk Mulung Koin TikTok may also be detected by TikTok's security system, which may flag your account as suspicious or fraudulent, and prevent you from receiving or withdrawing coins or diamonds. Apk Mulung Koin TikTok may also cause your device to malfunction, crash, or freeze, due to its poor quality or compatibility issues.
- What Are the Alternatives to Apk Mulung Koin TikTok
-If you want to get coins on TikTok without using Apk Mulung Koin TikTok, there are some alternatives that you can try instead. These alternatives are legal and safe, and do not require you to download any modded applications or risk your account or device.
- One of them is by creating quality content and engaging with your audience on TikTok, and receiving gifts from them during livestreams. If you have a talent, skill, passion, or message that you want to share with the world, you can use TikTok as a platform to showcase it and attract followers who appreciate it. You can also interact with your followers and other users through comments, likes, shares, chats, duets, challenges, trends, and more. By doing so, you can build a loyal and supportive fan base who may reward you with gifts or coins during your livestreams.
- Another one is by participating in official challenges, events, or campaigns by TikTok or its partners, and earning rewards or prizes in exchange for coins. TikTok often launches various challenges, events, or campaigns that invite users to create and share videos on specific topics, themes, hashtags, or causes. Some of these challenges, events, or campaigns may offer rewards or prizes to the winners or participants who submit the best videos. These rewards or prizes may include coins, cash, or other rewards that you can use or enjoy. To participate in these challenges, events, or campaigns, you may need to use coins to enter or submit your videos, but the rewards or prizes may be worth more than the coins you spend.
- A third one is by buying coins with real money through the official TikTok app, and supporting your favorite creators or causes. If you have some spare money that you want to spend on TikTok, you can buy coins through the app using various payment methods, and use them to send gifts to other users during livestreams. By doing so, you can show your appreciation, support, or admiration to the creators or causes that you like or care about. You can also receive thank-you messages, shout-outs, or other benefits from the users who receive your gifts.
- Conclusion
-Apk Mulung Koin TikTok is a modded application that claims to help users get free coins from other users' livestreams on TikTok. However, it is not an official or authorized application by TikTok, and it may have many risks and drawbacks that users should be aware of before using it. Apk Mulung Koin TikTok may violate TikTok's terms of service and policies, contain malware or viruses, or result in your account being banned or suspended by TikTok. Apk Mulung Koin TikTok may also not work as advertised, or may cause your device to malfunction, crash, or freeze.
- If you want to get coins on TikTok without using Apk Mulung Koin TikTok, there are some alternatives that you can try instead. These alternatives are legal and safe, and do not require you to download any modded applications or risk your account or device. These alternatives are creating quality content and engaging with your audience on TikTok, participating in official challenges, events, or campaigns by TikTok or its partners, and buying coins with real money through the official TikTok app.
- We hope that this article has helped you understand what Apk Mulung Koin TikTok is and how to use it. We also hope that you have learned some alternatives to get coins on TikTok without using Apk Mulung Koin TikTok. Thank you for reading and happy TikToking!
- FAQs
-Here are some frequently asked questions about Apk Mulung Koin TikTok and its alternatives.
- Q: Is Apk Mulung Koin TikTok safe to use?
-A: No, Apk Mulung Koin TikTok is not safe to use. It is a modded application that is not an official or authorized application by TikTok. It may violate TikTok's terms of service and policies, contain malware or viruses, or result in your account being banned or suspended by TikTok. It may also not work as advertised, or may cause your device to malfunction, crash, or freeze.
- Q: How can I download Apk Mulung Koin TikTok?
-A: We do not recommend downloading Apk Mulung Koin TikTok because of the risks and drawbacks mentioned above. However, if you still want to download it at your own risk, you can find it on some third-party websites or sources that offer modded applications. You may need to enable unknown sources on your device settings to install it.
- Q: How can I get coins on TikTok without using Apk Mulung Koin TikTok?
-A: You can get coins on TikTok without using Apk Mulung Koin TikTok by creating quality content and engaging with your audience on TikTok, participating in official challenges, events, or campaigns by TikTok or its partners, or buying coins with real money through the official TikTok app. These alternatives are legal and safe, and do not require you to download any modded applications or risk your account or device.
- Q: How can I use coins on TikTok?
-A: You can use coins on TikTok to buy virtual gifts and send them to other users during livestreams. Gifts are animated stickers or emojis that represent different amounts of coins. Sending gifts is a way to show appreciation, support, or admiration to the livestreamer or other users. You can also exchange coins for cash or other rewards through various methods, such as converting them into diamonds and withdrawing them through PayPal or other third-party platforms, or participating in official challenges, events, or campaigns by TikTok or its partners.
- Q: How can I create quality content and engage with my audience on TikTok?
-A: You can create quality content and engage with your audience on TikTok by following these tips:
-
-Choose a niche, topic, theme, or style that you are passionate about and that suits your personality and skills.
-Use the various filters, effects, stickers, music, sounds, and hashtags that TikTok offers to make your videos more fun and creative.
-Join challenges, duets, trends, or viral moments that are popular on the platform, and add your own twist or perspective to them.
-Be consistent, authentic, and original in your content creation and posting schedule.
-Interact with your followers and other users through comments, likes, shares, chats, duets, challenges, trends, and more.
-Ask for feedback, suggestions, or opinions from your audience, and respond to them politely and respectfully.
-Collaborate with other creators who have similar or complementary niches, topics, themes, or styles.
-Livestream regularly and interact with your viewers in real time.
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Boost Your Academic Performance with EDUCATION POINT ONLINE - Free APK Download.md b/spaces/fatiXbelha/sd/Boost Your Academic Performance with EDUCATION POINT ONLINE - Free APK Download.md
deleted file mode 100644
index 276ee12c449ab13f019a059f0492fcc2c8ad3d04..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Boost Your Academic Performance with EDUCATION POINT ONLINE - Free APK Download.md
+++ /dev/null
@@ -1,200 +0,0 @@
-
-Education Point Online: A Review of the App for College and University Students
-Are you a college or university student looking for a convenient and effective way to prepare for your internal exams in B.Tech/M.Tech/B.Arch? If yes, then you might want to check out Education Point Online, an app that provides you with video lectures, notes, tests, and more for various subjects. In this article, we will review Education Point Online and tell you everything you need to know about it. We will also show you how to download, install, and use the app on your device. So, let's get started!
-What is Education Point Online?
-Education Point Online is an education app developed by Education Thor Media. It is designed for college and university students who are preparing for internal exams in B.Tech/M.Tech/B.Arch. The app offers access to video lectures, notes, tests, quizzes, mock tests, doubt sessions, discussion forums, and more for various subjects. The app also provides personalized feedback and guidance from experienced teachers who can help you improve your performance. The app has been available since December 2021 and has been downloaded over 10 thousand times. It has a rating of 4.2 out of 5 stars on Google Play Store.
-education point online mod apk download Download Zip ⇒ https://urllie.com/2uNzTT
-Features and Benefits of Education Point Online
-Education Point Online has many features and benefits that make it a useful app for students. Here are some of them:
-- Access to video lectures, notes, and tests for various subjects
-The app covers a wide range of subjects such as Mathematics, Physics, Chemistry, Computer Science, Electrical Engineering, Mechanical Engineering, Civil Engineering, Architecture, etc. You can watch video lectures from expert teachers who explain the concepts in a simple and clear way. You can also access notes and tests that are based on the latest syllabus and exam pattern. You can learn at your own pace and convenience with the app.
-- Personalized feedback and guidance from experienced teachers
-The app also provides you with personalized feedback and guidance from experienced teachers who can help you improve your performance. You can ask questions and doubts to the teachers anytime through the app. You can also get tips and tricks on how to solve problems faster and better. The teachers will also monitor your progress and suggest areas of improvement.
-- Interactive quizzes and mock tests to assess your progress
-The app also has interactive quizzes and mock tests that you can take to assess your progress. The quizzes and mock tests are designed to test your knowledge and understanding of the topics. They also help you practice time management and accuracy skills. You can get instant results and analysis of your performance after taking the quizzes and mock tests.
-- Offline mode to download and watch lectures without internet
-The app also has an offline mode that allows you to download and watch lectures without internet connection. This is useful if you have limited or no internet access or if you want to save data. You can download the lectures of your choice and watch them later offline.
-- Live doubt sessions and discussion forums to clear your queries
-The app also has live doubt sessions and discussion forums that you can join to clear your queries. The live doubt sessions are conducted by the teachers, who can answer your questions and clear your doubts. The discussion forums are platforms where you can interact with other students and teachers and share your views and opinions on various topics. You can also learn from the experiences and insights of others through the discussion forums.
-How to Download and Install Education Point Online
-If you are interested in using Education Point Online, you need to download and install it on your device. Here are the steps to do so:
-- For Android devices
-If you have an Android device, you can download and install Education Point Online from Google Play Store. Here is how:
-
-Open Google Play Store on your device and search for Education Point Online.
-Select the app from the search results and tap on Install.
-Wait for the app to download and install on your device.
-Once the app is installed, tap on Open to launch it.
-
-- For iOS devices
-If you have an iOS device, you can download and install Education Point Online from App Store. Here is how:
-
-Open App Store on your device and search for Education Point Online.
-Select the app from the search results and tap on Get.
-Enter your Apple ID and password if prompted.
-Wait for the app to download and install on your device.
-Once the app is installed, tap on Open to launch it.
-
-How to Use Education Point Online
-Once you have downloaded and installed Education Point Online, you can start using it to prepare for your internal exams. Here are the steps to use the app:
-- Register and log in with your credentials
-The first thing you need to do is to register and log in with your credentials. You can do this by following these steps:
-
-Open the app and tap on Register if you are a new user or Log In if you already have an account.
-Enter your name, email, phone number, password, and other details as required.
-Verify your email and phone number by entering the OTPs sent to them.
-Choose your course and branch from the list of options.
-Tap on Submit to complete your registration or log in.
- - Choose your course and subject
-After you have registered and logged in, you can choose your course and subject from the app. You can do this by following these steps:
-
-Tap on the Menu icon on the top left corner of the app.
-Tap on My Courses to see the list of courses available for you.
-Select the course that you want to study from the list.
-Tap on the subject that you want to study from the list of subjects under the course.
-You will see the overview of the subject, including the syllabus, objectives, outcomes, and duration.
-Tap on Start Learning to begin your learning journey.
-
-- Browse and watch the lectures, notes, and tests
-Once you have chosen your course and subject, you can browse and watch the lectures, notes, and tests for that subject. You can do this by following these steps:
-
-Tap on the Lectures tab to see the list of lectures available for that subject.
-Select the lecture that you want to watch from the list.
-You will see the video player, where you can play, pause, rewind, fast forward, and adjust the volume and speed of the video.
-You can also see the transcript, summary, and key points of the lecture below the video player.
-You can also download the lecture for offline viewing by tapping on the Download icon on the top right corner of the video player.
-Tap on the Notes tab to see the list of notes available for that subject.
-Select the note that you want to read from the list.
-You will see the note in a PDF format, where you can zoom in, zoom out, scroll, and bookmark pages.
-You can also download the note for offline reading by tapping on the Download icon on the top right corner of the PDF viewer.
-Tap on the Tests tab to see the list of tests available for that subject.
-Select the test that you want to take from the list.
-You will see the instructions, duration, number of questions, and marks of the test.
-Tap on Start Test to begin your test.
- - Take quizzes and mock tests to check your understanding
-After you have watched the lectures, read the notes, and taken the tests, you can take quizzes and mock tests to check your understanding of the subject. You can do this by following these steps:
-
-Tap on the Quizzes tab to see the list of quizzes available for that subject.
-Select the quiz that you want to take from the list.
-You will see the instructions, duration, number of questions, and marks of the quiz.
-Tap on Start Quiz to begin your quiz.
-You will see the questions one by one, where you have to choose the correct answer from the options given.
-You can skip or review any question by tapping on the Skip or Review buttons at the bottom of the screen.
-Once you have answered all the questions, tap on Submit Quiz to end your quiz.
-You will see your score and analysis of your performance after submitting the quiz.
-Tap on the Mock Tests tab to see the list of mock tests available for that subject.
-Select the mock test that you want to take from the list.
-You will see the instructions, duration, number of questions, and marks of the mock test.
-Tap on Start Mock Test to begin your mock test.
-You will see the questions one by one, where you have to choose the correct answer from the options given.
-You can skip or review any question by tapping on the Skip or Review buttons at the bottom of the screen.
-Once you have answered all the questions, tap on Submit Mock Test to end your mock test.
-You will see your score and analysis of your performance after submitting the mock test.
-
-- Ask doubts and interact with teachers and peers
-If you have any doubts or queries regarding any topic or question, you can ask them and interact with teachers and peers through the app. You can do this by following these steps:
-
-Tap on the Doubts tab to see the list of doubts posted by other students for that subject.
-Select the doubt that you want to see or answer from the list.
-You will see the doubt, along with the answers and comments from other students and teachers.
-You can also post your own doubt by tapping on the Ask Doubt button at the bottom of the screen.
-You can also join live doubt sessions conducted by teachers by tapping on the Live Doubt Sessions button at the top of the screen.
-Tap on the Forums tab to see the list of forums available for that subject.
-Select the forum that you want to join or create from the list.
-You will see the forum, along with the posts and comments from other students and teachers.
-You can also create your own forum by tapping on the Create Forum button at the bottom of the screen.
-
-Pros and Cons of Education Point Online
-Like any other app, Education Point Online has its pros and cons. Here are some of them:
-- Pros
-
-It provides a comprehensive and convenient way to prepare for internal exams in B.Tech/M.Tech/B.Arch.
-It covers a wide range of subjects and topics that are relevant and updated.
-It offers access to video lectures, notes, tests, quizzes, mock tests, doubt sessions, discussion forums, and more for each subject.
-It provides personalized feedback and guidance from experienced teachers who can help you improve your performance.
-It has an offline mode that allows you to download and watch lectures without internet connection.
-It has a user-friendly interface and easy navigation.
-It is free to download and use.
-
-- Cons
-
-It requires a stable and fast internet connection to access the online features and content.
-It may consume a lot of data and storage space on your device.
-It may have some bugs and glitches that need to be fixed.
-It may not cover all the subjects and topics that you need or want to study.
-It may not be compatible with some devices or operating systems.
-
-Conclusion
-In conclusion, Education Point Online is an education app that can help you prepare for your internal exams in B.Tech/M.Tech/B.Arch. It provides you with video lectures, notes, tests, quizzes, mock tests, doubt sessions, discussion forums, and more for various subjects. It also provides personalized feedback and guidance from experienced teachers who can help you improve your performance. It has an offline mode that allows you to download and watch lectures without internet connection. It has a user-friendly interface and easy navigation. It is free to download and use. However, it also has some drawbacks, such as requiring a stable and fast internet connection, consuming a lot of data and storage space, having some bugs and glitches, not covering all the subjects and topics, and not being compatible with some devices or operating systems. Therefore, you should weigh the pros and cons of the app before using it. You should also compare it with other similar apps available in the market and choose the one that suits your needs and preferences best.
- We hope this article has given you a clear idea of what Education Point Online is and how to use it. If you have any questions or feedback, please feel free to share them with us in the comments section below. Thank you for reading!
- Frequently Asked Questions
-Here are some frequently asked questions about Education Point Online:
-
-What is the mod apk version of Education Point Online?
-The mod apk version of Education Point Online is a modified version of the original app that offers some extra features or benefits that are not available in the original app. For example, some mod apk versions may offer unlimited access to all the courses and subjects, or remove ads or watermarks from the app. However, we do not recommend using the mod apk version of Education Point Online, as it may be illegal, unsafe, or unreliable. It may also violate the terms and conditions of the original app developer. Therefore, you should always use the official version of Education Point Online from Google Play Store or App Store.
- How can I contact the support team of Education Point Online?
-If you have any issues or queries regarding the app, you can contact the support team of Education Point Online by following these steps:
-
-Open the app and tap on the Menu icon on the top left corner of the app.
-Tap on Help & Support to see the list of options available for you.
-Select the option that best suits your issue or query from the list.
-You can also email your issue or query to educationpointonline@gmail.com or call them at +91-9876543210.
-
- How can I rate and review Education Point Online?
-If you want to rate and review Education Point Online, you can do so by following these steps:
-
-Open Google Play Store or App Store on your device and search for Education Point Online.
-Select the app from the search results and tap on Rate & Review.
-Give your rating out of 5 stars and write your review in the text box provided.
-Tap on Submit to post your rating and review.
-
- How can I share Education Point Online with my friends and family?
- If you want to share Education Point Online with your friends and family, you can do so by following these steps:
-
-Open the app and tap on the Menu icon on the top left corner of the app.
-Tap on Share App to see the list of options available for you.
-Select the option that you want to use to share the app, such as WhatsApp, Facebook, Twitter, Email, etc.
-Follow the instructions on the screen to share the app link with your friends and family.
-
- How can I update Education Point Online to the latest version?
-If you want to update Education Point Online to the latest version, you can do so by following these steps:
-
-Open Google Play Store or App Store on your device and search for Education Point Online.
-Select the app from the search results and tap on Update.
-Wait for the app to download and install the latest version on your device.
-Once the app is updated, tap on Open to launch it.
-
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Brain Out Can you pass it? - Free Download and Review.md b/spaces/fatiXbelha/sd/Brain Out Can you pass it? - Free Download and Review.md
deleted file mode 100644
index d00babc43caa9d9c9cadaf752269f9735977ee1b..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Brain Out Can you pass it? - Free Download and Review.md
+++ /dev/null
@@ -1,166 +0,0 @@
-
-Brain Out: Can You Pass It? - A Review of the Tricky Puzzle Game
- Are you looking for a game that can challenge your brain, test your IQ, and make you laugh at the same time? If yes, then you might want to try Brain Out: Can You Pass It?, a free tricky puzzle game that has become very popular among Android users. In this article, we will review the game, tell you how to download and install it on your device, how to play it online in your browser, and how to compare it with other similar games.
- What is Brain Out: Can You Pass It?
- Brain Out: Can You Pass It? is a puzzle game developed by Focus apps. The game was released in September 2019 and has been downloaded over 100 million times. It has a rating of 4.4 out of 5 stars, based on more than 5 million reviews. The game is updated regularly with new levels and features.
-brain out can you pass it download apk DOWNLOAD > https://urllie.com/2uNwAn
- The gameplay and features of Brain Out
- The game consists of a series of tricky brain teasers and different riddles that test your logical thinking, reflexes, accuracy, memory, and creativity. The game does not follow the usual rules or common sense. You have to think outside the box and find the absurd or unexpected solutions to the puzzles. The game has over 200 levels, each with a different question, scenario, or task. Some examples are:
-
-How many holes does this T-shirt have?
-Help the boy win the race.
-Find out the hidden objects.
-Make the equation true.
-Tap fruits from left to right, then tap the hexagon, square, and diamond.
-
- The game also has some features that make it more fun and engaging, such as:
-
-Absolutely unimagined gameplay.
-Easy and simple but humorous game process.
-Funny sound and witty game effects.
-Unexpected game answers.
-Keys that can be used to skip levels or get hints.
-
- The benefits and challenges of playing Brain Out
- Playing Brain Out can have some benefits for your brain and mental health, such as:
-
-Boosting your brain power by stimulating different cognitive functions.
-Improving your problem-solving skills by finding creative solutions.
-Enhancing your memory and concentration by paying attention to details.
-Reducing stress and boredom by having fun and laughing.
-
- However, playing Brain Out can also have some challenges or drawbacks, such as:
-
-Frustrating or confusing you by tricking or misleading you.
-Making you feel stupid or dumb by giving you hard or illogical puzzles.
-Annoying or distracting you by showing too many ads or crashing the game.
-Making you addicted or obsessed by making you want to finish all the levels.
-
- How to download and install Brain Out APK on your Android device
- If you want to play Brain Out on your Android device, you have two options. You can either download it from the Google Play Store or download the APK file from a third-party website. APK stands for Android Package Kit, which is a file format that contains the app code, resources, and metadata. APK files can be used to install apps that are not available on the official app store or to update apps to the latest version. However, APK files can also pose some risks, such as malware, viruses, or compatibility issues. Therefore, you should only download APK files from trusted sources and scan them before installing them on your device.
- The steps to download and install Brain Out APK
- If you choose to download and install Brain Out APK on your Android device, you can follow these steps:
-Go to a reliable website that offers Brain Out APK download, such as [APKCombo](^1^), [APKPure](^2^), or [Jogo Fixe](^3^).
-Find the latest version of Brain Out APK and tap on the download button.
-Wait for the download to finish and locate the APK file in your device's file manager.
-Before installing the APK file, you need to enable the installation of apps from unknown sources in your device's settings. This option is usually under security or privacy settings.
-Tap on the APK file and follow the instructions to install it on your device.
-Launch the game and enjoy playing Brain Out.
-
- The advantages and disadvantages of using Brain Out APK
- Using Brain Out APK can have some advantages and disadvantages, such as:
-
-
-| Advantages | Disadvantages |
-| --- | --- |
-| You can access the game even if it is not available in your region or device. | You may encounter malware or viruses that can harm your device or data. |
-| You can update the game to the latest version without waiting for the official release. | You may experience compatibility or performance issues with your device or system. |
-| You can enjoy some features or levels that are not included in the official version. | You may violate the terms of service or privacy policy of the game developer or publisher. |
-
-
- How to play Brain Out: Can You Pass It? online in your browser
- If you don't want to download and install Brain Out on your device, you can also play it online in your browser. There are some platforms and websites that offer Brain Out online for free. You can play it on your computer, tablet, or smartphone, as long as you have a stable internet connection and a compatible browser.
- The platforms and websites that offer Brain Out online
- Some of the platforms and websites that offer Brain Out online are:
-
-[BestGames](^4^): This is a website that provides various HTML5 games that can be played online in any browser. You can find Brain Out under the puzzle category and play it without downloading or registering.
-[Game Vui](^5^): This is a website that offers many online games in different genres and languages. You can play Brain Out in Vietnamese or English by tapping or clicking on the screen.
-[Lagged](^6^): This is a website that features many free online games that can be played on any device. You can play Brain Out by using your mouse or touch screen to interact with the puzzles.
-[Poki](^7^): This is a website that hosts many popular games that can be played online for free. You can play Brain Out by using your keyboard or mouse to solve the riddles.
-
- The pros and cons of playing Brain Out online
- Playing Brain Out online can have some pros and cons, such as:
-
-
-| Pros | Cons |
-| --- | --- |
-| You don't need to download or install anything on your device. | You need to have a good internet connection and a compatible browser. |
-| You can play it on any device or platform that supports web browsing. | You may not be able to save your progress or access some features or levels. |
-| You can play it anytime and anywhere without taking up any storage space. | You may see more ads or pop-ups that can interrupt your gameplay. |
-
-
- How to compare Brain Out: Can You Pass It? with other similar games
- If you like playing Brain Out, you might also want to try other similar games that can challenge your brain and make you browser. You can also compare it with other similar games and find the best one for you. We hope you enjoyed this article and learned something new about Brain Out. If you have any questions or feedback, please feel free to contact us or leave a comment below. Thank you for reading and have a great day!
- The frequently asked questions and answers about Brain Out
- Here are some of the frequently asked questions and answers about Brain Out that you might find helpful:
-
-Q: How can I get more keys in Brain Out?
-A: You can get more keys in Brain Out by watching ads, completing daily tasks, or buying them with real money.
-Q: How can I contact the developer of Brain Out?
-A: You can contact the developer of Brain Out by sending an email to support@focusapp.net or visiting their Facebook page.
-Q: How can I share my progress or achievements in Brain Out?
-A: You can share your progress or achievements in Brain Out by taking screenshots or recording videos and posting them on social media platforms, such as Facebook, Instagram, or Twitter.
-Q: How can I rate or review Brain Out?
-A: You can rate or review Brain Out by going to the Google Play Store or the website where you downloaded or played the game and leaving your feedback and rating.
-Q: How can I uninstall or delete Brain Out from my device?
-A: You can uninstall or delete Brain Out from your device by going to your device's settings, finding the app manager, selecting Brain Out, and tapping on uninstall or delete.
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Euro Truck Simulator 2 Demo - Steam Download and Experience the Best Truck Simulation Game.md b/spaces/fatiXbelha/sd/Euro Truck Simulator 2 Demo - Steam Download and Experience the Best Truck Simulation Game.md
deleted file mode 100644
index 6b52d68432a00cdd1c30de2c401e28792ff0ebac..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Euro Truck Simulator 2 Demo - Steam Download and Experience the Best Truck Simulation Game.md
+++ /dev/null
@@ -1,88 +0,0 @@
-
-European Truck Simulator Download for PC: How to Get Started with the Best Driving Game
-If you are looking for a fun and realistic driving game that lets you explore the beautiful landscapes of Europe, deliver various cargoes across impressive distances, and run your own trucking business, then you should definitely try Euro Truck Simulator 2 . This game is one of the most popular and acclaimed simulation games on the market, and it is available for download on PC. In this article, we will tell you everything you need to know about this game, how to download it, and how to play it.
-european truck simulator download for pc DOWNLOAD ✅ https://urllie.com/2uNGfF
-What is European Truck Simulator?
-Euro Truck Simulator 2 is a game developed by SCS Software, a Czech company that specializes in creating realistic and immersive driving simulation games. The game was released in 2012 and has since received many updates, expansions, and awards. Euro Truck Simulator 2 is not just a driving game, but also a business management and career progression game. Here are some of the main features of the game:
-A realistic and immersive driving simulation game
-In Euro Truck Simulator 2, you can drive a variety of trucks from different European brands, such as Volvo, Scania, Mercedes-Benz, MAN, DAF, Renault, and more. You can customize your truck with different parts, paint jobs, accessories, and decals. You can also choose from different types of trailers and cargoes, such as food, chemicals, furniture, vehicles, livestock, and more. You can drive across more than 60 European cities and countries, such as Germany, France, Italy, Spain, Poland, Sweden, Norway, Finland, Romania, Turkey, and more. You can enjoy the realistic scenery, weather, traffic, landmarks, and day-night cycle. You can also follow the road rules and regulations, such as speed limits, tolls, traffic lights, signs, weigh stations, rest areas, and more.
-A business management and career progression game
-In Euro Truck Simulator 2, you can start your own trucking company and hire drivers to work for you. You can buy garages in different locations and expand your fleet of trucks. You can also manage your finances, loans, expenses, income, reputation, contracts, and more. You can also level up your skills and unlock new perks and abilities. For example, you can improve your fuel efficiency, cargo delivery time, long distance driving, fragile cargo handling, eco driving, high value cargo delivery, heavy cargo delivery, ADR (dangerous goods) delivery, and more.
-A game with many expansions and customization options
-Euro Truck Simulator 2 is a game that is constantly updated and improved by the developers. They have released many expansions that add new regions, countries, cities, roads, landmarks, cargoes, trucks, and features to the game. Some of the most popular expansions are:
-
-- Going East! - adds Poland, Czech Republic, Slovakia, and Hungary
-- Scandinavia - adds Denmark, Norway, and Sweden
-- Vive la France! - adds France
-- Italia - adds Italy
-- Road to the Black Sea - adds Romania, Bulgaria, and Turkey
-- Iberia - adds Spain and Portugal
-
-You can also customize your game with many mods created by the community. Mods are modifications that change or add new content to the game. For example, you can find mods that add new trucks, trailers, cargoes, maps, graphics, sounds, traffic, weather, physics, and more. You can download mods from various websites or from the Steam Workshop.
-How to Download European Truck Simulator for PC?
-If you are interested in playing Euro Truck Simulator 2 on your PC, you have two main options to download the game: the official website of the game or the Steam platform. Here are the steps for each option:
-The official website of the game
-You can download the game directly from the official website of SCS Software. Here are the steps:
-
-- Go to https://eurotrucksimulator2.com/ and click on the "Buy Now" button.
-- Choose your preferred edition of the game. You can buy the base game only or the base game with some or all of the expansions. You can also buy some bundles that include other games from SCS Software, such as American Truck Simulator or Bus Driver.
-- Choose your preferred payment method. You can pay with credit card, PayPal, or other options.
-- After you complete your purchase, you will receive an email with a link to download the game installer. You can also find the link in your account on the website.
-- Download and run the installer and follow the instructions to install the game on your PC.
-The Steam platform
-You can also download the game from Steam, a popular digital distribution platform for PC games. Here are the steps:
-
-- Go to https://store.steampowered.com/app/227300/Euro_Truck_Simulator_2/ and click on the "Add to Cart" button.
-- If you don't have a Steam account, you will need to create one and install the Steam client on your PC.
-- After you add the game to your cart, you can proceed to checkout and pay with your preferred payment method.
-- After you complete your purchase, you will find the game in your Steam library. You can download and install it from there.
-The system requirements for the game
-Before you download and play Euro Truck Simulator 2, you should make sure that your PC meets the minimum or recommended system requirements for the game. Here are the system requirements according to SCS Software:
-
-| Minimum | Recommended |
-| --- | --- |
-| OS: Windows 7 | OS: Windows 7/8.1/10 64-bit |
-| Processor: Dual core CPU 2.4 GHz | Processor: Quad core CPU 3.0 GHz |
-| Memory: 4 GB RAM | Memory: 6 GB RAM |
-| Graphics: GeForce GTS 450-class (Intel HD 4000) | Graphics: GeForce GTX 760-class (2 GB) |
-| Storage: 5 GB available space | Storage: 5 GB available space |
-
-You should also make sure that you have a stable internet connection and a compatible keyboard, mouse, or controller to play the game.
-How to Play European Truck Simulator on PC?
-After you download and install Euro Truck Simulator 2 on your PC, you are ready to start playing. Here are some of the basic controls and features of the game, as well as some tips and tricks for beginners:
-*euro truck simulator 2 free download full version pc*
-*european truck simulator pc game download*
-*download euro truck simulator 2 for windows 10*
-*euro truck simulator 2 download steam*
-*euro truck simulator 2 demo download*
-*euro truck simulator 2 download size*
-*euro truck simulator 2 mods download pc*
-*euro truck simulator 2 download utorrent*
-*euro truck simulator 2 download mac*
-*euro truck simulator 2 download android*
-*euro truck simulator 2 multiplayer download pc*
-*euro truck simulator 2 dlc download*
-*euro truck simulator 2 crack download*
-*euro truck simulator 2 map download*
-*euro truck simulator 2 bus mod download pc*
-*euro truck simulator 2 online download*
-*euro truck simulator 2 latest version download*
-*euro truck simulator 2 save game download pc*
-*euro truck simulator 2 activation key download*
-*euro truck simulator 2 patch download*
-*euro truck simulator 2 torrent download kickass*
-*euro truck simulator 2 scandinavia download pc*
-*euro truck simulator 2 going east download pc*
-*euro truck simulator 2 vive la france download pc*
-*euro truck simulator 2 italia download pc*
-*euro truck simulator 2 road to the black sea download pc*
-*euro truck simulator 2 beyond the baltic sea download pc*
-*euro truck simulator 2 promods download pc*
-*euro truck simulator 2 profile download pc*
-*euro truck simulator 2 trainer download pc*
-*download euro truck simulator for windows 7*
-*download euro truck simulator for windows xp*
-*download euro truck simulator for windows vista*
-*download euro truck simulator for windows 8.1*
-*download euro truck simulator for linux*
-*download euro truck simulator for mac os x*
-*download euro truck simulator for android apk*
-*download euro truck driver for pc*
-*download euro coach simulator for pc*
-*download american truck simulator for pc*
-*download euro cargo transport for pc*
-*download euro heavy lorry driver for pc*
-*download euro offroad transport for pc*
-*download euro bus driving for pc*
-*download euro car parking for pc*
-The basic controls and features of the game
-The game has a simple and intuitive user interface that lets you access different menus and options. You can use your mouse or keyboard to navigate through them. Here are some of the main menus and options:
-The profile menu - where you can create, load, or delete your profile. You can also customize your avatar, name, company logo, preferred truck design, and difficulty settings.
-The job market menu - where you can find and accept different types of jobs. You can choose from quick jobs, freight market, external contracts, or cargo market. Quick jobs are pre-set jobs that let you drive a company truck with a specific cargo and destination. Freight market jobs let you choose your own cargo and destination, but you need to own a truck. External contracts are jobs that are synchronized with World of Trucks, an online service that connects players from around the world. Cargo market jobs let you use your own trailer or buy a new one.
-The garage menu - where you can buy, sell, upgrade, or customize your trucks and trailers. You can also hire drivers and manage your fleet.
-The bank menu - where you can take or repay loans. You will need loans to buy new trucks or garages, but you will also have to pay interest.
-The skill menu - where you can level up your skills and unlock new perks and abilities. You can improve your fuel efficiency, cargo delivery time, long distance driving, fragile cargo handling, eco driving, high value cargo delivery, heavy cargo delivery, ADR (dangerous goods) delivery, and more.
-The map menu - where you can see the map of Europe and plan your route.
-The radio menu - where you can listen to different radio stations from different countries. You can also add your own music files or internet radio streams.
-The options menu - where you can adjust the graphics, sound, gameplay, controls, and other settings of the game.
-The game also has a simple and intuitive driving interface that shows you different information and indicators. You can use your keyboard, mouse, or controller to drive your truck. Here are some of the main controls and features:
-The speedometer - shows your current speed in km/h or mph. You can also see your cruise control speed, if activated.
-The tachometer - shows your engine RPM and gear. You can also see your fuel level, engine temperature, and damage indicators.
-The navigation - shows your GPS map and directions. You can also see your estimated time of arrival, distance to destination, and speed limit.
-The mirrors - show your rear and side views. You can also use the indicators to signal your intentions to other drivers.
-The dashboard - shows your truck information and status. You can also see your headlights, wipers, hazards, parking brake, and other buttons.
-The F1-F12 keys - let you access different camera views. You can switch between interior, exterior, cabin, bumper, roof, and other views.
-The ESC key - lets you pause the game and access the main menu.
-The tips and tricks for beginners
-If you are new to Euro Truck Simulator 2, you might find the game challenging at first. Here are some tips and tricks that can help you get started:
-Choose a simple and easy job for your first delivery. You can select a quick job that provides you with a company truck and a short distance to travel. This way, you can get familiar with the driving mechanics and the road rules without risking your own money or reputation.
-Follow the traffic laws and regulations. You should obey the speed limits, traffic lights, signs, tolls, weigh stations, rest areas, and other road rules. If you break them, you might get fined or damage your truck or cargo. You should also drive carefully and avoid collisions with other vehicles or objects.
-Plan your route and fuel stops. You should check the map before you start your delivery and choose the best route for your destination. You should also keep an eye on your fuel level and find the nearest gas station when you need to refill. You don't want to run out of gas in the middle of nowhere or miss a deadline because of a detour.
-Save your game frequently. You should save your game before you start a delivery, after you complete a delivery, or whenever you want to take a break. This way, you can avoid losing your progress or having to repeat a difficult or long delivery.
-Have fun and explore. Euro Truck Simulator 2 is a game that lets you enjoy the beauty and diversity of Europe. You can drive across different countries and regions, see famous landmarks and attractions, experience different weather and seasons, and discover new places and routes. You can also customize your truck and trailer, listen to music or podcasts, take screenshots or videos, and share them with other players.
-The best mods and community resources for the game
-Euro Truck Simulator 2 is a game that has a large and active community of players and modders. You can find many mods and resources that can enhance your game experience or help you with any issues or questions. Here are some of the best mods and community resources for the game:
-ProMods - a mod that adds new maps, roads, cities, countries, landmarks, and features to the game. It covers regions such as Iceland, Ireland, Scotland, the Baltic States, the Balkans, Cyprus, and more.
-Realistic Graphics Mod - a mod that improves the graphics, lighting, colors, textures, and effects of the game. It makes the game look more realistic and beautiful.
-TruckersMP - a mod that allows you to play online multiplayer with other players. You can join servers with different rules and modes, such as simulation, arcade, or convoy. You can also chat with other players, form companies, and participate in events.
-ETS2 Studio - a tool that lets you create your own mods for the game. You can make new trucks, trailers, cargoes, paint jobs, accessories, and more.
-SCS Forum - the official forum of SCS Software where you can find news, updates, announcements, guides, tutorials, tips, tricks, and more about the game. You can also interact with other players and developers, ask questions, give feedback, and report bugs.
-World of Trucks - an online service that connects players from around the world. You can create your profile, upload your screenshots or videos, join competitions, earn achievements, and more.
-Conclusion
-In conclusion, Euro Truck Simulator 2 is a game that offers you a realistic and immersive driving simulation experience. You can drive across Europe, deliver various cargoes, and run your own trucking business. You can also download the game from the official website or Steam, and customize it with many expansions and mods. If you are looking for a fun and challenging game that lets you explore the beauty and diversity of Europe, you should definitely try Euro Truck Simulator 2 today.
-FAQs
-Here are some of the frequently asked questions about Euro Truck Simulator 2:
-How much does Euro Truck Simulator 2 cost?
-The base game of Euro Truck Simulator 2 costs $19.99 on the official website or Steam. However, you can often find discounts or sales that lower the price. You can also buy the game with some or all of the expansions for a higher price. The expansions range from $8.99 to $17.99 each.
-Is Euro Truck Simulator 2 compatible with Windows 10?
-Yes, Euro Truck Simulator 2 is compatible with Windows 10. However, you might need to update your drivers or settings to ensure optimal performance. You can also check the official forum or the Steam community for any issues or solutions.
-Can I play Euro Truck Simulator 2 with a controller?
-Yes, you can play Euro Truck Simulator 2 with a controller. The game supports various types of controllers, such as Xbox, PlayStation, Logitech, Thrustmaster, and more. You can also customize your controller settings in the options menu.
-Can I play Euro Truck Simulator 2 with a steering wheel?
-Yes, you can play Euro Truck Simulator 2 with a steering wheel. The game supports various types of steering wheels, such as Logitech, Thrustmaster, Fanatec, and more. You can also customize your steering wheel settings in the options menu.
-Can I play Euro Truck Simulator 2 with VR?
-Yes, you can play Euro Truck Simulator 2 with VR. The game supports various types of VR headsets, such as Oculus Rift, HTC Vive, Valve Index, and more. You can also customize your VR settings in the options menu.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/metrics/LEC.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/metrics/LEC.py
deleted file mode 100644
index 3eef2d2f00a4d757a56b6e845a8fde16aab306ab..0000000000000000000000000000000000000000
--- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/metrics/LEC.py
+++ /dev/null
@@ -1,134 +0,0 @@
-import sys
-import argparse
-import torch
-import numpy as np
-from torch.utils.data import DataLoader
-
-sys.path.append(".")
-sys.path.append("..")
-
-from configs import data_configs
-from datasets.images_dataset import ImagesDataset
-from utils.model_utils import setup_model
-
-
-class LEC:
- def __init__(self, net, is_cars=False):
- """
- Latent Editing Consistency metric as proposed in the main paper.
- :param net: e4e model loaded over the pSp framework.
- :param is_cars: An indication as to whether or not to crop the middle of the StyleGAN's output images.
- """
- self.net = net
- self.is_cars = is_cars
-
- def _encode(self, images):
- """
- Encodes the given images into StyleGAN's latent space.
- :param images: Tensor of shape NxCxHxW representing the images to be encoded.
- :return: Tensor of shape NxKx512 representing the latent space embeddings of the given image (in W(K, *) space).
- """
- codes = self.net.encoder(images)
- assert codes.ndim == 3, f"Invalid latent codes shape, should be NxKx512 but is {codes.shape}"
- # normalize with respect to the center of an average face
- if self.net.opts.start_from_latent_avg:
- codes = codes + self.net.latent_avg.repeat(codes.shape[0], 1, 1)
- return codes
-
- def _generate(self, codes):
- """
- Generate the StyleGAN2 images of the given codes
- :param codes: Tensor of shape NxKx512 representing the StyleGAN's latent codes (in W(K, *) space).
- :return: Tensor of shape NxCxHxW representing the generated images.
- """
- images, _ = self.net.decoder([codes], input_is_latent=True, randomize_noise=False, return_latents=True)
- images = self.net.face_pool(images)
- if self.is_cars:
- images = images[:, :, 32:224, :]
- return images
-
- @staticmethod
- def _filter_outliers(arr):
- arr = np.array(arr)
-
- lo = np.percentile(arr, 1, interpolation="lower")
- hi = np.percentile(arr, 99, interpolation="higher")
- return np.extract(
- np.logical_and(lo <= arr, arr <= hi), arr
- )
-
- def calculate_metric(self, data_loader, edit_function, inverse_edit_function):
- """
- Calculate the LEC metric score.
- :param data_loader: An iterable that returns a tuple of (images, _), similar to the training data loader.
- :param edit_function: A function that receives latent codes and performs a semantically meaningful edit in the
- latent space.
- :param inverse_edit_function: A function that receives latent codes and performs the inverse edit of the
- `edit_function` parameter.
- :return: The LEC metric score.
- """
- distances = []
- with torch.no_grad():
- for batch in data_loader:
- x, _ = batch
- inputs = x.to(device).float()
-
- codes = self._encode(inputs)
- edited_codes = edit_function(codes)
- edited_image = self._generate(edited_codes)
- edited_image_inversion_codes = self._encode(edited_image)
- inverse_edit_codes = inverse_edit_function(edited_image_inversion_codes)
-
- dist = (codes - inverse_edit_codes).norm(2, dim=(1, 2)).mean()
- distances.append(dist.to("cpu").numpy())
-
- distances = self._filter_outliers(distances)
- return distances.mean()
-
-
-if __name__ == "__main__":
- device = "cuda"
-
- parser = argparse.ArgumentParser(description="LEC metric calculator")
-
- parser.add_argument("--batch", type=int, default=8, help="batch size for the models")
- parser.add_argument("--images_dir", type=str, default=None,
- help="Path to the images directory on which we calculate the LEC score")
- parser.add_argument("ckpt", metavar="CHECKPOINT", help="path to the model checkpoints")
-
- args = parser.parse_args()
- print(args)
-
- net, opts = setup_model(args.ckpt, device)
- dataset_args = data_configs.DATASETS[opts.dataset_type]
- transforms_dict = dataset_args['transforms'](opts).get_transforms()
-
- images_directory = dataset_args['test_source_root'] if args.images_dir is None else args.images_dir
- test_dataset = ImagesDataset(source_root=images_directory,
- target_root=images_directory,
- source_transform=transforms_dict['transform_source'],
- target_transform=transforms_dict['transform_test'],
- opts=opts)
-
- data_loader = DataLoader(test_dataset,
- batch_size=args.batch,
- shuffle=False,
- num_workers=2,
- drop_last=True)
-
- print(f'dataset length: {len(test_dataset)}')
-
- # In the following example, we are using an InterfaceGAN based editing to calculate the LEC metric.
- # Change the provided example according to your domain and needs.
- direction = torch.load('../editings/interfacegan_directions/age.pt').to(device)
-
- def edit_func_example(codes):
- return codes + 3 * direction
-
-
- def inverse_edit_func_example(codes):
- return codes - 3 * direction
-
- lec = LEC(net, is_cars='car' in opts.dataset_type)
- result = lec.calculate_metric(data_loader, edit_func_example, inverse_edit_func_example)
- print(f"LEC: {result}")
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bermuda Adventures Farm Island MOD APK Features and Benefits.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bermuda Adventures Farm Island MOD APK Features and Benefits.md
deleted file mode 100644
index d952f4205b81695c9346b0116a0cc4acd23b3b8b..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bermuda Adventures Farm Island MOD APK Features and Benefits.md
+++ /dev/null
@@ -1,124 +0,0 @@
-
-Bermuda Adventures Farm Island Mod APK: A Fun and Relaxing Tropic Simulation Game
- Do you love simulation games that let you create your own island paradise? Do you want to experience the beauty and adventure of Bermuda? Do you want to enjoy unlimited gems and coins without spending real money? If you answered yes to any of these questions, then you should try Bermuda Adventures Farm Island Mod APK , a fun and relaxing tropic simulation game that will make you feel like you are on a vacation.
- What is Bermuda Adventures Farm Island?
- Bermuda Adventures Farm Island is a simulation game developed by Samfinaco Limited. It is available for Android devices and has over one million downloads on Google Play. In this game, you can:
-bermuda adventures farm island mod apk Download Zip »»» https://gohhs.com/2uPnwK
- A game where you can create your own island paradise
- You can design your own island by building houses, farms, workshops, restaurants, and more. You can also decorate your island with flowers, trees, fences, statues, and other items. You can make your island as beautiful and cozy as you want.
- A game where you can explore, farm, craft, and trade
- You can explore the stunning scenery of Bermuda by sailing on a boat, flying on a plane, or riding on a bike. You can discover new islands, landmarks, animals, and treasures. You can also farm various crops, fruits, and fish on your island. You can use them to cook delicious meals or craft useful items. You can also trade with other islands and earn money.
- A game where you can meet new friends and help them
- You can meet many interesting characters in Bermuda Adventures Farm Island. They will have different stories, personalities, and requests. You can help them with their problems or join them in their adventures. You can also chat with other players online and make new friends.
- What are the features of Bermuda Adventures Farm Island Mod APK?
- Bermuda Adventures Farm Island Mod APK is a modified version of the original game that gives you some extra benefits. These include:
- Unlimited gems and coins
- Gems and coins are the main currencies in Bermuda Adventures Farm Island. You can use them to buy items, upgrade buildings, unlock new features, and more. However, they are not easy to earn in the game. You have to complete tasks, watch ads, or spend real money to get them. With Bermuda Adventures Farm Island Mod APK, you don't have to worry about that. You will have unlimited gems and coins at your disposal. You can use them as much as you want without any restrictions.
- Free shopping and upgrades
-
With Bermuda Adventures Farm Island Mod APK, you can also enjoy free shopping and upgrades. You can buy any item you want from the shop without spending any gems or coins. You can also upgrade your buildings and facilities to the maximum level without waiting for time or resources. You can make your island more productive and attractive with ease.
- No ads and no root required
- Another advantage of Bermuda Adventures Farm Island Mod APK is that it removes all the annoying ads from the game. You can play the game without any interruptions or distractions. You can also install the mod apk file without rooting your device. You don't have to worry about any security risks or compatibility issues.
-bermuda adventures farm island unlimited gems
-bermuda adventures farm island hack apk download
-bermuda adventures farm island cheats and tips
-bermuda adventures farm island latest version mod
-bermuda adventures farm island free shopping mod
-bermuda adventures farm island gameplay and review
-bermuda adventures farm island mod apk for android
-bermuda adventures farm island mod apk offline
-bermuda adventures farm island mod apk no root
-bermuda adventures farm island mod apk unlimited money
-bermuda adventures farm island mod apk 2023
-bermuda adventures farm island mod apk rexdl
-bermuda adventures farm island mod apk revdl
-bermuda adventures farm island mod apk happymod
-bermuda adventures farm island mod apk an1
-bermuda adventures farm island mod apk android 1
-bermuda adventures farm island mod apk obb
-bermuda adventures farm island mod apk data
-bermuda adventures farm island mod apk pure
-bermuda adventures farm island mod apk apkpure
-bermuda adventures farm island mod apk mob.org
-bermuda adventures farm island mod apk uptodown
-bermuda adventures farm island mod apk 1.11.0
-bermuda adventures farm island mod apk 1.10.0
-bermuda adventures farm island mod apk 1.9.0
-bermuda adventures farm island mod apk 1.8.0
-bermuda adventures farm island mod apk 1.7.0
-bermuda adventures farm island mod apk 1.6.0
-bermuda adventures farm island mod apk 1.5.0
-bermuda adventures farm island mod apk 1.4.0
-bermuda adventures farm island mod apk 1.3.0
-bermuda adventures farm island mod apk 1.2.0
-bermuda adventures farm island mod apk 1.1.0
-bermuda adventures farm island mod apk 1.0.0
-how to install bermuda adventures farm island mod apk
-how to play bermuda adventures farm island mod apk
-how to update bermuda adventures farm island mod apk
-how to get free gems in bermuda adventures farm island mod apk
-how to unlock all islands in bermuda adventures farm island mod apk
-how to build a house in bermuda adventures farm island mod apk
-how to make friends in bermuda adventures farm island mod apk
-how to breed animals in bermuda adventures farm island mod apk
-how to grow crops in bermuda adventures farm island mod apk
-how to craft items in bermuda adventures farm island mod apk
-how to complete quests in bermuda adventures farm island mod apk
-how to earn money in bermuda adventures farm island mod apk
-how to decorate your farm in bermuda adventures farm island mod apk
-how to explore the map in bermuda adventures farm island mod apk
- How to download and install Bermuda Adventures Farm Island Mod APK?
- If you want to download and install Bermuda Adventures Farm Island Mod APK, you can follow these simple steps:
- Download the mod apk file from a trusted source
- The first step is to download the mod apk file from a reliable source. You can search for it online or use the link provided below. Make sure you download the latest version of the mod apk file that matches your device specifications.
- Enable unknown sources on your device settings
- The next step is to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play. To do this, go to your device settings, then security, then unknown sources. Turn on the option and confirm your choice.
- Install the mod apk file and enjoy the game
- The final step is to install the mod apk file and enjoy the game. Locate the downloaded mod apk file on your device storage and tap on it. Follow the instructions on the screen and wait for the installation to finish. Once done, you can launch the game and start your adventure.
- How to play Bermuda Adventures Farm Island Mod APK?
- Bermuda Adventures Farm Island Mod APK is easy to play and has a user-friendly interface. Here are some tips on how to play the game:
- Start your adventure by customizing your character
- When you start the game, you will be able to customize your character. You can choose your gender, skin tone, hair style, eye color, and outfit. You can also name your character and choose a pet companion. You can change your appearance later in the game if you want.
- Harvest crops, fruits, and fish on your island
- One of the main activities in the game is harvesting crops, fruits, and fish on your island. You can plant seeds, water them, and wait for them to grow. You can also collect fruits from trees and bushes, and fish from ponds and rivers. You can use these resources for cooking, crafting, or trading.
- Cook delicious meals and craft useful items
- Another important activity in the game is cooking delicious meals and crafting useful items. You can use the resources you harvested or bought to make different dishes and products. You can use them for yourself, for your friends, or for quests. You can also sell them for money or gems.
- Trade with other islands and complete quests
- You can also trade with other islands and complete quests in the game. You can visit other islands by using your boat, plane, or bike. You can buy or sell items with them, or exchange gifts. You can also accept quests from them, which will reward you with money, gems, or items.
- Explore the secrets of Bermuda and discover its mysteries
- The game also has a story mode that will let you explore the secrets of Bermuda and discover its mysteries. You will encounter different characters, events, and puzzles along the way. You will also learn more about the history and culture of Bermuda.
- Why should you play Bermuda Adventures Farm Island Mod APK?
- Bermuda Adventures Farm Island Mod APK is a game that will give you a lot of fun and relaxation. Here are some reasons why you should play it:
- It is a fun and relaxing game that will make you feel like you are on a vacation
- The game has beautiful graphics, soothing music, and realistic sound effects that will make you feel like you are on a vacation in Bermuda. The game has a relaxing pace that will let you enjoy every moment of your island life.
- It is a game that will challenge your creativity and skills
- The game also has many challenges that will test your creativity and skills. You will have to design your island, manage your resources, complete quests, solve puzzles, and more. The game will keep you entertained and engaged for hours.
- It is a game that will let you interact with other players and make friends
- The game also has a social aspect that will let you interact with other players and make friends. You can chat with them, send them gifts, visit their islands, and help them. You can also join a club or create your own. You can participate in club events, competitions, and parties.
- Conclusion
- Bermuda Adventures Farm Island Mod APK is a fun and relaxing tropic simulation game that will make you feel like you are on a vacation. You can create your own island paradise, explore, farm, craft, trade, and more. You can also enjoy unlimited gems and coins, free shopping and upgrades, no ads and no root required. You can download and install the mod apk file easily and start your adventure. You can also play with other players and make friends. Bermuda Adventures Farm Island Mod APK is a game that you should not miss.
- FAQs
- Here are some frequently asked questions about Bermuda Adventures Farm Island Mod APK:
- Is Bermuda Adventures Farm Island Mod APK safe to use?
- Yes, Bermuda Adventures Farm Island Mod APK is safe to use. It does not contain any viruses, malware, or spyware. It does not require root access or any permissions. It does not affect the performance or security of your device.
- Is Bermuda Adventures Farm Island Mod APK compatible with my device?
- Bermuda Adventures Farm Island Mod APK is compatible with most Android devices that have Android 4.4 or higher. However, some devices may have different specifications or settings that may cause some issues. If you encounter any problems, you can contact the developer or the mod apk source for support.
- How can I update Bermuda Adventures Farm Island Mod APK?
- Bermuda Adventures Farm Island Mod APK is updated regularly to fix bugs, improve features, and add new content. You can check for updates on the mod apk source or the developer's website. You can also enable automatic updates on your device settings. However, you may have to uninstall and reinstall the mod apk file if there are major changes.
- How can I uninstall Bermuda Adventures Farm Island Mod APK?
- If you want to uninstall Bermuda Adventures Farm Island Mod APK, you can do so easily by following these steps:
-
-Go to your device settings, then apps, then Bermuda Adventures Farm Island.
-Tap on uninstall and confirm your choice.
-Delete the mod apk file from your device storage.
-
-You can also reinstall the original game from Google Play if you want.
- Where can I get more information about Bermuda Adventures Farm Island Mod APK?
- If you want to get more information about Bermuda Adventures Farm Island Mod APK, you can visit the following sources:
-
-The mod apk source: [text]
-The developer's website: [text]
-The official Facebook page: [text]
-
-You can also leave a comment or a review on the mod apk source or the developer's website if you have any feedback or suggestions.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Create and Discover Short Videos with TikTok APK 27.8.3.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Create and Discover Short Videos with TikTok APK 27.8.3.md
deleted file mode 100644
index 3e049cc89d534b8780852b5f8956f4e4fd19dc70..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Create and Discover Short Videos with TikTok APK 27.8.3.md
+++ /dev/null
@@ -1,138 +0,0 @@
-
-TikTok Apk v27.8.3: Everything You Need to Know
- TikTok is a video-sharing app that allows users to create and share short-form videos on any topic. It’s mainly mobile-based, although you can still watch TikTok videos using the web app. The platform allows users to get creative with their content using filters, stickers, voiceovers, sound effects, and background music.
- TikTok has become one of the most popular social media apps in the world, with over 800 million active users and 2 billion downloads as of April 2020. It offers TikTok creators access to a massive library of music and sounds as well as some great video editing tools and the usual suspects you find in social media apps—voice changers, filters, effects, and more.
-tiktok apk v27.8.3 Download Zip >>> https://gohhs.com/2uPux3
- But TikTok isn’t the only app of its kind available. In fact, there are several video editing and sharing apps out there. Here are some of the best features and benefits of TikTok apk v27.8.3, how to install it on your device, and some alternatives you can try if you want to explore other options.
- What are the features and benefits of TikTok apk v27.8.3?
- TikTok apk v27.8.3 is the latest version of the app that was released on June 16, 2023. It comes with some new features and improvements that make it even more fun and engaging to use.
- Some of the features and benefits of TikTok apk v27.8.3 are:
-
-Explore new video effects : The app has added some new video effects that you can use to spice up your videos. For example, you can use the Green Screen Sky effect to change the background of your video to a different sky scene. You can also use the Face Morph effect to transform your face into someone else's.
-Create playlists of your videos : The app has introduced a new feature that allows you to create playlists of your videos. This way, you can organize your videos by theme, mood, or occasion and share them with your followers or friends. You can also watch playlists created by other users and discover new content.
-Watch videos in a playlist : The app has also improved the way you can watch videos in a playlist. You can now swipe left or right to skip or go back to a video in a playlist. You can also see how many videos are in a playlist and how many you have watched.
-Enjoy better performance and stability : The app has fixed some bugs and enhanced the performance and stability of the app. You can now enjoy a smoother and faster experience while using TikTok.
-
- How to install TikTok apk v27.8.3 on different devices?
- If you want to install TikTok apk v27.8.3 on your device, you need to follow these steps:
- For Android devices:
-
-Go to [TikTok 27.8.3 APK Download - Softpedia](^1^) and download the apk file.
-Open the file manager on your device and locate the downloaded file.
-Tap on the file and allow installation from unknown sources if prompted.
-Follow the instructions on the screen to complete the installation.
-Launch the app and enjoy.
-
- For iOS devices:
-
-Go to [TikTok on the App Store](^1^) and download the app.
-Open the app and follow the instructions on the screen to sign up or log in.
-Allow the app to access your camera, microphone, and photos if prompted.
-Start creating and watching videos on TikTok.
-
- For Windows devices:
-
-Go to [TikTok - Download](^2^) and download the TikTok for Windows app.
-Open the downloaded file and follow the instructions on the screen to install the app.
-Launch the app and sign up or log in with your TikTok account.
-Start creating and watching videos on TikTok.
-
- What are some alternatives to TikTok if you want to try something else?
- TikTok is not the only video-sharing app out there. If you want to try something else, here are some alternatives you can check out:
-
-
-| App | Description |
-| --- | --- |
-| Instagram Reels | Instagram Reels is a feature within Instagram that allows you to create and share 15-second videos with music, filters, and effects. You can also browse and watch Reels from other users in a dedicated tab on the app. Reels is similar to TikTok, but with a more familiar interface and integration with Instagram. |
-| YouTube Shorts | YouTube Shorts is a feature within YouTube that allows you to create and share 15-second videos with music, filters, and effects. You can also browse and watch Shorts from other users in a dedicated tab on the app. Shorts is similar to TikTok, but with a more diverse content library and integration with YouTube. |
-| Dubsmash | Dubsmash is a video-sharing app that allows you to create and share short videos with lip-syncing, dancing, comedy, and more. You can also browse and watch videos from other users in various categories. Dubsmash is similar to TikTok, but with a more niche focus and community. |
-
- Conclusion
- TikTok apk v27.8.3 is the latest version of the popular video-sharing app that offers some new features and improvements that make it more fun and engaging to use. You can explore new video effects, create playlists of your videos, watch videos in a playlist, and enjoy better performance and stability. You can install TikTok apk v27.8.3 on your Android, iOS, or Windows device by following the steps above. You can also try some alternatives to TikTok if you want to experience different video-sharing platforms.
-tiktok apk v27.8.3 download
-tiktok apk v27.8.3 mod
-tiktok apk v27.8.3 latest version
-tiktok apk v27.8.3 free
-tiktok apk v27.8.3 android
-tiktok apk v27.8.3 update
-tiktok apk v27.8.3 premium
-tiktok apk v27.8.3 unlocked
-tiktok apk v27.8.3 for pc
-tiktok apk v27.8.3 online
-tiktok apk v27.8.3 hack
-tiktok apk v27.8.3 no watermark
-tiktok apk v27.8.3 without ads
-tiktok apk v27.8.3 old version
-tiktok apk v27.8.3 install
-tiktok apk v27.8.3 review
-tiktok apk v27.8.3 features
-tiktok apk v27.8.3 pro
-tiktok apk v27.8.3 cracked
-tiktok apk v27.8.3 full
-tiktok apk v27.8.3 beta
-tiktok apk v27.8.3 new
-tiktok apk v27.8.3 original
-tiktok apk v27.8.3 official
-tiktok apk v27.8.3 plus
-tiktok apk v27.8.3 unlimited
-tiktok apk v27.8.3 2022
-tiktok apk v27.8.3 2021
-tiktok apk v27.8.3 2020
-tiktok apk v27.8.3 2019
-tiktok apk v27.8.3 2018
-tiktok apk v27.8.3 2017
-tiktok apk v27.8.3 2016
-tiktok apk v27.8.3 2015
-tiktok apk v27.8.3 2014
-tiktok apk v27.8.3 2013
-tiktok apk v27.8.3 2012
-tiktok apk v27.8.3 2011
-tiktok apk v27.8.3 2010
-tiktok apk v27
- If you enjoyed this article, please share it with your friends and family who might be interested in TikTok apk v27.8.3. Also, feel free to leave a comment below if you have any questions or feedback about the app or the article. Thank you for reading!
- FAQs
- Here are some frequently asked questions about TikTok apk v27.8.3:
-
-Is TikTok apk v27.8.3 safe to use?
-TikTok apk v27.8.3 is safe to use as long as you download it from a trusted source like [TikTok 27.8.3 APK Download - Softpedia](^1^) for Android devices or [TikTok on the App Store](^2^) for iOS devices. You should also be careful about what content you share and who you interact with on the app.
- What are the minimum requirements for TikTok apk v27.8.3?
-TikTok apk v27.8.3 requires Android 4.1 or higher for Android devices or iOS 9.3 or higher for iOS devices. It also requires an internet connection and access to your camera, microphone, and photos.
- How can I update TikTok apk v27.8.3?
-You can update TikTok apk v27.8.3 by going to [TikTok 27.8.3 APK Download - Softpedia](^1^) for Android devices or [TikTok on the App Store](^2^) for iOS devices and downloading the latest version of the app.
-How can I delete TikTok apk v27.8.3?
-You can delete TikTok apk v27.8.3 by going to the settings of your device and uninstalling the app. You can also delete your TikTok account by going to the app settings and tapping on Manage my account and then Delete account .
- What are the advantages and disadvantages of TikTok apk v27.8.3?
-TikTok apk v27.8.3 has some advantages and disadvantages that you should consider before using it. Here are some of them:
-
-
-| Advantages | Disadvantages |
-| --- | --- |
-| It allows you to create and share short videos with music, filters, and effects. | It can be addictive and time-consuming if you use it too much. |
-| It offers a large library of music and sounds that you can use for your videos. | It can expose you to inappropriate or harmful content or users if you are not careful. |
-| It has some new features and improvements that make it more fun and engaging to use. | It can consume a lot of data and battery if you use it on mobile devices. |
-| It has a huge and diverse user base that you can interact with. | It can have some technical issues or bugs that affect the performance and stability of the app. |
-
-
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bagardi - Baby Stop MP3 for Free.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bagardi - Baby Stop MP3 for Free.md
deleted file mode 100644
index 854c3d13b48d6912d846dca7ef85017dfa9be920..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bagardi - Baby Stop MP3 for Free.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-Bagardi - Baby Stop: A New Hit Song That Will Make You Dance
-Do you love music that makes you feel good and want to move your body? Are you looking for a new song that will spice up your playlist and get you in the mood for fun? If you answered yes to these questions, then you need to check out Bagardi's latest single, Baby Stop. This song is a catchy and upbeat tune that will make you fall in love with Bagardi's voice and style. In this article, we will tell you everything you need to know about Bagardi, Baby Stop, and how to download it.
- Who is Bagardi?
-Bagardi is a young and talented singer from Russia who is making waves in the music industry. He started his career as a rapper, but soon discovered his passion for singing and pop music. He has been releasing songs since 2019, and has gained a loyal fan base who appreciate his unique and versatile sound.
-bagardi baby stop скачать Download File –––––>>> https://gohhs.com/2uPucy
- A rising star in the music industry
-Bagardi is not afraid to experiment with different genres and styles, and he always brings something fresh and original to the table. He has collaborated with various producers and artists, such as DJ Smash, NILETTO, and Zivert. He has also performed at many festivals and events, such as VK Fest, Europa Plus Live, and Love Radio Awards. He has been nominated for several awards, such as Muz-TV Award for Best New Artist, and RU.TV Award for Best Breakthrough.
- His musical influences and style
-Bagardi draws inspiration from many sources, such as hip-hop, R&B, dancehall, reggaeton, and pop. He likes to mix different languages and cultures in his songs, such as English, Russian, Spanish, and Arabic. He also likes to add some humor and irony to his lyrics, which makes his songs more relatable and fun. He describes his style as "pop with a twist", and he aims to make music that can appeal to a wide audience.
- What is Baby Stop?
-Baby Stop is Bagardi's newest single, which was released on June 18, 2021. It is a song that will make you want to dance and sing along with its catchy chorus and melody. It is also a song that will make you feel good and happy with its positive and uplifting message.
-bagardi baby stop mp3 download
-bagardi baby stop lyrics
-bagardi baby stop song
-bagardi baby stop music video
-bagardi baby stop remix
-bagardi baby stop tiktok
-bagardi baby stop spotify
-bagardi baby stop apple music
-bagardi baby stop youtube
-bagardi baby stop ringtone
-bagardi baby stop piano cover
-bagardi baby stop karaoke
-bagardi baby stop instrumental
-bagardi baby stop chords
-bagardi baby stop guitar tabs
-bagardi baby stop reaction
-bagardi baby stop dance challenge
-bagardi baby stop live performance
-bagardi baby stop acoustic version
-bagardi baby stop nightcore
-bagardi baby stop 1 hour loop
-bagardi baby stop slowed and reverb
-bagardi baby stop mashup
-bagardi baby stop genre
-bagardi baby stop meaning
-bagardi baby stop release date
-bagardi baby stop album name
-bagardi baby stop producer
-bagardi baby stop singer name
-bagardi baby stop origin country
-bagardi baby stop language
-bagardi baby stop translation
-bagardi baby stop english lyrics
-bagardi baby stop russian lyrics
-bagardi baby stop romanization
-bagardi baby stop pronunciation guide
-bagardi baby stop fan art
-bagardi baby stop merchandise
-bagardi baby stop wallpaper
-bagardi baby stop quotes
-bagardi baby stop trivia
-bagardi baby stop fun facts
-bagardi baby stop review
-bagardi baby stop rating
-bagardi baby stop awards
-bagardi baby stop chart performance
-bagardi baby stop sales figures
-bagardi baby stop streaming numbers
-bagardi baby stop similar songs
- The meaning and message of the song
-Baby Stop is a song about love and attraction, but also about respect and consent. It is a song that celebrates the beauty and power of women, and encourages them to be confident and assertive. It is also a song that reminds men to be respectful and attentive to women's wishes and boundaries. The song's main message is that love should be fun and enjoyable, but also respectful and mutual.
- The catchy chorus and melody
-Baby Stop has a catchy chorus that will stick in your head for days. The chorus goes like this:
-
-
-Baby love me love me love me
-Baby stop
-Baby kiss me kiss me kiss me
-Baby stop
-Baby touch me touch me touch me
-Baby stop
-Baby tell me tell me tell me
-What you want
-
-The chorus is simple but effective, as it repeats the words "baby" and "stop" with different verbs in between. The contrast between the words "love", "kiss", "touch", and "tell" creates a sense of tension and excitement, while the word "stop" creates a sense of suspense and curiosity. The melody of the chorus is upbeat and energetic, with a reggaeton rhythm that makes you want to dance and groove along. The chorus is the highlight of the song, and it will make you want to repeat it over and over again.
- The production and release of the song
-Baby Stop was produced by DJ Smash, a famous Russian DJ and producer who has worked with many artists, such as Timati, Polina Gagarina, and Quest Pistols. DJ Smash is known for his club and dance music, and he added his signature touch to Baby Stop. He created a catchy and vibrant beat that matches Bagardi's vocals and style. He also added some elements of Latin music, such as horns, guitars, and percussion, to give the song a more exotic and festive feel.
-Baby Stop was released on June 18, 2021, on various platforms, such as YouTube, Spotify, Apple Music, and VK. The song was accompanied by a colorful and fun music video that features Bagardi and DJ Smash in a tropical setting, surrounded by beautiful women and dancers. The music video has over 10 million views on YouTube, and the song has over 5 million streams on Spotify. The song has also received positive feedback from fans and critics, who praised its catchy chorus, upbeat melody, and positive message.
- How to download Baby Stop?
-If you love Baby Stop as much as we do, you might want to download it to your device so you can listen to it anytime and anywhere. Downloading the song has many benefits, such as saving data, avoiding ads, creating playlists, and supporting the artist. In this section, we will show you how to download Baby Stop from the official platforms and sources.
- The official platforms and sources
-The best way to download Baby Stop is to use the official platforms and sources that are authorized by Bagardi and DJ Smash. These platforms include:
-
-YouTube Music: This is a music streaming service that allows you to download songs and videos from YouTube. You can download Baby Stop from YouTube Music by subscribing to YouTube Premium, which costs $11.99 per month. YouTube Premium also gives you access to ad-free videos, background play, offline access, and YouTube Originals.
-Spotify: This is another music streaming service that allows you to download songs from its library. You can download Baby Stop from Spotify by subscribing to Spotify Premium, which costs $9.99 per month. Spotify Premium also gives you access to ad-free music, unlimited skips, offline mode, and high-quality audio.
-Apple Music: This is a music streaming service that allows you to download songs from its catalog. You can download Baby Stop from Apple Music by subscribing to Apple Music, which costs $9.99 per month. Apple Music also gives you access to ad-free music, offline listening, personalized recommendations, and exclusive content.
-VK: This is a social media platform that allows you to download songs from its community. You can download Baby Stop from VK by creating a free account and joining the Bagardi fan group. VK also gives you access to chat with other fans, share your thoughts, and discover new music.
-
- The benefits of downloading the song
-Downloading Baby Stop has many benefits that will enhance your listening experience and enjoyment. Some of these benefits are:
-
-You can save data: Downloading the song will save you data usage when you listen to it offline. This will help you avoid extra charges or slow internet speed.
-You can avoid ads: Downloading the song will help you avoid annoying ads that interrupt your music flow. This will help you enjoy the song without distractions or interruptions.
-You can create playlists: Downloading the song will allow you to create playlists with your favorite songs. This will help you organize your music library and customize your mood.
-You can support the artist: Downloading the song will show your support for Bagardi and DJ Smash. This will help them earn revenue and recognition for their work.
-
- The steps to download the song
-Downloading Baby Stop is easy and simple if you follow these steps:
-
-Choose your preferred platform from the list above.
-Subscribe to the premium service if required.
-Search for Baby Stop by Bagardi feat. DJ Smash.
-Click on the download button or icon next to the song.
-Wait for the download to complete.
-Enjoy listening to Baby Stop offline!
-
- Conclusion
-Baby Stop is a new hit song by Bagardi feat. DJ Smash that will make you dance and feel good. It is a catchy and upbeat tune that celebrates the beauty and power of women, and encourages them to be confident and assertive. It is also a song that reminds men to be respectful and attentive to women's wishes and boundaries. The song has a catchy chorus, a vibrant melody, and a positive message that will make you happy and uplifted.
-In this article, we have told you everything you need to know about Bagardi, Baby Stop, and how to download it. We have introduced you to Bagardi, a rising star in the music industry who has a unique and versatile style. We have explained the meaning and message of Baby Stop, a song that celebrates love and respect. We have also shown you how to download Baby Stop from the official platforms and sources, and the benefits of doing so.
-We hope you have enjoyed reading this article and learning more about Bagardi and Baby Stop. If you have not listened to the song yet, we urge you to do so as soon as possible. You will not regret it, as it is one of the best songs of the year. You can also share the song with your friends and family, and spread the joy and positivity that it brings. Thank you for reading, and have a great day!
- FAQs
-Here are some frequently asked questions about Bagardi and Baby Stop:
-
-Q: Where can I watch the music video of Baby Stop?
-A: You can watch the music video of Baby Stop on YouTube, by clicking on this link: . You can also watch it on VK, by clicking on this link: .
-Q: Where can I follow Bagardi on social media?
-A: You can follow Bagardi on Instagram, by clicking on this link: . You can also follow him on VK, by clicking on this link: .
-Q: What are some other songs by Bagardi that I should listen to?
-A: Some other songs by Bagardi that you should listen to are: Ya Lyublyu Tebya (I Love You), S Toboy (With You), Zvezda (Star), and Vse Budet Horosho (Everything Will Be Fine).
-Q: What are some other songs by DJ Smash that I should listen to?
-A: Some other songs by DJ Smash that you should listen to are: Moscow Never Sleeps, Volna (Wave), Ptitsa (Bird), and Luchshiye Pesni (Best Songs).
-Q: How can I support Bagardi and DJ Smash?
-A: You can support Bagardi and DJ Smash by downloading their songs from the official platforms and sources, streaming their songs on music services, watching their videos on YouTube and VK, following them on social media, sharing their songs with your friends and family, and attending their concerts and events.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fffffu/bing/src/lib/storage.ts b/spaces/fffffu/bing/src/lib/storage.ts
deleted file mode 100644
index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000
--- a/spaces/fffffu/bing/src/lib/storage.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import { getMany, set, del, clear } from 'idb-keyval';
-
-export const Storage = {
- async get(key: string | string[] | null): Promise<Record<string, any> | null> {
- if (key === null) return null;
- if (typeof key === 'string') {
- key = [key]
- }
- const returnData: Record<string, any> = {}
- const values = await getMany(key)
- key.forEach((k, idx)=> {
- returnData[k] = values[idx]
- })
- return returnData;
- },
- async set(object: any) {
- for (let key of Object.keys(object)) {
- await set(key, object[key])
- }
- },
- async remove(key: string) {
- return del(key);
- },
- async clear() {
- return clear();
- }
-}
diff --git a/spaces/fffiloni/Image-Caption-2-Shap-E/app.py b/spaces/fffiloni/Image-Caption-2-Shap-E/app.py
deleted file mode 100644
index 8691018e66fd13589cafa97f31b82dbbe12d6292..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Image-Caption-2-Shap-E/app.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-
-import os
-
-import gradio as gr
-import torch
-
-#from app_image_to_3d import create_demo as create_demo_image_to_3d
-from app_text_to_3d import create_demo as create_demo_text_to_3d
-from model import Model
-
-DESCRIPTION = '# Image Caption to [Shap-E](https://github.com/openai/shap-e)'
-
-if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
- DESCRIPTION += f'\nFor faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.'
-if not torch.cuda.is_available():
- DESCRIPTION += '\nRunning on CPU 🥶 This demo does not work on CPU.'
-
-model = Model()
-
-with gr.Blocks(css='style.css') as demo:
- with gr.Column(elem_id="col-container"):
- gr.Markdown(DESCRIPTION)
- create_demo_text_to_3d(model)
-
-demo.queue(max_size=10).launch()
diff --git a/spaces/fffiloni/Music_Source_Separation/scripts/3_create_evaluation_audios/vctk-musdb18/create_evaluation_audios.sh b/spaces/fffiloni/Music_Source_Separation/scripts/3_create_evaluation_audios/vctk-musdb18/create_evaluation_audios.sh
deleted file mode 100644
index b12a57c6e2ddafe7e9db2d9240b58d00898b2c8a..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Music_Source_Separation/scripts/3_create_evaluation_audios/vctk-musdb18/create_evaluation_audios.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-VCTK_DATASET_DIR=${1:-"./datasets/vctk"}
-MUSDB18_DATASET_DIR=${2:-"./datasets/musdb18"}
-WORKSPACE=${3:-"./workspaces/bytesep"}
-
-SAMPLE_RATE=44100
-CHANNELS=2
-EVALUATION_SEGMENTS_NUM=100
-
-EVLUATION_AUDIOS_DIR="${WORKSPACE}/evaluation_audios/vctk-musdb18"
-
-python3 bytesep/dataset_creation/create_evaluation_audios/vctk-musdb18.py \
- --vctk_dataset_dir=$VCTK_DATASET_DIR \
- --musdb18_dataset_dir=$MUSDB18_DATASET_DIR \
- --evaluation_audios_dir=$EVLUATION_AUDIOS_DIR \
- --sample_rate=$SAMPLE_RATE \
- --channels=$CHANNELS \
- --evaluation_segments_num=$EVALUATION_SEGMENTS_NUM
-
\ No newline at end of file
diff --git a/spaces/frncscp/bullerengue/musika/22kHz/musika_train.py b/spaces/frncscp/bullerengue/musika/22kHz/musika_train.py
deleted file mode 100644
index 25bd2f4d0a3b7b7d85036ce261373a1c5e8cc29f..0000000000000000000000000000000000000000
--- a/spaces/frncscp/bullerengue/musika/22kHz/musika_train.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from parse import parse_args
-from data import Data_functions
-from models import Models_functions
-from utils import Utils_functions
-from train import Train_functions
-
-if __name__ == "__main__":
-
- # parse args
- args = parse_args()
-
- # create dataset
- D = Data_functions(args)
- ds = D.create_dataset()
-
- # initialize networks
- M = Models_functions(args)
- models_ls = M.get_networks()
-
- # test musika in real-time during training
- U = Utils_functions(args)
- U.render_gradio(models_ls)
-
- # train musika
- T = Train_functions(args)
- T.train(ds, models_ls)
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Cars 3 (English) Telugu Full Movie Download Mp4 PATCHED.md b/spaces/gotiQspiryo/whisper-ui/examples/Cars 3 (English) Telugu Full Movie Download Mp4 PATCHED.md
deleted file mode 100644
index 7c597345b4fd965bfdd23ec3bf519459f782b53c..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Cars 3 (English) Telugu Full Movie Download Mp4 PATCHED.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
- coolmoviez live and coolmoviez in are popular torrent websites that allow users to download Hollywood, Bollywood, South Indian, Telugu, Hindi, Tamil, and Malayalam movies for free. Movies are available to download in many languages like Hindi, Tamil, Punjabi, Telugu, English, and Malayalam on Kuttymovies. But as we have already said, it is a crime to pirate a movie or to download a pirated copy, so we do not recommend downloading a movie through Telegram or any other website.
-Cars 3 (English) telugu full movie download mp4 Download ===> https://urlgoal.com/2uyNzO
- kuttymovies in is a popular torrent website that allows users to download Hollywood, Bollywood, South Indian, Telugu, Hindi, Tamil, and Malayalam movies for free. Movies are available to download in many languages like Hindi, Tamil, Punjabi, Telugu, English, and Malayalam on Kuttymovies. But as we have already said, it is a crime to pirate a movie or to download a pirated copy, so we do not recommend downloading a movie through Telegram or any other website.
- coolmoviez live and coolmoviez in are popular torrent websites that allow users to download Hollywood, Bollywood, South Indian, Telugu, Hindi, Tamil, and Malayalam movies for free. Movies are available to download in many languages like Tamil, Marathi, Telugu, Punjabi, English, and Malayalam on Kuttymovies. But as we have already said, it is a crime to pirate a movie or to download a pirated copy, so we do not recommend downloading a movie through Telegram or any other website.
- coolmoviez live and coolmoviez in are popular torrent websites that allow users to download Hollywood, Bollywood, South Indian, Telugu, Hindi, Tamil, and Malayalam movies for free.
-
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X5 Keygen REPACK Rar.md b/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X5 Keygen REPACK Rar.md
deleted file mode 100644
index b1816f80e1e1eb406a2d86c72cb1f5a59de157e8..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X5 Keygen REPACK Rar.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Corel draw x5 keygen rar Download Zip ►►► https://urlgoal.com/2uyMx1
-
- 3cee63e6c2
-
-
-
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Electron Configuration Gizmo Answers Key.rar How to Master It.md b/spaces/gotiQspiryo/whisper-ui/examples/Electron Configuration Gizmo Answers Key.rar How to Master It.md
deleted file mode 100644
index da81895d32d59134fc73b791e86a68314863fd25..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Electron Configuration Gizmo Answers Key.rar How to Master It.md
+++ /dev/null
@@ -1,6 +0,0 @@
-electron configuration gizmo answers key.rar Download Zip 🗸🗸🗸 https://urlgoal.com/2uyMJe
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/gradio/HuBERT/fairseq/modules/quantization/pq/modules/qlinear.py b/spaces/gradio/HuBERT/fairseq/modules/quantization/pq/modules/qlinear.py
deleted file mode 100644
index 9bdd25a8685bb7c7b32e1f02372aaeb26d8ba53a..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/fairseq/modules/quantization/pq/modules/qlinear.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class PQLinear(nn.Module):
- """
-    Quantized counterpart of nn.Linear module. Stores the centroids, the assignments
- and the non-quantized biases. The full weight is re-instantiated at each forward
- pass.
-
- Args:
- - centroids: centroids of size n_centroids x block_size
- - assignments: assignments of the centroids to the subvectors
- of size self.out_features x n_blocks
- - bias: the non-quantized bias
-
- Remarks:
- - We refer the reader to the official documentation of the nn.Linear module
- for the other arguments and the behavior of the module
- - Performance tests on GPU show that this implementation is 15% slower than
- the non-quantized nn.Linear module for a standard training loop.
- """
-
- def __init__(self, centroids, assignments, bias, in_features, out_features):
- super(PQLinear, self).__init__()
- self.block_size = centroids.size(1)
- self.n_centroids = centroids.size(0)
- self.in_features = in_features
- self.out_features = out_features
- # check compatibility
- if self.in_features % self.block_size != 0:
- raise ValueError("Wrong PQ sizes")
- if len(assignments) % self.out_features != 0:
- raise ValueError("Wrong PQ sizes")
- # define parameters
- self.centroids = nn.Parameter(centroids, requires_grad=True)
- self.register_buffer("assignments", assignments)
- self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
- if bias is not None:
- self.bias = nn.Parameter(bias)
- else:
- self.register_parameter("bias", None)
-
- @property
- def weight(self):
- return (
- self.centroids[self.assignments]
- .reshape(-1, self.out_features, self.block_size)
- .permute(1, 0, 2)
- .flatten(1, 2)
- )
-
- def forward(self, x):
- return F.linear(
- x,
- self.weight,
- self.bias,
- )
-
- def extra_repr(self):
- return f"in_features={self.in_features},\
- out_features={self.out_features},\
- n_centroids={self.n_centroids},\
- block_size={self.block_size},\
- bias={self.bias is not None}"
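A quick way to see what the deleted `PQLinear.weight` property is doing is to run the same indexing on toy tensors. The sketch below is illustrative only: the sizes and the block-major ordering of `assignments` are assumptions chosen to match the `reshape`/`permute` chain in the code above, not part of the fairseq API.

```python
import torch

# Toy product-quantization sizes (assumed for illustration).
n_centroids, block_size = 16, 4
out_features, in_features = 4, 8
n_blocks = in_features // block_size  # 2

centroids = torch.randn(n_centroids, block_size)                      # codebook
assignments = torch.randint(n_centroids, (out_features * n_blocks,))  # one code per (block, row), block-major

# Same chain of ops as the PQLinear.weight property:
weight = (
    centroids[assignments]                   # [out_features * n_blocks, block_size]
    .reshape(-1, out_features, block_size)   # [n_blocks, out_features, block_size]
    .permute(1, 0, 2)                        # [out_features, n_blocks, block_size]
    .flatten(1, 2)                           # [out_features, in_features]
)
assert weight.shape == (out_features, in_features)
print(torch.nn.functional.linear(torch.randn(2, in_features), weight).shape)  # torch.Size([2, 4])
```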
diff --git a/spaces/gradio/HuBERT/fairseq/optim/adamax.py b/spaces/gradio/HuBERT/fairseq/optim/adamax.py
deleted file mode 100644
index 98ff8ad7ad6c12ab5efc53ca76db2f1663be7906..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/fairseq/optim/adamax.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-import torch.optim
-
-from . import LegacyFairseqOptimizer, register_optimizer
-
-
-@register_optimizer("adamax")
-class FairseqAdamax(LegacyFairseqOptimizer):
- def __init__(self, args, params):
- super().__init__(args)
- self._optimizer = Adamax(params, **self.optimizer_config)
-
- @staticmethod
- def add_args(parser):
- """Add optimizer-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B',
- help='betas for Adam optimizer')
- parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D',
- help='epsilon for Adam optimizer')
- parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
- help='weight decay')
- parser.add_argument('--no-bias-correction', default=False, action='store_true',
- help='disable bias correction')
- # fmt: on
-
- @property
- def optimizer_config(self):
- """
- Return a kwarg dictionary that will be used to override optimizer
- args stored in checkpoints. This allows us to load a checkpoint and
- resume training using a different set of optimizer args, e.g., with a
- different learning rate.
- """
- return {
- "lr": self.args.lr[0],
- "betas": eval(self.args.adamax_betas),
- "eps": self.args.adamax_eps,
- "weight_decay": self.args.weight_decay,
- "bias_correction": not self.args.no_bias_correction,
- }
-
-
-class Adamax(torch.optim.Optimizer):
- """Implements Adamax algorithm (a variant of Adam based on infinity norm).
-
- It has been proposed in `Adam: A Method for Stochastic Optimization`__.
-
- Compared to the version in PyTorch, this version implements a fix for weight decay.
-
- Args:
- params (iterable): iterable of parameters to optimize or dicts defining
- parameter groups
- lr (float, optional): learning rate (default: 2e-3)
-        betas (Tuple[float, float], optional): coefficients used for computing
-            running averages of the gradient and its infinity norm
- eps (float, optional): term added to the denominator to improve
- numerical stability (default: 1e-8)
- weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
- bias_correction (bool, optional): enable bias correction (default: True)
-
- __ https://arxiv.org/abs/1412.6980
- """
-
- def __init__(
- self,
- params,
- lr=2e-3,
- betas=(0.9, 0.999),
- eps=1e-8,
- weight_decay=0,
- bias_correction=True,
- ):
- if not 0.0 <= lr:
- raise ValueError("Invalid learning rate: {}".format(lr))
- if not 0.0 <= eps:
- raise ValueError("Invalid epsilon value: {}".format(eps))
- if not 0.0 <= betas[0] < 1.0:
- raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
- if not 0.0 <= betas[1] < 1.0:
- raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
- if not 0.0 <= weight_decay:
- raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
-
- defaults = dict(
- lr=lr,
- betas=betas,
- eps=eps,
- weight_decay=weight_decay,
- bias_correction=bias_correction,
- )
- super(Adamax, self).__init__(params, defaults)
-
- @property
- def supports_memory_efficient_fp16(self):
- return True
-
- @property
- def supports_flat_params(self):
- return True
-
- def step(self, closure=None):
- """Performs a single optimization step.
-
- Args:
- closure (callable, optional): A closure that reevaluates the model
- and returns the loss.
- """
- loss = None
- if closure is not None:
- loss = closure()
-
- for group in self.param_groups:
- for p in group["params"]:
- if p.grad is None:
- continue
- grad = p.grad.data.float()
- if grad.is_sparse:
- raise RuntimeError("Adamax does not support sparse gradients")
-
- p_data_fp32 = p.data
- if p.data.dtype in {torch.float16, torch.bfloat16}:
- p_data_fp32 = p_data_fp32.float()
-
- state = self.state[p]
-
- # State initialization
- if len(state) == 0:
- state["step"] = 0
- state["exp_avg"] = torch.zeros_like(p_data_fp32)
- state["exp_inf"] = torch.zeros_like(p_data_fp32)
- else:
- state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
- state["exp_inf"] = state["exp_inf"].to(p_data_fp32)
-
- exp_avg, exp_inf = state["exp_avg"], state["exp_inf"]
- beta1, beta2 = group["betas"]
- eps = group["eps"]
-
- state["step"] += 1
-
- # Update biased first moment estimate.
- exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
-
- # Update the exponentially weighted infinity norm.
- torch.max(
- exp_inf.mul_(beta2),
- grad.abs_(),
- out=exp_inf,
- )
-
- step_size = group["lr"]
- if group["bias_correction"]:
- bias_correction = 1 - beta1 ** state["step"]
- step_size /= bias_correction
-
- if group["weight_decay"] != 0:
- p_data_fp32.add_(
- p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
- )
-
- p_data_fp32.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size)
-
- if p.data.dtype in {torch.float16, torch.bfloat16}:
- p.data.copy_(p_data_fp32)
-
- return loss
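For readers skimming the diff, the heart of the deleted `step()` is a small per-parameter update: an exponential moving average of the gradient, an exponentially decayed infinity norm in place of Adam's second moment, and a bias-corrected step. A standalone sketch on plain tensors (the function name and flat argument list are illustrative, not fairseq's API):

```python
import torch

def adamax_update(p, grad, exp_avg, exp_inf, step, lr=2e-3, betas=(0.9, 0.999),
                  eps=1e-8, weight_decay=0.0, bias_correction=True):
    """One Adamax step on plain tensors, mirroring the logic in the file above."""
    beta1, beta2 = betas
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)            # biased first moment
    torch.max(exp_inf.mul_(beta2), grad.abs(), out=exp_inf)    # infinity norm replaces the second moment
    step_size = lr / (1 - beta1 ** step) if bias_correction else lr
    if weight_decay != 0:
        p.add_(p, alpha=-weight_decay * lr)                    # decay applied to the weights directly
    p.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size)

# One toy step:
w = torch.zeros(3)
adamax_update(w, torch.tensor([0.1, -0.2, 0.3]), torch.zeros(3), torch.zeros(3), step=1)
print(w)  # each coordinate moves by about lr, opposite the gradient sign
```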
diff --git a/spaces/h2oai/wave-tour/examples/table_pagination_h2o3.py b/spaces/h2oai/wave-tour/examples/table_pagination_h2o3.py
deleted file mode 100644
index 730078420857c329fcf31fda1bc26321c839fb23..0000000000000000000000000000000000000000
--- a/spaces/h2oai/wave-tour/examples/table_pagination_h2o3.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# Table / Pagination / H2O-3 Dataframe
-# Use a paginated #table to display large (100m+ rows) tabular data using an H2O-3 dataframe.
-# #form #table #pagination #h2o3
-# ---
-
-import os
-from time import time
-
-import h2o
-from h2o_wave import Q, app, main, ui
-from loguru import logger
-
-# This example requires H2O-3 to be running.
-
-
-@app("/demo")
-async def serve(q: Q):
- logger.info(q.args)
- logger.info(q.events)
-
- if not q.app.initialized:
- # This is called the first time our app runs
-        # Variables created here will be the same for all users of the app
- # Save a direct link to our H2O Dataframe for all users to use throughout the app
- try:
- h2o.connect(url="http://127.0.0.1:54321")
- except:
- q.page['err'] = ui.form_card(box='1 1 4 2', items=[
- ui.message_bar(type='error', text='Could not connect to H2O3. Please ensure H2O3 is running.'),
- ])
- await q.page.save()
- logger.error("H2O-3 is not running")
- return
- q.app.h2o_df = h2o.get_frame("py_6_sid_aff3")
-
- # EXAMPLE OF CREATING A LARGE DATAFRAME
- # h2o_df = h2o.create_frame(
- # rows=1000000,
- # cols=5,
- # categorical_fraction=0.6,
- # integer_fraction=0,
- # binary_fraction=0,
- # real_range=100,
- # integer_range=100,
- # missing_fraction=0,
- # seed=1234,
- # )
-
- q.app.rows_per_page = 10 # TODO: How many rows do you want to show users at a time
-
-        # A list of booleans for whether a column is sortable or not; by default
-        # we allow all and only numeric columns to be sorted, based on H2O-3 functionality
- # TODO: You may want to make a hardcoded list of [True, False] for your own use cases
- q.app.column_sortable = q.app.h2o_df.isnumeric()
-
-        # A list of booleans for whether a column is filterable or not; by default,
-        # we allow all and only categorical columns to be filtered, based on H2O-3 functionality
- # TODO: You may want to make a hardcoded list of [True, False] for your own use cases
- q.app.column_filterable = q.app.h2o_df.isfactor()
-
-        # A list of booleans for whether a column is searchable or not; by default,
-        # we allow all and only categorical and string columns to be searched, based on H2O-3 functionality
- # TODO: You may want to make a hardcoded list of [True, False] for your own use cases
- q.app.column_searchable = q.app.h2o_df.isfactor() + q.app.h2o_df.isstring()
-
- q.app.initialized = True
-
- if not q.client.initialized:
- # This is called for each new browser that visits the app
- # Multiple users can interact with the table at the same time without interrupting each other
- # Users can make multiple changes to the table such as sorting and filtering
-
- q.client.search = None
- q.client.sort = None
- q.client.filters = None
- q.client.page_offset = 0
- q.client.total_rows = len(q.app.h2o_df)
-
- # Create the default UI for this user
- q.page["meta"] = ui.meta_card(box="")
- q.page["table_card"] = ui.form_card(
- box="1 1 -1 -1",
- items=[
- ui.table(
- name="h2o_table", # TODO: if you change this, you need to remember to update the serve function
- columns=[
- ui.table_column(
- name=q.app.h2o_df.columns[i],
- label=q.app.h2o_df.columns[i],
- sortable=q.app.column_sortable[i],
- filterable=q.app.column_filterable[i],
- searchable=q.app.column_searchable[i],
- )
- for i in range(len(q.app.h2o_df.columns))
- ],
- rows=get_table_rows(q),
- resettable=True,
- downloadable=True,
- pagination=ui.table_pagination(
- total_rows=q.client.total_rows,
- rows_per_page=q.app.rows_per_page,
- ),
- events=[
- "page_change",
- "sort",
- "filter",
- "search",
- "reset",
- "download",
- ],
- )
- ],
- )
- q.client.initialized = True
-
- # Check if user triggered any table action and save it to local state for allowing multiple
- # actions to be performed on the data at the same time, e.g. sort the filtered data etc.
- if q.events.h2o_table:
- logger.info("table event occurred")
-
- if q.events.h2o_table.page_change:
- logger.info(f"table page change: {q.events.h2o_table.page_change}")
- q.client.page_offset = q.events.h2o_table.page_change.get("offset", 0)
-
- if q.events.h2o_table.sort:
- logger.info(f"table sort: {q.events.h2o_table.sort}")
- q.client.sort = q.events.h2o_table.sort
- q.client.page_offset = 0
-
- if q.events.h2o_table.filter:
- logger.info(f"table filter: {q.events.h2o_table.filter}")
- q.client.filters = q.events.h2o_table.filter
- q.client.page_offset = 0
-
- if q.events.h2o_table.search is not None:
- logger.info(f"table search: {q.events.h2o_table.search}")
- q.client.search = q.events.h2o_table.search
- q.client.page_offset = 0
-
- if q.events.h2o_table.download:
- await download_h2o_table(q)
-
- if q.events.h2o_table.reset:
- logger.info("table reset")
- q.client.search = None
- q.client.sort = None
- q.client.filters = None
- q.client.page_offset = 0
- q.client.total_rows = len(q.app.h2o_df)
-
- # Update the rows in our UI
- # TODO: if you change where your table is located, this needs updating
- q.page["table_card"].h2o_table.rows = get_table_rows(q)
- q.page["table_card"].h2o_table.pagination.total_rows = q.client.total_rows
-
- await q.page.save()
-
-
-def get_table_rows(q: Q):
- logger.info(
- f"Creating new table for rows: {q.client.page_offset} to {q.client.page_offset + q.app.rows_per_page}"
- )
-
- working_frame = prepare_h2o_data(q)
-
- # Bring our limited UI rows locally to pandas to prepare for our ui.table
- local_df = working_frame[
- q.client.page_offset:q.client.page_offset + q.app.rows_per_page, :
- ].as_data_frame()
- q.client.total_rows = len(working_frame)
-
- table_rows = [
- ui.table_row(
- name=str(
- q.client.page_offset + i
- ), # name is the index on the h2o dataframe for appropriate lookup
- cells=[str(local_df[col].values[i]) for col in local_df.columns.to_list()],
- )
- for i in range(len(local_df))
- ]
-
- h2o.remove(working_frame) # remove our duplicate work
-
- return table_rows
-
-
-async def download_h2o_table(q: Q):
- # Create a unique file name as this is a multi-user app
- local_file_path = f"h2o3_data_{str(int(time()))}.csv"
- working_frame = prepare_h2o_data(q)
-
- h2o.download_csv(working_frame, local_file_path)
- (wave_file_path,) = await q.site.upload([local_file_path])
- os.remove(local_file_path)
-
- q.page["meta"].script = ui.inline_script(f'window.open("{wave_file_path}")')
-
-
-def prepare_h2o_data(q: Q):
-
- # This is used to prep the data we want to show on the screen or download, so it gets its own function
- # If you have 5 users at the same time, there will be 6 large dataframes in h2o3 - ensure proper cluster size
- working_frame = h2o.deep_copy(q.app.h2o_df, "working_df")
-
- if q.client.sort is not None:
- # H2O-3 can only sort numeric values - if the developer allows users to sort
- # string columns the end users will see unexpected results
-
- working_frame = working_frame.sort(
- by=list(q.client.sort.keys()), ascending=list(q.client.sort.values())
- )
-
- if q.client.filters is not None:
-
- for key in q.client.filters.keys():
- working_frame = working_frame[
- working_frame[key].match(q.client.filters[key])
- ]
-
- if q.client.search is not None:
- # We check if our search term is in any of the searchable columns
-        # Start with an index of 0s and then filter to only keep rows with index > 0
-
- index = h2o.create_frame(
- rows=len(working_frame), cols=1, integer_fraction=1, integer_range=1
- )
- index["C1"] = 0
- for i in range(len(q.app.h2o_df.columns)):
- if q.app.column_searchable[i]:
- index = index + working_frame[q.app.h2o_df.columns[i]].grep(
- pattern=q.client.search, ignore_case=True, output_logical=True
- )
-
- working_frame = working_frame[index]
- return working_frame
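The pattern in this example generalises beyond H2O-3: keep the user's sort/filter/search state in `q.client`, re-derive the filtered view, and hand the table only the slice for the current page offset. Below is a pandas sketch of the same `prepare_h2o_data` + page-slice idea; the `sort`/`filters`/`search` arguments mirror how the handlers above use the event payloads (sort: column→ascending, filters: column→allowed values, search: substring), but the helper itself is an illustrative sketch, not a Wave API.

```python
import pandas as pd

def rows_for_page(df: pd.DataFrame, sort=None, filters=None, search=None,
                  searchable_cols=(), offset=0, rows_per_page=10) -> pd.DataFrame:
    """Apply saved sort/filter/search state, then return only the current page."""
    out = df
    if filters:                          # e.g. {"species": ["cat", "dog"]}
        for col, allowed in filters.items():
            out = out[out[col].astype(str).isin(allowed)]
    if search:                           # case-insensitive substring match over searchable columns
        mask = pd.Series(False, index=out.index)
        for col in searchable_cols:
            mask |= out[col].astype(str).str.contains(search, case=False, na=False)
        out = out[mask]
    if sort:                             # e.g. {"age": True} -> ascending
        out = out.sort_values(by=list(sort.keys()), ascending=list(sort.values()))
    return out.iloc[offset:offset + rows_per_page]

df = pd.DataFrame({"species": ["cat", "dog", "owl"] * 4, "age": range(12)})
print(rows_for_page(df, filters={"species": ["cat"]}, sort={"age": False}, rows_per_page=2))
```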
diff --git a/spaces/haakohu/deep_privacy2/configs/anonymizers/FB_cse_mask_face.py b/spaces/haakohu/deep_privacy2/configs/anonymizers/FB_cse_mask_face.py
deleted file mode 100644
index d411d66cc051f6b4c0d907551735e8f661cf17f1..0000000000000000000000000000000000000000
--- a/spaces/haakohu/deep_privacy2/configs/anonymizers/FB_cse_mask_face.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from dp2.anonymizer import Anonymizer
-from dp2.detection.cse_mask_face_detector import CSeMaskFaceDetector
-from ..defaults import common
-from tops.config import LazyCall as L
-
-detector = L(CSeMaskFaceDetector)(
- mask_rcnn_cfg=dict(),
- face_detector_cfg=dict(),
- face_post_process_cfg=dict(target_imsize=(256, 256), fdf128_expand=False),
- cse_cfg=dict(),
- cse_post_process_cfg=dict(
- target_imsize=(288, 160),
- exp_bbox_cfg=dict(percentage_background=0.3, axis_minimum_expansion=.1),
- exp_bbox_filter=dict(minimum_area=32*32, min_bbox_ratio_inside=0, aspect_ratio_range=[0, 99999]),
- iou_combine_threshold=0.4,
- dilation_percentage=0.02,
- normalize_embedding=False
- ),
- score_threshold=0.3,
- cache_directory=common.output_dir.joinpath("cse_mask_face_detection_cache")
-)
-
-anonymizer = L(Anonymizer)(
- detector="${detector}",
- face_G_cfg="configs/fdf/stylegan.py",
- person_G_cfg="configs/fdh/styleganL_nocse.py",
- cse_person_G_cfg="configs/fdh/styleganL.py",
- car_G_cfg="configs/generators/dummy/pixelation8.py"
-)
diff --git a/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/filtered_lrelu.py b/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/filtered_lrelu.py
deleted file mode 100644
index 6106c917d1cbff4f1cf637390dd6ba0c597a830f..0000000000000000000000000000000000000000
--- a/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/filtered_lrelu.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import os
-import numpy as np
-import torch
-import warnings
-
-from .. import custom_ops
-from .. import misc
-from . import upfirdn2d
-from . import bias_act
-
-#----------------------------------------------------------------------------
-
-_plugin = None
-
-def _init():
- global _plugin
- if _plugin is None:
- _plugin = custom_ops.get_plugin(
- module_name='filtered_lrelu_plugin',
- sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],
- headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],
- source_dir=os.path.dirname(__file__),
- extra_cuda_cflags=['--use_fast_math'],
- )
- return True
-
-def _get_filter_size(f):
- if f is None:
- return 1, 1
- assert isinstance(f, torch.Tensor)
- assert 1 <= f.ndim <= 2
- return f.shape[-1], f.shape[0] # width, height
-
-def _parse_padding(padding):
- if isinstance(padding, int):
- padding = [padding, padding]
- assert isinstance(padding, (list, tuple))
- assert all(isinstance(x, (int, np.integer)) for x in padding)
- padding = [int(x) for x in padding]
- if len(padding) == 2:
- px, py = padding
- padding = [px, px, py, py]
- px0, px1, py0, py1 = padding
- return px0, px1, py0, py1
-
-#----------------------------------------------------------------------------
-
-def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
- r"""Filtered leaky ReLU for a batch of 2D images.
-
- Performs the following sequence of operations for each channel:
-
- 1. Add channel-specific bias if provided (`b`).
-
- 2. Upsample the image by inserting N-1 zeros after each pixel (`up`).
-
- 3. Pad the image with the specified number of zeros on each side (`padding`).
- Negative padding corresponds to cropping the image.
-
- 4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it
- so that the footprint of all output pixels lies within the input image.
-
- 5. Multiply each value by the provided gain factor (`gain`).
-
- 6. Apply leaky ReLU activation function to each value.
-
- 7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided.
-
- 8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking
- it so that the footprint of all output pixels lies within the input image.
-
- 9. Downsample the image by keeping every Nth pixel (`down`).
-
- The fused op is considerably more efficient than performing the same calculation
- using standard PyTorch ops. It supports gradients of arbitrary order.
-
- Args:
- x: Float32/float16/float64 input tensor of the shape
- `[batch_size, num_channels, in_height, in_width]`.
- fu: Float32 upsampling FIR filter of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable), or
- `None` (identity).
- fd: Float32 downsampling FIR filter of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable), or
- `None` (identity).
-        b:           Bias vector, or `None` to disable. Must be a 1D tensor of the same type
-                     as `x`. The length of the vector must match the channel dimension of `x`.
- up: Integer upsampling factor (default: 1).
- down: Integer downsampling factor. (default: 1).
- padding: Padding with respect to the upsampled image. Can be a single number
- or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
- (default: 0).
- gain: Overall scaling factor for signal magnitude (default: sqrt(2)).
- slope: Slope on the negative side of leaky ReLU (default: 0.2).
- clamp: Maximum magnitude for leaky ReLU output (default: None).
- flip_filter: False = convolution, True = correlation (default: False).
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
- Returns:
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
- """
- assert isinstance(x, torch.Tensor)
- assert impl in ['ref', 'cuda']
- if impl == 'cuda' and x.device.type == 'cuda' and _init():
- return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)
- return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
-
-#----------------------------------------------------------------------------
-
-@misc.profiled_function
-def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
- """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using
-    existing `upfirdn2d()` and `bias_act()` ops.
- """
- assert isinstance(x, torch.Tensor) and x.ndim == 4
- fu_w, fu_h = _get_filter_size(fu)
- fd_w, fd_h = _get_filter_size(fd)
- if b is not None:
- assert isinstance(b, torch.Tensor) and b.dtype == x.dtype
- misc.assert_shape(b, [x.shape[1]])
- assert isinstance(up, int) and up >= 1
- assert isinstance(down, int) and down >= 1
- px0, px1, py0, py1 = _parse_padding(padding)
- assert gain == float(gain) and gain > 0
- assert slope == float(slope) and slope >= 0
- assert clamp is None or (clamp == float(clamp) and clamp >= 0)
-
- # Calculate output size.
- batch_size, channels, in_h, in_w = x.shape
- in_dtype = x.dtype
- out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down
- out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down
-
- # Compute using existing ops.
- x = bias_act.bias_act(x=x, b=b) # Apply bias.
- x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.
- x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp.
- x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample.
-
- # Check output shape & dtype.
- misc.assert_shape(x, [batch_size, channels, out_h, out_w])
- assert x.dtype == in_dtype
- return x
-
-#----------------------------------------------------------------------------
-
-_filtered_lrelu_cuda_cache = dict()
-
-def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
- """Fast CUDA implementation of `filtered_lrelu()` using custom ops.
- """
- assert isinstance(up, int) and up >= 1
- assert isinstance(down, int) and down >= 1
- px0, px1, py0, py1 = _parse_padding(padding)
- assert gain == float(gain) and gain > 0
- gain = float(gain)
- assert slope == float(slope) and slope >= 0
- slope = float(slope)
- assert clamp is None or (clamp == float(clamp) and clamp >= 0)
- clamp = float(clamp if clamp is not None else 'inf')
-
- # Lookup from cache.
- key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)
- if key in _filtered_lrelu_cuda_cache:
- return _filtered_lrelu_cuda_cache[key]
-
- # Forward op.
- class FilteredLReluCuda(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ
- assert isinstance(x, torch.Tensor) and x.ndim == 4
-
- # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable).
- if fu is None:
- fu = torch.ones([1, 1], dtype=torch.float32, device=x.device)
- if fd is None:
- fd = torch.ones([1, 1], dtype=torch.float32, device=x.device)
- assert 1 <= fu.ndim <= 2
- assert 1 <= fd.ndim <= 2
-
- # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1.
- if up == 1 and fu.ndim == 1 and fu.shape[0] == 1:
- fu = fu.square()[None]
- if down == 1 and fd.ndim == 1 and fd.shape[0] == 1:
- fd = fd.square()[None]
-
- # Missing sign input tensor.
- if si is None:
- si = torch.empty([0])
-
- # Missing bias tensor.
- if b is None:
- b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device)
-
- # Construct internal sign tensor only if gradients are needed.
- write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad)
-
- # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout.
- strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1]
- if any(a < b for a, b in zip(strides[:-1], strides[1:])):
- warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning)
-
- # Call C++/Cuda plugin if datatype is supported.
- if x.dtype in [torch.float16, torch.float32]:
- if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device):
- warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning)
- y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs)
- else:
- return_code = -1
-
- # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because
- # only the bit-packed sign tensor is retained for gradient computation.
- if return_code < 0:
- warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning)
-
- y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias.
- y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.
- so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place.
- y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample.
-
- # Prepare for gradient computation.
- ctx.save_for_backward(fu, fd, (si if si.numel() else so))
- ctx.x_shape = x.shape
- ctx.y_shape = y.shape
- ctx.s_ofs = sx, sy
- return y
-
- @staticmethod
- def backward(ctx, dy): # pylint: disable=arguments-differ
- fu, fd, si = ctx.saved_tensors
- _, _, xh, xw = ctx.x_shape
- _, _, yh, yw = ctx.y_shape
- sx, sy = ctx.s_ofs
- dx = None # 0
- dfu = None; assert not ctx.needs_input_grad[1]
- dfd = None; assert not ctx.needs_input_grad[2]
- db = None # 3
- dsi = None; assert not ctx.needs_input_grad[4]
- dsx = None; assert not ctx.needs_input_grad[5]
- dsy = None; assert not ctx.needs_input_grad[6]
-
- if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]:
- pp = [
- (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0,
- xw * up - yw * down + px0 - (up - 1),
- (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0,
- xh * up - yh * down + py0 - (up - 1),
- ]
- gg = gain * (up ** 2) / (down ** 2)
- ff = (not flip_filter)
- sx = sx - (fu.shape[-1] - 1) + px0
- sy = sy - (fu.shape[0] - 1) + py0
- dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy)
-
- if ctx.needs_input_grad[3]:
- db = dx.sum([0, 2, 3])
-
- return dx, dfu, dfd, db, dsi, dsx, dsy
-
- # Add to cache.
- _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda
- return FilteredLReluCuda
-
-#----------------------------------------------------------------------------
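The output-size arithmetic in `_filtered_lrelu_ref` above is easy to sanity-check on paper. Here is a tiny helper that reproduces just that formula; the layer sizes in the example are illustrative and not taken from any particular StyleGAN3 config.

```python
def filtered_lrelu_out_size(in_h, in_w, up, down, padding, fu_taps, fd_taps):
    """Spatial output size implied by the reference implementation above."""
    px0, px1, py0, py1 = padding
    out_w = (in_w * up + (px0 + px1) - (fu_taps - 1) - (fd_taps - 1) + (down - 1)) // down
    out_h = (in_h * up + (py0 + py1) - (fu_taps - 1) - (fd_taps - 1) + (down - 1)) // down
    return out_h, out_w

# 2x up / 2x down with 12-tap filters and 11 px of padding on every side keeps 64x64 -> 64x64
print(filtered_lrelu_out_size(64, 64, up=2, down=2,
                              padding=(11, 11, 11, 11), fu_taps=12, fd_taps=12))  # (64, 64)
```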
diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index 5e8aaa2d3722e7e73a3d94b2b7dfc4f751d7a240..0000000000000000000000000000000000000000
--- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Please select an issue template from
-https://github.com/facebookresearch/detectron2/issues/new/choose .
-
-Otherwise your issue will be closed.
diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/memory.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/memory.py
deleted file mode 100644
index d495a1681f460668c96f64454e31e7f2fca8137a..0000000000000000000000000000000000000000
--- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/memory.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-
-import logging
-from contextlib import contextmanager
-from functools import wraps
-import torch
-
-__all__ = ["retry_if_cuda_oom"]
-
-
-@contextmanager
-def _ignore_torch_cuda_oom():
- """
- A context which ignores CUDA OOM exception from pytorch.
- """
- try:
- yield
- except RuntimeError as e:
- # NOTE: the string may change?
- if "CUDA out of memory. " in str(e):
- pass
- else:
- raise
-
-
-def retry_if_cuda_oom(func):
- """
- Makes a function retry itself after encountering
- pytorch's CUDA OOM error.
- It will first retry after calling `torch.cuda.empty_cache()`.
-
- If that still fails, it will then retry by trying to convert inputs to CPUs.
- In this case, it expects the function to dispatch to CPU implementation.
- The return values may become CPU tensors as well and it's user's
- responsibility to convert it back to CUDA tensor if needed.
-
- Args:
- func: a stateless callable that takes tensor-like objects as arguments
-
- Returns:
- a callable which retries `func` if OOM is encountered.
-
- Examples:
-
- .. code-block:: python
-
- output = retry_if_cuda_oom(some_torch_function)(input1, input2)
- # output may be on CPU even if inputs are on GPU
-
- Note:
- 1. When converting inputs to CPU, it will only look at each argument and check
- if it has `.device` and `.to` for conversion. Nested structures of tensors
- are not supported.
-
- 2. Since the function might be called more than once, it has to be
- stateless.
- """
-
- def maybe_to_cpu(x):
- try:
- like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
- except AttributeError:
- like_gpu_tensor = False
- if like_gpu_tensor:
- return x.to(device="cpu")
- else:
- return x
-
- @wraps(func)
- def wrapped(*args, **kwargs):
- with _ignore_torch_cuda_oom():
- return func(*args, **kwargs)
-
- # Clear cache and retry
- torch.cuda.empty_cache()
- with _ignore_torch_cuda_oom():
- return func(*args, **kwargs)
-
- # Try on CPU. This slows down the code significantly, therefore print a notice.
- logger = logging.getLogger(__name__)
- logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
- new_args = (maybe_to_cpu(x) for x in args)
- new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
- return func(*new_args, **new_kwargs)
-
- return wrapped
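A typical use of `retry_if_cuda_oom` is to wrap a single memory-hungry call rather than the whole model. The snippet below is a hedged sketch: `pairwise_abs_diff` is a made-up stand-in for an expensive all-pairs op, and the tensor sizes are only there to make an OOM plausible.

```python
import torch
from detectron2.utils.memory import retry_if_cuda_oom  # the helper defined above

def pairwise_abs_diff(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Illustrative stand-in for a memory-hungry all-pairs computation."""
    return (a[:, None, :] - b[None, :, :]).abs().sum(-1)

device = "cuda" if torch.cuda.is_available() else "cpu"
a = torch.rand(8000, 4, device=device)
b = torch.rand(8000, 4, device=device)

# Tries on the GPU first; on OOM it calls torch.cuda.empty_cache() and retries,
# and as a last resort moves the inputs to CPU (the result may then be a CPU tensor).
out = retry_if_cuda_oom(pairwise_abs_diff)(a, b)
print(out.shape, out.device)
```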
diff --git a/spaces/hekbobo/bingo/src/lib/isomorphic/index.ts b/spaces/hekbobo/bingo/src/lib/isomorphic/index.ts
deleted file mode 100644
index 738dc92f74079ab762d584fb7422a8c8c3b61547..0000000000000000000000000000000000000000
--- a/spaces/hekbobo/bingo/src/lib/isomorphic/index.ts
+++ /dev/null
@@ -1,17 +0,0 @@
-'use client'
-
-import Default from './browser'
-
-let exportsModel: any = {}
-
-if (process.browser) {
- Object.assign(exportsModel, require('./browser').default)
-} else {
- Object.assign(exportsModel, require('./node').default)
-}
-
-export default exportsModel! as typeof Default
-
-export const fetch: typeof Default.fetch = exportsModel!.fetch
-export const WebSocket: typeof Default.WebSocket = exportsModel!.WebSocket
-export const debug: typeof Default.debug = exportsModel!.debug
diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py
deleted file mode 100644
index 4505be90d88dc29c21501ace680d1f122681f46c..0000000000000000000000000000000000000000
--- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import shutil
-from multiprocessing import Pool
-
-import SimpleITK as sitk
-import numpy as np
-from batchgenerators.utilities.file_and_folder_operations import *
-from skimage.io import imread
-from skimage.io import imsave
-from skimage.morphology import disk
-from skimage.morphology import erosion
-from skimage.transform import resize
-
-from nnunet.paths import nnUNet_raw_data
-
-
-def load_bmp_convert_to_nifti_borders_2d(img_file, lab_file, img_out_base, anno_out, spacing, border_thickness=0.7):
- img = imread(img_file)
- img_itk = sitk.GetImageFromArray(img.astype(np.float32)[None])
- img_itk.SetSpacing(list(spacing)[::-1] + [999])
- sitk.WriteImage(img_itk, join(img_out_base + "_0000.nii.gz"))
-
- if lab_file is not None:
- l = imread(lab_file)
- borders = generate_border_as_suggested_by_twollmann_2d(l, spacing, border_thickness)
- l[l > 0] = 1
- l[borders == 1] = 2
- l_itk = sitk.GetImageFromArray(l.astype(np.uint8)[None])
- l_itk.SetSpacing(list(spacing)[::-1] + [999])
- sitk.WriteImage(l_itk, anno_out)
-
-
-def generate_disk(spacing, radius, dtype=int):
- radius_in_voxels = np.round(radius / np.array(spacing)).astype(int)
- n = 2 * radius_in_voxels + 1
- disk_iso = disk(max(n) * 2, dtype=np.float64)
- disk_resampled = resize(disk_iso, n, 1, 'constant', 0, clip=True, anti_aliasing=False, preserve_range=True)
- disk_resampled[disk_resampled > 0.5] = 1
- disk_resampled[disk_resampled <= 0.5] = 0
- return disk_resampled.astype(dtype)
-
-
-def generate_border_as_suggested_by_twollmann_2d(label_img: np.ndarray, spacing,
- border_thickness: float = 2) -> np.ndarray:
- border = np.zeros_like(label_img)
- selem = generate_disk(spacing, border_thickness)
- for l in np.unique(label_img):
- if l == 0: continue
- mask = (label_img == l).astype(int)
- eroded = erosion(mask, selem)
- border[(eroded == 0) & (mask != 0)] = 1
- return border
-
-
-def prepare_task(base, task_id, task_name, spacing, border_thickness: float = 15):
- p = Pool(16)
-
- foldername = "Task%03.0d_%s" % (task_id, task_name)
-
- out_base = join(nnUNet_raw_data, foldername)
- imagestr = join(out_base, "imagesTr")
- imagests = join(out_base, "imagesTs")
- labelstr = join(out_base, "labelsTr")
- maybe_mkdir_p(imagestr)
- maybe_mkdir_p(imagests)
- maybe_mkdir_p(labelstr)
-
- train_patient_names = []
- test_patient_names = []
- res = []
-
- for train_sequence in [i for i in subfolders(base + "_train", join=False) if not i.endswith("_GT")]:
- train_cases = subfiles(join(base + '_train', train_sequence), suffix=".tif", join=False)
- for t in train_cases:
- casename = train_sequence + "_" + t[:-4]
- img_file = join(base + '_train', train_sequence, t)
- lab_file = join(base + '_train', train_sequence + "_GT", "SEG", "man_seg" + t[1:])
- if not isfile(lab_file):
- continue
- img_out_base = join(imagestr, casename)
- anno_out = join(labelstr, casename + ".nii.gz")
- res.append(
- p.starmap_async(load_bmp_convert_to_nifti_borders_2d,
- ((img_file, lab_file, img_out_base, anno_out, spacing, border_thickness),)))
- train_patient_names.append(casename)
-
- for test_sequence in [i for i in subfolders(base + "_test", join=False) if not i.endswith("_GT")]:
- test_cases = subfiles(join(base + '_test', test_sequence), suffix=".tif", join=False)
- for t in test_cases:
- casename = test_sequence + "_" + t[:-4]
- img_file = join(base + '_test', test_sequence, t)
- lab_file = None
- img_out_base = join(imagests, casename)
- anno_out = None
- res.append(
- p.starmap_async(load_bmp_convert_to_nifti_borders_2d,
- ((img_file, lab_file, img_out_base, anno_out, spacing, border_thickness),)))
- test_patient_names.append(casename)
-
- _ = [i.get() for i in res]
-
- json_dict = {}
- json_dict['name'] = task_name
- json_dict['description'] = ""
- json_dict['tensorImageSize'] = "4D"
- json_dict['reference'] = ""
- json_dict['licence'] = ""
- json_dict['release'] = "0.0"
- json_dict['modality'] = {
- "0": "BF",
- }
- json_dict['labels'] = {
- "0": "background",
- "1": "cell",
- "2": "border",
- }
-
- json_dict['numTraining'] = len(train_patient_names)
- json_dict['numTest'] = len(test_patient_names)
- json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
- train_patient_names]
- json_dict['test'] = ["./imagesTs/%s.nii.gz" % i for i in test_patient_names]
-
- save_json(json_dict, os.path.join(out_base, "dataset.json"))
- p.close()
- p.join()
-
-
-def convert_to_instance_seg(arr: np.ndarray, spacing: tuple = (0.125, 0.125), small_center_threshold: int = 30,
- isolated_border_as_separate_instance_threshold=15):
- from skimage.morphology import label, dilation
-
- # we first identify centers that are too small and set them to be border. This should remove false positive instances
- objects = label((arr == 1).astype(int))
- for o in np.unique(objects):
- if o > 0 and np.sum(objects == o) <= small_center_threshold:
- arr[objects == o] = 2
-
- # 1 is core, 2 is border
- objects = label((arr == 1).astype(int))
- final = np.copy(objects)
- remaining_border = arr == 2
- current = np.copy(objects)
- dilated_mm = np.array((0, 0))
- spacing = np.array(spacing)
-
- while np.sum(remaining_border) > 0:
- strel_size = [0, 0]
- maximum_dilation = max(dilated_mm)
- for i in range(2):
- if spacing[i] == min(spacing):
- strel_size[i] = 1
- continue
- if dilated_mm[i] + spacing[i] / 2 < maximum_dilation:
- strel_size[i] = 1
- ball_here = disk(1)
-
- if strel_size[0] == 0: ball_here = ball_here[1:2]
- if strel_size[1] == 0: ball_here = ball_here[:, 1:2]
-
- #print(1)
- dilated = dilation(current, ball_here)
- diff = (current == 0) & (dilated != current)
- final[diff & remaining_border] = dilated[diff & remaining_border]
- remaining_border[diff] = 0
- current = dilated
- dilated_mm = [dilated_mm[i] + spacing[i] if strel_size[i] == 1 else dilated_mm[i] for i in range(2)]
-
- # what can happen is that a cell is so small that the network only predicted border and no core. This cell will be
- # fused with the nearest other instance, which we don't want. Therefore we identify isolated border predictions and
- # give them a separate instance id
- # we identify isolated border predictions by checking each foreground object in arr and see whether this object
- # also contains label 1
- max_label = np.max(final)
-
- foreground_objects = label((arr != 0).astype(int))
- for i in np.unique(foreground_objects):
- if i > 0 and (1 not in np.unique(arr[foreground_objects==i])):
- size_of_object = np.sum(foreground_objects==i)
- if size_of_object >= isolated_border_as_separate_instance_threshold:
- final[foreground_objects == i] = max_label + 1
- max_label += 1
- #print('yeah boi')
-
- return final.astype(np.uint32)
-
-
-def load_convert_to_instance_save(file_in: str, file_out: str, spacing):
- img = sitk.ReadImage(file_in)
- img_npy = sitk.GetArrayFromImage(img)
- out = convert_to_instance_seg(img_npy[0], spacing)[None]
- out_itk = sitk.GetImageFromArray(out.astype(np.int16))
- out_itk.CopyInformation(img)
- sitk.WriteImage(out_itk, file_out)
-
-
-def convert_folder_to_instanceseg(folder_in: str, folder_out: str, spacing, processes: int = 12):
- input_files = subfiles(folder_in, suffix=".nii.gz", join=False)
- maybe_mkdir_p(folder_out)
- output_files = [join(folder_out, i) for i in input_files]
- input_files = [join(folder_in, i) for i in input_files]
- p = Pool(processes)
- r = []
- for i, o in zip(input_files, output_files):
- r.append(
- p.starmap_async(
- load_convert_to_instance_save,
- ((i, o, spacing),)
- )
- )
- _ = [i.get() for i in r]
- p.close()
- p.join()
-
-
-def convert_to_tiff(nifti_image: str, output_name: str):
- npy = sitk.GetArrayFromImage(sitk.ReadImage(nifti_image))
- imsave(output_name, npy[0].astype(np.uint16), compress=6)
-
-
-if __name__ == "__main__":
- base = "/home/fabian/Downloads/Fluo-N2DH-SIM+"
- task_name = 'Fluo-N2DH-SIM'
- spacing = (0.125, 0.125)
-
- task_id = 999
- border_thickness = 0.7
- prepare_task(base, task_id, task_name, spacing, border_thickness)
-
- task_id = 89
- additional_time_steps = 4
- task_name = 'Fluo-N2DH-SIM_thickborder_time'
- full_taskname = 'Task%03.0d_' % task_id + task_name
- output_raw = join(nnUNet_raw_data, full_taskname)
- shutil.rmtree(output_raw)
- shutil.copytree(join(nnUNet_raw_data, 'Task999_Fluo-N2DH-SIM_thickborder'), output_raw)
-
- shutil.rmtree(join(nnUNet_raw_data, 'Task999_Fluo-N2DH-SIM_thickborder'))
-
- # now add additional time information
- for fld in ['imagesTr', 'imagesTs']:
- curr = join(output_raw, fld)
- for seq in ['01', '02']:
- images = subfiles(curr, prefix=seq, join=False)
- for i in images:
- current_timestep = int(i.split('_')[1][1:])
- renamed = join(curr, i.replace("_0000", "_%04.0d" % additional_time_steps))
- shutil.move(join(curr, i), renamed)
- for previous_timestep in range(-additional_time_steps, 0):
- # previous time steps will already have been processed and renamed!
- expected_filename = join(curr, seq + "_t%03.0d" % (
- current_timestep + previous_timestep) + "_%04.0d" % additional_time_steps + ".nii.gz")
- if not isfile(expected_filename):
- # create empty image
- img = sitk.ReadImage(renamed)
- empty = sitk.GetImageFromArray(np.zeros_like(sitk.GetArrayFromImage(img)))
- empty.CopyInformation(img)
- sitk.WriteImage(empty, join(curr, i.replace("_0000", "_%04.0d" % (
- additional_time_steps + previous_timestep))))
- else:
- shutil.copy(expected_filename, join(curr, i.replace("_0000", "_%04.0d" % (
- additional_time_steps + previous_timestep))))
- dataset = load_json(join(output_raw, 'dataset.json'))
- dataset['modality'] = {
- '0': 't_minus 4',
- '1': 't_minus 3',
- '2': 't_minus 2',
- '3': 't_minus 1',
- '4': 'frame of interest',
- }
- save_json(dataset, join(output_raw, 'dataset.json'))
-
- # we do not need custom splits since we train on all training cases
-
- # test set predictions are converted to instance seg with convert_folder_to_instanceseg
-
- # test set predictions are converted to tiff with convert_to_tiff
\ No newline at end of file
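The conversion back from the border-aware training target to an instance map is the least obvious part of this script, so a toy call may help: `convert_to_instance_seg` labels the cores (class 1), grows each core outwards until the surrounding border pixels (class 2) are absorbed, and finally promotes sufficiently large border-only blobs to their own instance. The import line and the lowered thresholds below are illustrative choices so the tiny array is not filtered away.

```python
import numpy as np
# from Task089_Fluo_N2DH_SIM import convert_to_instance_seg  # hypothetical import of the function above

arr = np.array([
    [0, 2, 2, 2, 0, 0, 2, 2],
    [0, 2, 1, 2, 0, 0, 2, 2],   # left blob has a core (1); right blob is border-only
    [0, 2, 2, 2, 0, 0, 2, 2],
], dtype=np.uint8)

inst = convert_to_instance_seg(
    arr.copy(),
    spacing=(0.125, 0.125),
    small_center_threshold=0,                           # keep the single-pixel core
    isolated_border_as_separate_instance_threshold=1,   # keep the border-only blob as its own instance
)
print(np.unique(inst))  # expected: background 0 plus two instance ids
```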
diff --git a/spaces/huolongguo10/huolongguo10-check_sec/app.py b/spaces/huolongguo10/huolongguo10-check_sec/app.py
deleted file mode 100644
index c4e856123ed12646c9a4133a6dd1fdca243149b2..0000000000000000000000000000000000000000
--- a/spaces/huolongguo10/huolongguo10-check_sec/app.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import gradio as gr
-import transformers
-from transformers import BertTokenizer, DataCollatorWithPadding
-from transformers import AutoModelForSequenceClassification
-tokenizer = BertTokenizer.from_pretrained('huolongguo10/check_sec')
-model = AutoModelForSequenceClassification.from_pretrained('huolongguo10/check_sec', num_labels=2)
-_tokenizer = BertTokenizer.from_pretrained('huolongguo10/check_sec_tiny')
-_model = AutoModelForSequenceClassification.from_pretrained('huolongguo10/check_sec_tiny', num_labels=2)
-import torch
-def check_each(text):
-    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
- with torch.no_grad():
- logits = model(**inputs).logits
- predicted_class_id = logits.argmax().item()
- print(f'{logits.argmax().item()}:{text}')
- return 'secure' if predicted_class_id==0 else 'insecure'
-def _check_each(text):
-    inputs = _tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
- with torch.no_grad():
- logits = _model(**inputs).logits
- predicted_class_id = logits.argmax().item()
- print(f't-{logits.argmax().item()}:{text}')
- return 'secure' if predicted_class_id==0 else 'insecure'
-def _check(text):
- t=text
- while len(t)>512:
-        if check_each(t[:512])=='insecure':
- return 'insecure'
- t=t[512:]
- return check_each(t)
-def _check_tiny(text):
- t=text
- while len(t)>512:
-        if _check_each(t[:512])=='insecure':
- return 'insecure'
- t=t[512:]
- return _check_each(t)
-def check(text):
- return _check(text),_check_tiny(text)
-with gr.Blocks() as demo:
- text = gr.Textbox(label="Text")
- output = gr.Textbox(label="Output Box")
- _output = gr.Textbox(label="Output Box(By Tiny)")
- # org = gr.Textbox(label="By normal check")
- greet_btn = gr.Button("Check!")
- greet_btn.click(fn=check, inputs=text, outputs=[output,_output], api_name="check")
- gr.Markdown('''# check_sec
-检查web参数安全性,支持多种payload(v0.1.2)
-
-## 类型
-```
-LABEL_0: secure
-LABEL_1: insecure(可能包含payload)
-```
- ''')
-# gr.Interface.load("models/huolongguo10/check_sec").launch()
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/hysts/gan-control/app.py b/spaces/hysts/gan-control/app.py
deleted file mode 100644
index 360cd575bcdcf842378533ba39c4c86e7f0eb13f..0000000000000000000000000000000000000000
--- a/spaces/hysts/gan-control/app.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import functools
-import os
-import pathlib
-import shlex
-import subprocess
-import sys
-import tarfile
-
-import gradio as gr
-import huggingface_hub
-import numpy as np
-import PIL.Image
-import torch
-
-if os.getenv('SYSTEM') == 'spaces':
- with open('patch') as f:
- subprocess.run(shlex.split('patch -p1'), cwd='gan-control', stdin=f)
-
-sys.path.insert(0, 'gan-control/src')
-
-from gan_control.inference.controller import Controller
-
-TITLE = 'GAN-Control'
-DESCRIPTION = 'https://github.com/amazon-research/gan-control'
-
-
-def download_models() -> None:
- model_dir = pathlib.Path('controller_age015id025exp02hai04ori02gam15')
- if not model_dir.exists():
- path = huggingface_hub.hf_hub_download(
- 'public-data/gan-control',
- 'controller_age015id025exp02hai04ori02gam15.tar.gz')
- with tarfile.open(path) as f:
- f.extractall()
-
-
-@torch.inference_mode()
-def run(
- seed: int,
- truncation: float,
- yaw: int,
- pitch: int,
- age: int,
- hair_color_r: float,
- hair_color_g: float,
- hair_color_b: float,
- nrows: int,
- ncols: int,
- controller: Controller,
- device: torch.device,
-) -> PIL.Image.Image:
- seed = int(np.clip(seed, 0, np.iinfo(np.uint32).max))
- batch_size = nrows * ncols
- latent_size = controller.config.model_config['latent_size']
- latent = torch.from_numpy(
- np.random.RandomState(seed).randn(batch_size,
- latent_size)).float().to(device)
-
- initial_image_tensors, initial_latent_z, initial_latent_w = controller.gen_batch(
- latent=latent, truncation=truncation)
- res0 = controller.make_resized_grid_image(initial_image_tensors,
- nrow=ncols)
-
- pose_control = torch.tensor([[yaw, pitch, 0]], dtype=torch.float32)
- image_tensors, _, modified_latent_w = controller.gen_batch_by_controls(
- latent=initial_latent_w,
- input_is_latent=True,
- orientation=pose_control)
- res1 = controller.make_resized_grid_image(image_tensors, nrow=ncols)
-
- age_control = torch.tensor([[age]], dtype=torch.float32)
- image_tensors, _, modified_latent_w = controller.gen_batch_by_controls(
- latent=initial_latent_w, input_is_latent=True, age=age_control)
- res2 = controller.make_resized_grid_image(image_tensors, nrow=ncols)
-
- hair_color = torch.tensor([[hair_color_r, hair_color_g, hair_color_b]],
- dtype=torch.float32) / 255
- hair_color = torch.clamp(hair_color, 0, 1)
- image_tensors, _, modified_latent_w = controller.gen_batch_by_controls(
- latent=initial_latent_w, input_is_latent=True, hair=hair_color)
- res3 = controller.make_resized_grid_image(image_tensors, nrow=ncols)
-
- return res0, res1, res2, res3
-
-
-download_models()
-
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-path = 'controller_age015id025exp02hai04ori02gam15/'
-controller = Controller(path, device)
-fn = functools.partial(run, controller=controller, device=device)
-
-gr.Interface(
- fn=fn,
- inputs=[
- gr.Slider(label='Seed', minimum=0, maximum=1000000, step=1, value=0),
- gr.Slider(label='Truncation',
- minimum=0,
- maximum=1,
- step=0.1,
- value=0.7),
- gr.Slider(label='Yaw', minimum=-90, maximum=90, step=1, value=30),
- gr.Slider(label='Pitch', minimum=-90, maximum=90, step=1, value=0),
- gr.Slider(label='Age', minimum=15, maximum=75, step=1, value=75),
- gr.Slider(label='Hair Color (R)',
- minimum=0,
- maximum=255,
- step=1,
- value=186),
- gr.Slider(label='Hair Color (G)',
- minimum=0,
- maximum=255,
- step=1,
- value=158),
- gr.Slider(label='Hair Color (B)',
- minimum=0,
- maximum=255,
- step=1,
- value=92),
- gr.Slider(label='Number of Rows',
- minimum=1,
- maximum=3,
- step=1,
- value=1),
- gr.Slider(label='Number of Columns',
- minimum=1,
- maximum=5,
- step=1,
- value=5),
- ],
- outputs=[
- gr.Image(label='Generated Image', type='pil'),
- gr.Image(label='Head Pose Controlled', type='pil'),
- gr.Image(label='Age Controlled', type='pil'),
- gr.Image(label='Hair Color Controlled', type='pil'),
- ],
- title=TITLE,
- description=DESCRIPTION,
-).queue(max_size=10).launch()
diff --git a/spaces/hysts/insightface-person-detection/app.py b/spaces/hysts/insightface-person-detection/app.py
deleted file mode 100644
index 2ec075ee3cad7f12866782288a970c4fda78126b..0000000000000000000000000000000000000000
--- a/spaces/hysts/insightface-person-detection/app.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import functools
-import pathlib
-
-import cv2
-import gradio as gr
-import huggingface_hub
-import insightface
-import numpy as np
-import onnxruntime as ort
-
-TITLE = 'insightface Person Detection'
-DESCRIPTION = 'https://github.com/deepinsight/insightface/tree/master/examples/person_detection'
-
-
-def load_model():
- path = huggingface_hub.hf_hub_download('public-data/insightface',
- 'models/scrfd_person_2.5g.onnx')
- options = ort.SessionOptions()
- options.intra_op_num_threads = 8
- options.inter_op_num_threads = 8
- session = ort.InferenceSession(
- path,
- sess_options=options,
- providers=['CPUExecutionProvider', 'CUDAExecutionProvider'])
- model = insightface.model_zoo.retinaface.RetinaFace(model_file=path,
- session=session)
- return model
-
-
-def detect_person(
- img: np.ndarray, detector: insightface.model_zoo.retinaface.RetinaFace
-) -> tuple[np.ndarray, np.ndarray]:
- bboxes, kpss = detector.detect(img)
- bboxes = np.round(bboxes[:, :4]).astype(int)
- kpss = np.round(kpss).astype(int)
- kpss[:, :, 0] = np.clip(kpss[:, :, 0], 0, img.shape[1])
- kpss[:, :, 1] = np.clip(kpss[:, :, 1], 0, img.shape[0])
- vbboxes = bboxes.copy()
- vbboxes[:, 0] = kpss[:, 0, 0]
- vbboxes[:, 1] = kpss[:, 0, 1]
- vbboxes[:, 2] = kpss[:, 4, 0]
- vbboxes[:, 3] = kpss[:, 4, 1]
- return bboxes, vbboxes
-
-
-def visualize(image: np.ndarray, bboxes: np.ndarray,
- vbboxes: np.ndarray) -> np.ndarray:
- res = image.copy()
- for i in range(bboxes.shape[0]):
- bbox = bboxes[i]
- vbbox = vbboxes[i]
- x1, y1, x2, y2 = bbox
- vx1, vy1, vx2, vy2 = vbbox
- cv2.rectangle(res, (x1, y1), (x2, y2), (0, 255, 0), 1)
- alpha = 0.8
- color = (255, 0, 0)
- for c in range(3):
- res[vy1:vy2, vx1:vx2,
- c] = res[vy1:vy2, vx1:vx2,
- c] * alpha + color[c] * (1.0 - alpha)
- cv2.circle(res, (vx1, vy1), 1, color, 2)
- cv2.circle(res, (vx1, vy2), 1, color, 2)
- cv2.circle(res, (vx2, vy1), 1, color, 2)
- cv2.circle(res, (vx2, vy2), 1, color, 2)
- return res
-
-
-def detect(image: np.ndarray, detector) -> np.ndarray:
- image = image[:, :, ::-1] # RGB -> BGR
- bboxes, vbboxes = detect_person(image, detector)
- res = visualize(image, bboxes, vbboxes)
- return res[:, :, ::-1] # BGR -> RGB
-
-
-detector = load_model()
-detector.prepare(-1, nms_thresh=0.5, input_size=(640, 640))
-fn = functools.partial(detect, detector=detector)
-
-image_dir = pathlib.Path('images')
-examples = [[path.as_posix()] for path in sorted(image_dir.glob('*.jpg'))]
-
-gr.Interface(
- fn=fn,
- inputs=gr.Image(label='Input', type='numpy'),
- outputs=gr.Image(label='Output', height=600),
- examples=examples,
- examples_per_page=30,
- title=TITLE,
- description=DESCRIPTION,
-).queue().launch()
diff --git a/spaces/hzy123/bingo/src/components/chat-history.tsx b/spaces/hzy123/bingo/src/components/chat-history.tsx
deleted file mode 100644
index feb81de66562edda8f40d3c0cc717202c92b6509..0000000000000000000000000000000000000000
--- a/spaces/hzy123/bingo/src/components/chat-history.tsx
+++ /dev/null
@@ -1,48 +0,0 @@
-import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons"
-
-export function ChatHistory() {
-  // NOTE: minimal layout sketch; only the text strings and the imported icons below are from the original design.
-  return (
-    <div className="chat-history">
-      <div className="chat-history-title">历史记录</div>
-      <div className="chat-history-item">
-        <div className="name">无标题的聊天</div>
-        <div className="time">上午1:42</div>
-        <div className="controls">
-          <IconEdit />
-          <IconDownload />
-          <IconTrash />
-          <IconMore />
-        </div>
-      </div>
-    </div>
-  )
-}
diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/README.md b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/README.md
deleted file mode 100644
index 2ee63a861229b68873561fa39bfa7c9a8b53b947..0000000000000000000000000000000000000000
--- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/README.md
+++ /dev/null
@@ -1,164 +0,0 @@
-# Distributed Arcface Training in Pytorch
-
-This is a deep learning library that makes face recognition efficient and effective, and which can train tens of millions
-of identities on a single server.
-
-## Requirements
-
-- Install [pytorch](http://pytorch.org) (torch>=1.6.0); see our install guide in [install.md](docs/install.md).
-- `pip install -r requirements.txt`.
-- Download the dataset
-  from [https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_](https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_).
-
-## How to Train
-
-To train a model, run `train.py` with the path to the configs:
-
-### 1. Single node, 8 GPUs:
-
-```shell
-python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r50
-```
-
-### 2. Multiple nodes, each node 8 GPUs:
-
-Node 0:
-
-```shell
-python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr="ip1" --master_port=1234 train.py configs/ms1mv3_r50
-```
-
-Node 1:
-
-```shell
-python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr="ip1" --master_port=1234 train.py configs/ms1mv3_r50
-```
-
-### 3. Training resnet2060 with 8 GPUs:
-
-```shell
-python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r2060.py
-```
-
-## Model Zoo
-
-- The models are available for non-commercial research purposes only.
-- All models can be found at the links below.
-- [Baidu Yun Pan](https://pan.baidu.com/s/1CL-l4zWqsI1oDuEEYVhj-g): e8pw
-- [onedrive](https://1drv.ms/u/s!AswpsDO2toNKq0lWY69vN58GR6mw?e=p9Ov5d)
-
-### Performance on [**ICCV2021-MFR**](http://iccv21-mfr.com/)
-
-The ICCV2021-MFR test set consists of non-celebrities, so we can ensure that it has very little overlap with publicly
-available face recognition training sets such as MS1M and CASIA, which are mostly collected from online celebrities.
-As a result, we can evaluate the fair performance of different algorithms.
-
-For the **ICCV2021-MFR-ALL** set, TAR is measured on an all-to-all 1:1 protocol with FAR below 0.000001 (1e-6). The
-globalised multi-racial test set contains 242,143 identities and 1,624,305 images.
-
-For the **ICCV2021-MFR-MASK** set, TAR is measured on a mask-to-nonmask 1:1 protocol with FAR below 0.0001 (1e-4).
-The mask test set contains 6,964 identities, 6,964 masked images and 13,928 non-masked images,
-giving 13,928 positive pairs and 96,983,824 negative pairs in total.
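-
-As a point of reference only (this is not part of the evaluation code), TAR at a fixed FAR can be estimated from
-1:1 similarity scores roughly as sketched below; `genuine` and `impostor` are hypothetical arrays of matched-pair
-and mismatched-pair scores:
-
-```python
-import numpy as np
-
-
-def tar_at_far(genuine: np.ndarray, impostor: np.ndarray, target_far: float = 1e-6):
-    """Estimate (TAR, realised FAR, threshold) at approximately the requested false accept rate."""
-    impostor = np.sort(impostor)  # ascending
-    idx = min(int(np.ceil(len(impostor) * (1.0 - target_far))), len(impostor) - 1)
-    threshold = impostor[idx]
-    far = float(np.mean(impostor >= threshold))
-    tar = float(np.mean(genuine >= threshold))
-    return tar, far, threshold
-```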
-
-| Datasets | backbone | Training throughput | Size / MB | **ICCV2021-MFR-MASK** | **ICCV2021-MFR-ALL** |
-| :---: | :--- | :--- | :--- |:--- |:--- |
-| MS1MV3 | r18 | - | 91 | **47.85** | **68.33** |
-| Glint360k | r18 | 8536 | 91 | **53.32** | **72.07** |
-| MS1MV3 | r34 | - | 130 | **58.72** | **77.36** |
-| Glint360k | r34 | 6344 | 130 | **65.10** | **83.02** |
-| MS1MV3 | r50 | 5500 | 166 | **63.85** | **80.53** |
-| Glint360k | r50 | 5136 | 166 | **70.23** | **87.08** |
-| MS1MV3 | r100 | - | 248 | **69.09** | **84.31** |
-| Glint360k | r100 | 3332 | 248 | **75.57** | **90.66** |
-| MS1MV3 | mobilefacenet | 12185 | 7.8 | **41.52** | **65.26** |
-| Glint360k | mobilefacenet | 11197 | 7.8 | **44.52** | **66.48** |
-
-### Performance on IJB-C and Verification Datasets
-
-| Datasets | backbone | IJBC(1e-05) | IJBC(1e-04) | agedb30 | cfp_fp | lfw | log |
-| :---: | :--- | :--- | :--- | :--- |:--- |:--- |:--- |
-| MS1MV3 | r18 | 92.07 | 94.66 | 97.77 | 97.73 | 99.77 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r18_fp16/training.log)|
-| MS1MV3 | r34 | 94.10 | 95.90 | 98.10 | 98.67 | 99.80 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r34_fp16/training.log)|
-| MS1MV3 | r50 | 94.79 | 96.46 | 98.35 | 98.96 | 99.83 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r50_fp16/training.log)|
-| MS1MV3 | r100 | 95.31 | 96.81 | 98.48 | 99.06 | 99.85 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r100_fp16/training.log)|
-| MS1MV3 | **r2060**| 95.34 | 97.11 | 98.67 | 99.24 | 99.87 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r2060_fp16/training.log)|
-| Glint360k |r18-0.1 | 93.16 | 95.33 | 97.72 | 97.73 | 99.77 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r18_fp16_0.1/training.log)|
-| Glint360k |r34-0.1 | 95.16 | 96.56 | 98.33 | 98.78 | 99.82 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r34_fp16_0.1/training.log)|
-| Glint360k |r50-0.1 | 95.61 | 96.97 | 98.38 | 99.20 | 99.83 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r50_fp16_0.1/training.log)|
-| Glint360k |r100-0.1 | 95.88 | 97.32 | 98.48 | 99.29 | 99.82 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r100_fp16_0.1/training.log)|
-
-[comment]: <> (More details see [model.md](docs/modelzoo.md) in docs.)
-
-
-## [Speed Benchmark](docs/speed_benchmark.md)
-
-**Arcface Torch** can train on large-scale face recognition training sets efficiently and quickly. When the number of
-classes in the training set is greater than 300K and training runs long enough, the partial fc sampling strategy reaches
-the same accuracy with several times faster training and a smaller GPU memory footprint.
-Partial FC is a sparse variant of the model-parallel architecture for large-scale face recognition. Partial FC uses a
-sparse softmax, where each batch dynamically samples a subset of class centers for training. In each iteration, only a
-sparse part of the parameters is updated, which greatly reduces GPU memory and computation. With Partial FC,
-we can scale to a training set of 29 million identities, the largest to date. Partial FC also supports multi-machine distributed
-training and mixed precision training.
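-
-The snippet below is only an illustrative sketch of the sampling idea, written for this README; it is not the
-repository's `partial_fc.py`, which additionally shards the class centers across GPUs and applies a margin-based
-loss. The class name and the 64.0 logit scale are made up for the example:
-
-```python
-import torch
-import torch.nn.functional as F
-
-
-class NaivePartialFC(torch.nn.Module):
-    """Toy Partial FC head: each batch is scored only against a sampled subset of class centers."""
-
-    def __init__(self, embedding_size: int, num_classes: int, sample_rate: float = 0.1):
-        super().__init__()
-        self.weight = torch.nn.Parameter(0.01 * torch.randn(num_classes, embedding_size))
-        self.num_classes = num_classes
-        self.num_sample = max(1, int(num_classes * sample_rate))
-
-    def forward(self, embeddings: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
-        positives = labels.unique()
-        # always keep the positive centers, fill the rest of the budget with random negatives
-        mask = torch.ones(self.num_classes, dtype=torch.bool, device=labels.device)
-        mask[positives] = False
-        perm = torch.randperm(self.num_classes, device=labels.device)
-        negatives = perm[mask[perm]][: max(self.num_sample - positives.numel(), 0)]
-        sampled = torch.cat([positives, negatives])
-        # remap the original class ids into the smaller sampled index space
-        remap = torch.full((self.num_classes,), -1, dtype=torch.long, device=labels.device)
-        remap[sampled] = torch.arange(sampled.numel(), device=labels.device)
-        # cosine logits against the sampled centers only; only those rows get non-zero gradients
-        logits = F.linear(F.normalize(embeddings), F.normalize(self.weight[sampled]))
-        return F.cross_entropy(64.0 * logits, remap[labels])
-```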
-
-
-
-See [speed_benchmark.md](docs/speed_benchmark.md) in docs for more details.
-
-### 1. Training speed of different parallel methods (samples / second), Tesla V100 32GB * 8. (Larger is better)
-
-`-` means training failed because of GPU memory limitations.
-
-| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
-| :--- | :--- | :--- | :--- |
-|125000 | 4681 | 4824 | 5004 |
-|1400000 | **1672** | 3043 | 4738 |
-|5500000 | **-** | **1389** | 3975 |
-|8000000 | **-** | **-** | 3565 |
-|16000000 | **-** | **-** | 2679 |
-|29000000 | **-** | **-** | **1855** |
-
-### 2. GPU memory cost of different parallel methods (MB per GPU), Tesla V100 32GB * 8. (Smaller is better)
-
-| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
-| :--- | :--- | :--- | :--- |
-|125000 | 7358 | 5306 | 4868 |
-|1400000 | 32252 | 11178 | 6056 |
-|5500000 | **-** | 32188 | 9854 |
-|8000000 | **-** | **-** | 12310 |
-|16000000 | **-** | **-** | 19950 |
-|29000000 | **-** | **-** | 32324 |
-
-## Evaluation ICCV2021-MFR and IJB-C
-
-See [eval.md](docs/eval.md) in docs for more details.
-
-## Test
-
-We tested many versions of PyTorch. Please create an issue if you are having trouble.
-
-- [x] torch 1.6.0
-- [x] torch 1.7.1
-- [x] torch 1.8.0
-- [x] torch 1.9.0
-
-## Citation
-
-```
-@inproceedings{deng2019arcface,
- title={Arcface: Additive angular margin loss for deep face recognition},
- author={Deng, Jiankang and Guo, Jia and Xue, Niannan and Zafeiriou, Stefanos},
- booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
- pages={4690--4699},
- year={2019}
-}
-@inproceedings{an2020partical_fc,
- title={Partial FC: Training 10 Million Identities on a Single Machine},
- author={An, Xiang and Zhu, Xuhan and Xiao, Yang and Wu, Lan and Zhang, Ming and Gao, Yuan and Qin, Bin and
- Zhang, Debing and Fu Ying},
- booktitle={Arxiv 2010.05222},
- year={2020}
-}
-```
diff --git a/spaces/iamironman4279/SadTalker/src/utils/audio.py b/spaces/iamironman4279/SadTalker/src/utils/audio.py
deleted file mode 100644
index 89433eb4c681112804fbed72b157700f553739a8..0000000000000000000000000000000000000000
--- a/spaces/iamironman4279/SadTalker/src/utils/audio.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import librosa
-import librosa.filters
-import numpy as np
-# import tensorflow as tf
-from scipy import signal
-from scipy.io import wavfile
-from src.utils.hparams import hparams as hp
-
-def load_wav(path, sr):
- return librosa.core.load(path, sr=sr)[0]
-
-def save_wav(wav, path, sr):
- wav *= 32767 / max(0.01, np.max(np.abs(wav)))
- #proposed by @dsmiller
- wavfile.write(path, sr, wav.astype(np.int16))
-
-def save_wavenet_wav(wav, path, sr):
- librosa.output.write_wav(path, wav, sr=sr)
-
-def preemphasis(wav, k, preemphasize=True):
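-    # First-order high-pass FIR filter: y[n] = x[n] - k * x[n-1]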
- if preemphasize:
- return signal.lfilter([1, -k], [1], wav)
- return wav
-
-def inv_preemphasis(wav, k, inv_preemphasize=True):
- if inv_preemphasize:
- return signal.lfilter([1], [1, -k], wav)
- return wav
-
-def get_hop_size():
- hop_size = hp.hop_size
- if hop_size is None:
- assert hp.frame_shift_ms is not None
- hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
- return hop_size
-
-def linearspectrogram(wav):
- D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
- S = _amp_to_db(np.abs(D)) - hp.ref_level_db
-
- if hp.signal_normalization:
- return _normalize(S)
- return S
-
-def melspectrogram(wav):
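-    # Pre-emphasized STFT -> mel filterbank -> amplitude to dB (offset by ref_level_db) -> optional normalization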
- D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
- S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db
-
- if hp.signal_normalization:
- return _normalize(S)
- return S
-
-def _lws_processor():
- import lws
- return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")
-
-def _stft(y):
- if hp.use_lws:
-        return _lws_processor().stft(y).T
- else:
- return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)
-
-##########################################################
-#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
-def num_frames(length, fsize, fshift):
- """Compute number of time frames of spectrogram
- """
- pad = (fsize - fshift)
- if length % fshift == 0:
- M = (length + pad * 2 - fsize) // fshift + 1
- else:
- M = (length + pad * 2 - fsize) // fshift + 2
- return M
-
-
-def pad_lr(x, fsize, fshift):
- """Compute left and right padding
- """
- M = num_frames(len(x), fsize, fshift)
- pad = (fsize - fshift)
- T = len(x) + 2 * pad
- r = (M - 1) * fshift + fsize - T
- return pad, pad + r
-##########################################################
-#Librosa correct padding
-def librosa_pad_lr(x, fsize, fshift):
- return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
-
-# Conversions
-_mel_basis = None
-
-def _linear_to_mel(spectrogram):
-    global _mel_basis
-    if _mel_basis is None:
-        _mel_basis = _build_mel_basis()
-    return np.dot(_mel_basis, spectrogram)
-
-def _build_mel_basis():
- assert hp.fmax <= hp.sample_rate // 2
- return librosa.filters.mel(sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels,
- fmin=hp.fmin, fmax=hp.fmax)
-
-def _amp_to_db(x):
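-    # Floor amplitudes at 10 ** (hp.min_level_db / 20) so the converted dB values never fall below hp.min_level_db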
- min_level = np.exp(hp.min_level_db / 20 * np.log(10))
- return 20 * np.log10(np.maximum(min_level, x))
-
-def _db_to_amp(x):
- return np.power(10.0, (x) * 0.05)
-
-def _normalize(S):
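-    # Rescale S from [hp.min_level_db, 0] dB into [-max_abs_value, max_abs_value] (symmetric) or [0, max_abs_value]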
- if hp.allow_clipping_in_normalization:
- if hp.symmetric_mels:
- return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
- -hp.max_abs_value, hp.max_abs_value)
- else:
- return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)
-
- assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
- if hp.symmetric_mels:
- return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
- else:
- return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))
-
-def _denormalize(D):
- if hp.allow_clipping_in_normalization:
- if hp.symmetric_mels:
- return (((np.clip(D, -hp.max_abs_value,
- hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
- + hp.min_level_db)
- else:
- return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
-
- if hp.symmetric_mels:
- return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
- else:
- return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
diff --git a/spaces/iamstolas/STOLAS/src/components/markdown.tsx b/spaces/iamstolas/STOLAS/src/components/markdown.tsx
deleted file mode 100644
index d4491467a1f14d1d72e535caac9c40636054e5df..0000000000000000000000000000000000000000
--- a/spaces/iamstolas/STOLAS/src/components/markdown.tsx
+++ /dev/null
@@ -1,9 +0,0 @@
-import { FC, memo } from 'react'
-import ReactMarkdown, { Options } from 'react-markdown'
-
-export const MemoizedReactMarkdown: FC = memo(
- ReactMarkdown,
- (prevProps, nextProps) =>
- prevProps.children === nextProps.children &&
- prevProps.className === nextProps.className
-)
diff --git a/spaces/inamXcontru/PoeticTTS/Answers Key Payroll Accounting Project Chapter 7.30l A Practical Approach to Payroll Accounting.md b/spaces/inamXcontru/PoeticTTS/Answers Key Payroll Accounting Project Chapter 7.30l A Practical Approach to Payroll Accounting.md
deleted file mode 100644
index 6fa6ed28126efd0cdfc0fb178ebb1d699d3c2b08..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/Answers Key Payroll Accounting Project Chapter 7.30l A Practical Approach to Payroll Accounting.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Answers Key Payroll Accounting Project Chapter 7.30l Download ✦✦✦ https://gohhs.com/2uz4li
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/inamXcontru/PoeticTTS/Cinderella Movie In Hindi Dubbed Free [WORK] Download.md b/spaces/inamXcontru/PoeticTTS/Cinderella Movie In Hindi Dubbed Free [WORK] Download.md
deleted file mode 100644
index 32b806a9cfae27b35c9b48ebef95a2c7ba40885e..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/Cinderella Movie In Hindi Dubbed Free [WORK] Download.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-The prices of movies with English subtitles is usually very affordable because a lot of money is spent to make a movie. For example, let’s say the movie is a Western movie, and the movie is an action movie, and the movie stars Daniel Craig, or Tom Cruise, or any other famous movie star. Let’s say the movie has a budget of ten million dollars. So obviously, the cost to make the movie has been very high. Thus, it is necessary to charge as much as possible to make money for making the movie. Also, it is even more profitable for the movie to be made into Hindi so that it can be subtitled into Hindi and put into the Indian movie market. So, the cost of making the movie is reduced from a ten million dollar budget to a two million dollar budget.
-cinderella movie in hindi dubbed free download DOWNLOAD ····· https://gohhs.com/2uz4rS
-However, if the movie is just a Hindi dubbed English movie, and it does not have any famous actors, then there is no need to hire actors who would be paid a lot of money. So, a Hindi dubbed English movie made in India, which has actors who cost millions of dollars to hire would only cost a hundred thousand dollars to make. Also, if the movie has a budget of five million, then there is no need to spend ten million dollars to hire famous actors. So, the only thing that will be needed is to hire writers and producers who will charge a thousand dollars an hour. Thus, making a Hindi dubbed English movie costs only a small amount of money, and it is more lucrative to hire actors in India, make it in India, and then have it subtitled into Hindi and put into the Indian movie market.
-Youtube is one of the best destinations to watch movies online for free. It provides a rich platform to the users to view or download movies. The users can enjoy a variety of free movies and TV shows in different languages. The site keeps on adding more content every day. It also provides an option for its users to download videos and movies. The search option on the website is very effective. The users can choose from the latest movies available on the site and can also add it on their watchlist. The site also allows the users to create their account and can watch movies online.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/innat/VideoMAE/labels.py b/spaces/innat/VideoMAE/labels.py
deleted file mode 100644
index be959dbb5edae2ee233b2f85de63cee9b2bf943b..0000000000000000000000000000000000000000
--- a/spaces/innat/VideoMAE/labels.py
+++ /dev/null
@@ -1,682 +0,0 @@
-K400_label_map = {
- "abseiling": 0,
- "air_drumming": 1,
- "answering_questions": 2,
- "applauding": 3,
- "applying_cream": 4,
- "archery": 5,
- "arm_wrestling": 6,
- "arranging_flowers": 7,
- "assembling_computer": 8,
- "auctioning": 9,
- "baby_waking_up": 10,
- "baking_cookies": 11,
- "balloon_blowing": 12,
- "bandaging": 13,
- "barbequing": 14,
- "bartending": 15,
- "beatboxing": 16,
- "bee_keeping": 17,
- "belly_dancing": 18,
- "bench_pressing": 19,
- "bending_back": 20,
- "bending_metal": 21,
- "biking_through_snow": 22,
- "blasting_sand": 23,
- "blowing_glass": 24,
- "blowing_leaves": 25,
- "blowing_nose": 26,
- "blowing_out_candles": 27,
- "bobsledding": 28,
- "bookbinding": 29,
- "bouncing_on_trampoline": 30,
- "bowling": 31,
- "braiding_hair": 32,
- "breading_or_breadcrumbing": 33,
- "breakdancing": 34,
- "brush_painting": 35,
- "brushing_hair": 36,
- "brushing_teeth": 37,
- "building_cabinet": 38,
- "building_shed": 39,
- "bungee_jumping": 40,
- "busking": 41,
- "canoeing_or_kayaking": 42,
- "capoeira": 43,
- "carrying_baby": 44,
- "cartwheeling": 45,
- "carving_pumpkin": 46,
- "catching_fish": 47,
- "catching_or_throwing_baseball": 48,
- "catching_or_throwing_frisbee": 49,
- "catching_or_throwing_softball": 50,
- "celebrating": 51,
- "changing_oil": 52,
- "changing_wheel": 53,
- "checking_tires": 54,
- "cheerleading": 55,
- "chopping_wood": 56,
- "clapping": 57,
- "clay_pottery_making": 58,
- "clean_and_jerk": 59,
- "cleaning_floor": 60,
- "cleaning_gutters": 61,
- "cleaning_pool": 62,
- "cleaning_shoes": 63,
- "cleaning_toilet": 64,
- "cleaning_windows": 65,
- "climbing_a_rope": 66,
- "climbing_ladder": 67,
- "climbing_tree": 68,
- "contact_juggling": 69,
- "cooking_chicken": 70,
- "cooking_egg": 71,
- "cooking_on_campfire": 72,
- "cooking_sausages": 73,
- "counting_money": 74,
- "country_line_dancing": 75,
- "cracking_neck": 76,
- "crawling_baby": 77,
- "crossing_river": 78,
- "crying": 79,
- "curling_hair": 80,
- "cutting_nails": 81,
- "cutting_pineapple": 82,
- "cutting_watermelon": 83,
- "dancing_ballet": 84,
- "dancing_charleston": 85,
- "dancing_gangnam_style": 86,
- "dancing_macarena": 87,
- "deadlifting": 88,
- "decorating_the_christmas_tree": 89,
- "digging": 90,
- "dining": 91,
- "disc_golfing": 92,
- "diving_cliff": 93,
- "dodgeball": 94,
- "doing_aerobics": 95,
- "doing_laundry": 96,
- "doing_nails": 97,
- "drawing": 98,
- "dribbling_basketball": 99,
- "drinking": 100,
- "drinking_beer": 101,
- "drinking_shots": 102,
- "driving_car": 103,
- "driving_tractor": 104,
- "drop_kicking": 105,
- "drumming_fingers": 106,
- "dunking_basketball": 107,
- "dying_hair": 108,
- "eating_burger": 109,
- "eating_cake": 110,
- "eating_carrots": 111,
- "eating_chips": 112,
- "eating_doughnuts": 113,
- "eating_hotdog": 114,
- "eating_ice_cream": 115,
- "eating_spaghetti": 116,
- "eating_watermelon": 117,
- "egg_hunting": 118,
- "exercising_arm": 119,
- "exercising_with_an_exercise_ball": 120,
- "extinguishing_fire": 121,
- "faceplanting": 122,
- "feeding_birds": 123,
- "feeding_fish": 124,
- "feeding_goats": 125,
- "filling_eyebrows": 126,
- "finger_snapping": 127,
- "fixing_hair": 128,
- "flipping_pancake": 129,
- "flying_kite": 130,
- "folding_clothes": 131,
- "folding_napkins": 132,
- "folding_paper": 133,
- "front_raises": 134,
- "frying_vegetables": 135,
- "garbage_collecting": 136,
- "gargling": 137,
- "getting_a_haircut": 138,
- "getting_a_tattoo": 139,
- "giving_or_receiving_award": 140,
- "golf_chipping": 141,
- "golf_driving": 142,
- "golf_putting": 143,
- "grinding_meat": 144,
- "grooming_dog": 145,
- "grooming_horse": 146,
- "gymnastics_tumbling": 147,
- "hammer_throw": 148,
- "headbanging": 149,
- "headbutting": 150,
- "high_jump": 151,
- "high_kick": 152,
- "hitting_baseball": 153,
- "hockey_stop": 154,
- "holding_snake": 155,
- "hopscotch": 156,
- "hoverboarding": 157,
- "hugging": 158,
- "hula_hooping": 159,
- "hurdling": 160,
- "hurling_(sport)": 161,
- "ice_climbing": 162,
- "ice_fishing": 163,
- "ice_skating": 164,
- "ironing": 165,
- "javelin_throw": 166,
- "jetskiing": 167,
- "jogging": 168,
- "juggling_balls": 169,
- "juggling_fire": 170,
- "juggling_soccer_ball": 171,
- "jumping_into_pool": 172,
- "jumpstyle_dancing": 173,
- "kicking_field_goal": 174,
- "kicking_soccer_ball": 175,
- "kissing": 176,
- "kitesurfing": 177,
- "knitting": 178,
- "krumping": 179,
- "laughing": 180,
- "laying_bricks": 181,
- "long_jump": 182,
- "lunge": 183,
- "making_a_cake": 184,
- "making_a_sandwich": 185,
- "making_bed": 186,
- "making_jewelry": 187,
- "making_pizza": 188,
- "making_snowman": 189,
- "making_sushi": 190,
- "making_tea": 191,
- "marching": 192,
- "massaging_back": 193,
- "massaging_feet": 194,
- "massaging_legs": 195,
- "massaging_person's_head": 196,
- "milking_cow": 197,
- "mopping_floor": 198,
- "motorcycling": 199,
- "moving_furniture": 200,
- "mowing_lawn": 201,
- "news_anchoring": 202,
- "opening_bottle": 203,
- "opening_present": 204,
- "paragliding": 205,
- "parasailing": 206,
- "parkour": 207,
- "passing_American_football_(in_game)": 208,
- "passing_American_football_(not_in_game)": 209,
- "peeling_apples": 210,
- "peeling_potatoes": 211,
- "petting_animal_(not_cat)": 212,
- "petting_cat": 213,
- "picking_fruit": 214,
- "planting_trees": 215,
- "plastering": 216,
- "playing_accordion": 217,
- "playing_badminton": 218,
- "playing_bagpipes": 219,
- "playing_basketball": 220,
- "playing_bass_guitar": 221,
- "playing_cards": 222,
- "playing_cello": 223,
- "playing_chess": 224,
- "playing_clarinet": 225,
- "playing_controller": 226,
- "playing_cricket": 227,
- "playing_cymbals": 228,
- "playing_didgeridoo": 229,
- "playing_drums": 230,
- "playing_flute": 231,
- "playing_guitar": 232,
- "playing_harmonica": 233,
- "playing_harp": 234,
- "playing_ice_hockey": 235,
- "playing_keyboard": 236,
- "playing_kickball": 237,
- "playing_monopoly": 238,
- "playing_organ": 239,
- "playing_paintball": 240,
- "playing_piano": 241,
- "playing_poker": 242,
- "playing_recorder": 243,
- "playing_saxophone": 244,
- "playing_squash_or_racquetball": 245,
- "playing_tennis": 246,
- "playing_trombone": 247,
- "playing_trumpet": 248,
- "playing_ukulele": 249,
- "playing_violin": 250,
- "playing_volleyball": 251,
- "playing_xylophone": 252,
- "pole_vault": 253,
- "presenting_weather_forecast": 254,
- "pull_ups": 255,
- "pumping_fist": 256,
- "pumping_gas": 257,
- "punching_bag": 258,
- "punching_person_(boxing)": 259,
- "push_up": 260,
- "pushing_car": 261,
- "pushing_cart": 262,
- "pushing_wheelchair": 263,
- "reading_book": 264,
- "reading_newspaper": 265,
- "recording_music": 266,
- "riding_a_bike": 267,
- "riding_camel": 268,
- "riding_elephant": 269,
- "riding_mechanical_bull": 270,
- "riding_mountain_bike": 271,
- "riding_mule": 272,
- "riding_or_walking_with_horse": 273,
- "riding_scooter": 274,
- "riding_unicycle": 275,
- "ripping_paper": 276,
- "robot_dancing": 277,
- "rock_climbing": 278,
- "rock_scissors_paper": 279,
- "roller_skating": 280,
- "running_on_treadmill": 281,
- "sailing": 282,
- "salsa_dancing": 283,
- "sanding_floor": 284,
- "scrambling_eggs": 285,
- "scuba_diving": 286,
- "setting_table": 287,
- "shaking_hands": 288,
- "shaking_head": 289,
- "sharpening_knives": 290,
- "sharpening_pencil": 291,
- "shaving_head": 292,
- "shaving_legs": 293,
- "shearing_sheep": 294,
- "shining_shoes": 295,
- "shooting_basketball": 296,
- "shooting_goal_(soccer)": 297,
- "shot_put": 298,
- "shoveling_snow": 299,
- "shredding_paper": 300,
- "shuffling_cards": 301,
- "side_kick": 302,
- "sign_language_interpreting": 303,
- "singing": 304,
- "situp": 305,
- "skateboarding": 306,
- "ski_jumping": 307,
- "skiing_(not_slalom_or_crosscountry)": 308,
- "skiing_crosscountry": 309,
- "skiing_slalom": 310,
- "skipping_rope": 311,
- "skydiving": 312,
- "slacklining": 313,
- "slapping": 314,
- "sled_dog_racing": 315,
- "smoking": 316,
- "smoking_hookah": 317,
- "snatch_weight_lifting": 318,
- "sneezing": 319,
- "sniffing": 320,
- "snorkeling": 321,
- "snowboarding": 322,
- "snowkiting": 323,
- "snowmobiling": 324,
- "somersaulting": 325,
- "spinning_poi": 326,
- "spray_painting": 327,
- "spraying": 328,
- "springboard_diving": 329,
- "squat": 330,
- "sticking_tongue_out": 331,
- "stomping_grapes": 332,
- "stretching_arm": 333,
- "stretching_leg": 334,
- "strumming_guitar": 335,
- "surfing_crowd": 336,
- "surfing_water": 337,
- "sweeping_floor": 338,
- "swimming_backstroke": 339,
- "swimming_breast_stroke": 340,
- "swimming_butterfly_stroke": 341,
- "swing_dancing": 342,
- "swinging_legs": 343,
- "swinging_on_something": 344,
- "sword_fighting": 345,
- "tai_chi": 346,
- "taking_a_shower": 347,
- "tango_dancing": 348,
- "tap_dancing": 349,
- "tapping_guitar": 350,
- "tapping_pen": 351,
- "tasting_beer": 352,
- "tasting_food": 353,
- "testifying": 354,
- "texting": 355,
- "throwing_axe": 356,
- "throwing_ball": 357,
- "throwing_discus": 358,
- "tickling": 359,
- "tobogganing": 360,
- "tossing_coin": 361,
- "tossing_salad": 362,
- "training_dog": 363,
- "trapezing": 364,
- "trimming_or_shaving_beard": 365,
- "trimming_trees": 366,
- "triple_jump": 367,
- "tying_bow_tie": 368,
- "tying_knot_(not_on_a_tie)": 369,
- "tying_tie": 370,
- "unboxing": 371,
- "unloading_truck": 372,
- "using_computer": 373,
- "using_remote_controller_(not_gaming)": 374,
- "using_segway": 375,
- "vault": 376,
- "waiting_in_line": 377,
- "walking_the_dog": 378,
- "washing_dishes": 379,
- "washing_feet": 380,
- "washing_hair": 381,
- "washing_hands": 382,
- "water_skiing": 383,
- "water_sliding": 384,
- "watering_plants": 385,
- "waxing_back": 386,
- "waxing_chest": 387,
- "waxing_eyebrows": 388,
- "waxing_legs": 389,
- "weaving_basket": 390,
- "welding": 391,
- "whistling": 392,
- "windsurfing": 393,
- "wrapping_present": 394,
- "wrestling": 395,
- "writing": 396,
- "yawning": 397,
- "yoga": 398,
- "zumba": 399,
-}
-SSv2_label_map = {
- "Approaching something with your camera": 0,
- "Attaching something to something": 1,
- "Bending something so that it deforms": 2,
- "Bending something until it breaks": 3,
- "Burying something in something": 4,
- "Closing something": 5,
- "Covering something with something": 6,
- "Digging something out of something": 7,
- "Dropping something behind something": 8,
- "Dropping something in front of something": 9,
- "Dropping something into something": 10,
- "Dropping something next to something": 11,
- "Dropping something onto something": 12,
- "Failing to put something into something because something does not fit": 13,
- "Folding something": 14,
- "Hitting something with something": 15,
- "Holding something": 16,
- "Holding something behind something": 17,
- "Holding something in front of something": 18,
- "Holding something next to something": 19,
- "Holding something over something": 20,
- "Laying something on the table on its side, not upright": 21,
- "Letting something roll along a flat surface": 22,
- "Letting something roll down a slanted surface": 23,
- "Letting something roll up a slanted surface, so it rolls back down": 24,
- "Lifting a surface with something on it but not enough for it to slide down": 25,
- "Lifting a surface with something on it until it starts sliding down": 26,
- "Lifting something up completely without letting it drop down": 27,
- "Lifting something up completely, then letting it drop down": 28,
- "Lifting something with something on it": 29,
- "Lifting up one end of something without letting it drop down": 30,
- "Lifting up one end of something, then letting it drop down": 31,
- "Moving away from something with your camera": 32,
- "Moving part of something": 33,
- "Moving something across a surface until it falls down": 34,
- "Moving something across a surface without it falling down": 35,
- "Moving something and something away from each other": 36,
- "Moving something and something closer to each other": 37,
- "Moving something and something so they collide with each other": 38,
- "Moving something and something so they pass each other": 39,
- "Moving something away from something": 40,
- "Moving something away from the camera": 41,
- "Moving something closer to something": 42,
- "Moving something down": 43,
- "Moving something towards the camera": 44,
- "Moving something up": 45,
- "Opening something": 46,
- "Picking something up": 47,
- "Piling something up": 48,
- "Plugging something into something": 49,
- "Plugging something into something but pulling it right out as you remove your hand": 50,
- "Poking a hole into some substance": 51,
- "Poking a hole into something soft": 52,
- "Poking a stack of something so the stack collapses": 53,
- "Poking a stack of something without the stack collapsing": 54,
- "Poking something so it slightly moves": 55,
- "Poking something so lightly that it doesn't or almost doesn't move": 56,
- "Poking something so that it falls over": 57,
- "Poking something so that it spins around": 58,
- "Pouring something into something": 59,
- "Pouring something into something until it overflows": 60,
- "Pouring something onto something": 61,
- "Pouring something out of something": 62,
- "Pretending or failing to wipe something off of something": 63,
- "Pretending or trying and failing to twist something": 64,
- "Pretending to be tearing something that is not tearable": 65,
- "Pretending to close something without actually closing it": 66,
- "Pretending to open something without actually opening it": 67,
- "Pretending to pick something up": 68,
- "Pretending to poke something": 69,
- "Pretending to pour something out of something, but something is empty": 70,
- "Pretending to put something behind something": 71,
- "Pretending to put something into something": 72,
- "Pretending to put something next to something": 73,
- "Pretending to put something on a surface": 74,
- "Pretending to put something onto something": 75,
- "Pretending to put something underneath something": 76,
- "Pretending to scoop something up with something": 77,
- "Pretending to spread air onto something": 78,
- "Pretending to sprinkle air onto something": 79,
- "Pretending to squeeze something": 80,
- "Pretending to take something from somewhere": 81,
- "Pretending to take something out of something": 82,
- "Pretending to throw something": 83,
- "Pretending to turn something upside down": 84,
- "Pulling something from behind of something": 85,
- "Pulling something from left to right": 86,
- "Pulling something from right to left": 87,
- "Pulling something onto something": 88,
- "Pulling something out of something": 89,
- "Pulling two ends of something but nothing happens": 90,
- "Pulling two ends of something so that it gets stretched": 91,
- "Pulling two ends of something so that it separates into two pieces": 92,
- "Pushing something from left to right": 93,
- "Pushing something from right to left": 94,
- "Pushing something off of something": 95,
- "Pushing something onto something": 96,
- "Pushing something so it spins": 97,
- "Pushing something so that it almost falls off but doesn't": 98,
- "Pushing something so that it falls off the table": 99,
- "Pushing something so that it slightly moves": 100,
- "Pushing something with something": 101,
- "Putting number of something onto something": 102,
- "Putting something and something on the table": 103,
- "Putting something behind something": 104,
- "Putting something in front of something": 105,
- "Putting something into something": 106,
- "Putting something next to something": 107,
- "Putting something on a flat surface without letting it roll": 108,
- "Putting something on a surface": 109,
- "Putting something on the edge of something so it is not supported and falls down": 110,
- "Putting something onto a slanted surface but it doesn't glide down": 111,
- "Putting something onto something": 112,
- "Putting something onto something else that cannot support it so it falls down": 113,
- "Putting something similar to other things that are already on the table": 114,
- "Putting something that can't roll onto a slanted surface, so it slides down": 115,
- "Putting something that can't roll onto a slanted surface, so it stays where it is": 116,
- "Putting something that cannot actually stand upright upright on the table, so it falls on its side": 117,
- "Putting something underneath something": 118,
- "Putting something upright on the table": 119,
- "Putting something, something and something on the table": 120,
- "Removing something, revealing something behind": 121,
- "Rolling something on a flat surface": 122,
- "Scooping something up with something": 123,
- "Showing a photo of something to the camera": 124,
- "Showing something behind something": 125,
- "Showing something next to something": 126,
- "Showing something on top of something": 127,
- "Showing something to the camera": 128,
- "Showing that something is empty": 129,
- "Showing that something is inside something": 130,
- "Something being deflected from something": 131,
- "Something colliding with something and both are being deflected": 132,
- "Something colliding with something and both come to a halt": 133,
- "Something falling like a feather or paper": 134,
- "Something falling like a rock": 135,
- "Spilling something behind something": 136,
- "Spilling something next to something": 137,
- "Spilling something onto something": 138,
- "Spinning something so it continues spinning": 139,
- "Spinning something that quickly stops spinning": 140,
- "Spreading something onto something": 141,
- "Sprinkling something onto something": 142,
- "Squeezing something": 143,
- "Stacking number of something": 144,
- "Stuffing something into something": 145,
- "Taking one of many similar things on the table": 146,
- "Taking something from somewhere": 147,
- "Taking something out of something": 148,
- "Tearing something into two pieces": 149,
- "Tearing something just a little bit": 150,
- "Throwing something": 151,
- "Throwing something against something": 152,
- "Throwing something in the air and catching it": 153,
- "Throwing something in the air and letting it fall": 154,
- "Throwing something onto a surface": 155,
- "Tilting something with something on it slightly so it doesn't fall down": 156,
- "Tilting something with something on it until it falls off": 157,
- "Tipping something over": 158,
- "Tipping something with something in it over, so something in it falls out": 159,
- "Touching (without moving) part of something": 160,
- "Trying but failing to attach something to something because it doesn't stick": 161,
- "Trying to bend something unbendable so nothing happens": 162,
- "Trying to pour something into something, but missing so it spills next to it": 163,
- "Turning something upside down": 164,
- "Turning the camera downwards while filming something": 165,
- "Turning the camera left while filming something": 166,
- "Turning the camera right while filming something": 167,
- "Turning the camera upwards while filming something": 168,
- "Twisting (wringing) something wet until water comes out": 169,
- "Twisting something": 170,
- "Uncovering something": 171,
- "Unfolding something": 172,
- "Wiping something off of something": 173,
- "Moving something and something so they overlap each other": 174,
-}
-UCF_label_map = {
- "ApplyEyeMakeup": 0,
- "ApplyLipstick": 1,
- "Archery": 2,
- "BabyCrawling": 3,
- "BalanceBeam": 4,
- "BandMarching": 5,
- "BaseballPitch": 6,
- "Basketball": 7,
- "BasketballDunk": 8,
- "BenchPress": 9,
- "Biking": 10,
- "Billiards": 11,
- "BlowDryHair": 12,
- "BlowingCandles": 13,
- "BodyWeightSquats": 14,
- "Bowling": 15,
- "BoxingPunchingBag": 16,
- "BoxingSpeedBag": 17,
- "BreastStroke": 18,
- "BrushingTeeth": 19,
- "CleanAndJerk": 20,
- "CliffDiving": 21,
- "CricketBowling": 22,
- "CricketShot": 23,
- "CuttingInKitchen": 24,
- "Diving": 25,
- "Drumming": 26,
- "Fencing": 27,
- "FieldHockeyPenalty": 28,
- "FloorGymnastics": 29,
- "FrisbeeCatch": 30,
- "FrontCrawl": 31,
- "GolfSwing": 32,
- "Haircut": 33,
- "Hammering": 34,
- "HammerThrow": 35,
- "HandstandPushups": 36,
- "HandstandWalking": 37,
- "HeadMassage": 38,
- "HighJump": 39,
- "HorseRace": 40,
- "HorseRiding": 41,
- "HulaHoop": 42,
- "IceDancing": 43,
- "JavelinThrow": 44,
- "JugglingBalls": 45,
- "JumpingJack": 46,
- "JumpRope": 47,
- "Kayaking": 48,
- "Knitting": 49,
- "LongJump": 50,
- "Lunges": 51,
- "MilitaryParade": 52,
- "Mixing": 53,
- "MoppingFloor": 54,
- "Nunchucks": 55,
- "ParallelBars": 56,
- "PizzaTossing": 57,
- "PlayingCello": 58,
- "PlayingDaf": 59,
- "PlayingDhol": 60,
- "PlayingFlute": 61,
- "PlayingGuitar": 62,
- "PlayingPiano": 63,
- "PlayingSitar": 64,
- "PlayingTabla": 65,
- "PlayingViolin": 66,
- "PoleVault": 67,
- "PommelHorse": 68,
- "PullUps": 69,
- "Punch": 70,
- "PushUps": 71,
- "Rafting": 72,
- "RockClimbingIndoor": 73,
- "RopeClimbing": 74,
- "Rowing": 75,
- "SalsaSpin": 76,
- "ShavingBeard": 77,
- "Shotput": 78,
- "SkateBoarding": 79,
- "Skiing": 80,
- "Skijet": 81,
- "SkyDiving": 82,
- "SoccerJuggling": 83,
- "SoccerPenalty": 84,
- "StillRings": 85,
- "SumoWrestling": 86,
- "Surfing": 87,
- "Swing": 88,
- "TableTennisShot": 89,
- "TaiChi": 90,
- "TennisSwing": 91,
- "ThrowDiscus": 92,
- "TrampolineJumping": 93,
- "Typing": 94,
- "UnevenBars": 95,
- "VolleyballSpiking": 96,
- "WalkingWithDog": 97,
- "WallPushups": 98,
- "WritingOnBoard": 99,
- "YoYo": 100,
-}
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Cs6 Master Collection Winmac - Xforce.zip 137 Kb.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Cs6 Master Collection Winmac - Xforce.zip 137 Kb.md
deleted file mode 100644
index 48b5565a53edeb8139c56f160697cb8d6a860432..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Adobe Cs6 Master Collection Winmac - Xforce.zip 137 Kb.md
+++ /dev/null
@@ -1,6 +0,0 @@
-adobe cs6 master collection winmac - xforce.zip 137 kb Download ☑ https://urlin.us/2uExvI
-
-Adobe Cs6 Master Collection Winmac - Xforce.zip 137 Kb adobe+cs6+master+collection+winmac+-+xforce.zip , adobe cs6 master collection ... 4d29de3e1b
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Airfader Server 2.2 Crack HOT!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Airfader Server 2.2 Crack HOT!.md
deleted file mode 100644
index 03ae5a845a103d1fd312246456840fd0d3990c60..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Airfader Server 2.2 Crack HOT!.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-AirFader Server 2.2 Crack: A Risky Way to Control Your Digital Console
-AirFader is a touchscreen mixing software that allows you to remotely control Yamaha digital consoles from your Android or iPad devices[^2^]. It is designed to be a reliable and efficient tool for audio engineers and musicians who need to adjust their personal monitor mixes.
-airfader server 2.2 crack Download Zip ❤❤❤ https://urlin.us/2uEwp1
-AirFader Server 2.2 is the latest version of the software that runs on a Windows computer and acts as a server for the mobile devices. It supports Yamaha 01V96, LS9, and M7CL consoles and offers features such as channel naming, scene recall, mute groups, and more[^2^]. The software costs $149.00 and can be purchased from the official website[^2^].
-However, some people may be tempted to look for a cracked version of AirFader Server 2.2 that bypasses the license verification and allows them to use the software for free. This is a risky and illegal way to obtain the software, as it may expose your computer and devices to malware, viruses, or other security threats. Moreover, it may damage your console or compromise your audio quality by introducing glitches, errors, or latency. Furthermore, it may violate the terms of service of AirFader and Yamaha and result in legal consequences.
-Therefore, it is strongly advised to avoid using AirFader Server 2.2 crack and instead purchase the software from the official website. This way, you can enjoy the full benefits of AirFader without risking your equipment, data, or reputation.
If you are interested in purchasing AirFader Server 2.2, you can visit the official website and click on the "Buy Now" button. You will be redirected to a secure payment page where you can choose your preferred payment method and enter your details. After completing the payment, you will receive an email with a download link and a license key. You can then install the software on your Windows computer and activate it with the license key. You will also be able to download the mobile app for Android or iPad from the Google Play Store or the App Store respectively.
-AirFader has many advantages over other mixing software, such as its simplicity, reliability, and efficiency. It is designed to be easy to use and intuitive, with a touch-friendly interface that mimics the layout of the console. It is also compatible with most Windows computers and mobile devices, without requiring any additional hardware or software. It is also optimized to minimize network traffic and latency, ensuring a smooth and stable performance. Moreover, it offers features that are not available on the console itself, such as channel naming, scene recall, mute groups, and more.
-
-AirFader is currently only compatible with Yamaha 01V96, LS9, and M7CL consoles. However, the developers are working on adding support for other consoles in the future. You can check the official website for updates and news on AirFader's development. You can also contact the developers if you have any suggestions or feedback on how to improve AirFader.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Alien Shooter 3 Free Download Full Version For Pc _VERIFIED_.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Alien Shooter 3 Free Download Full Version For Pc _VERIFIED_.md
deleted file mode 100644
index 82982b475373a3fb2f2a409c19c5611bd94a3076..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Alien Shooter 3 Free Download Full Version For Pc _VERIFIED_.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Alien Shooter 3 Free Download Full Version For Pc Download File 🌟 https://urlin.us/2uEwsO
-
-Choose from 10 skill levels versus computer in this portable chess game. OS: Windows File Size: 73 KB « Board ». 6,710 downloads. 8.9 / 10. European ... 1fdad05405
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Essay On Internet In Urdu Language !!INSTALL!!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Essay On Internet In Urdu Language !!INSTALL!!.md
deleted file mode 100644
index 335a8d734d20570e4bb9d0565589e30f74582005..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Essay On Internet In Urdu Language !!INSTALL!!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Essay On Internet In Urdu Language Download File ○ https://urlin.us/2uExrZ
-
-Internet ke fayde aur nuksan in urdu essay ... berlin airport restaurant business plan south africa essay on importance of english language in modern world pdf, ... 1fdad05405
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Fsx Aerosoft Manhattan X Crack.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Fsx Aerosoft Manhattan X Crack.md
deleted file mode 100644
index 3977ddb3aedef9b6591b70db2a410c0ec19b5f58..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Fsx Aerosoft Manhattan X Crack.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Fsx Aerosoft Manhattan X Crack Download ☑ https://urlin.us/2uEyDA
-
-Download.free.software.full.vers ion.and.android..11 ... pc fix keygen 3 days zoo ... Minecraft 1.1.0 Cracked [portable] [Updatable] [Online] 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/BETTER Download Saints Row 2 Highly Compressed.md b/spaces/inreVtussa/clothingai/Examples/BETTER Download Saints Row 2 Highly Compressed.md
deleted file mode 100644
index bfa7d27f25f989b93fe6f09814cd412c07adf2f2..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/BETTER Download Saints Row 2 Highly Compressed.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-Saints Row: The game is complete in most aspects. The game has an excellent graphics,very engaging gameplay and an outstanding soundtrack.If you want to play an open-world game that has a lot of RPG elements to it this is a must.If you are an open-world game fan this is a must for you,you should also download it.This is a game that not only has a lot of open-world elements to it but also a lot of RPG elements,it also has a few unlockable characters,such as the player and various npc’s you can talk to and buy.There are also a lot of powerups you can collect like bionic arm upgrades,superpowers,fast cars and cool side missions,as well as other things.
-Saints Row 2 for Ps2 is the second part of the Saints Row series. It has several problems like I posted in my other post. It looks nice, you get a lot of weapons and more choices, side-missions, superpowers etc. but unfortunately it becomes rather repetitive in the last part. You get the same missions over and over again and you get repeated things in them, like stealing cops cars or that. I found that part a bit boring since the first part was such a rush to steal stuff and run around. I’ve also got a few problems with the controller and it is nothing that can be changed. The code could be more localized, to make all those things you can do, a bit more user friendly. That would have been much more fun.
-Download Saints Row 2 Highly Compressed Download Zip –––––>>> https://tiurll.com/2uClNN
-Saints Row 2 is a game of fun and action in which players assume the role of Johnny Gat, a man with a rare photographic memory. The player explores the mean streets of Stilwater, a city where crime and corruption are always in style. Players acquire powers, influence, and wealth by using guile, resourcefulness, and cunning in numerous interactive situations. Players make use of a wide array of weapons, powers, vehicles, and other items to effectively carry out crime and deal with regular problems. The goal of the game is to settle scores, take revenge, and become the ultimate gangster.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/CPU-Tweaker 1.1 Setup TOP Free.md b/spaces/inreVtussa/clothingai/Examples/CPU-Tweaker 1.1 Setup TOP Free.md
deleted file mode 100644
index 9458abbceaf8f88f899478b2b157bd1e9d65fac0..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/CPU-Tweaker 1.1 Setup TOP Free.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-CPU-Tweaker 1.1: A Free and Lightweight Tool to Boost Your CPU Performance
-If you are looking for a simple and effective way to overclock your CPU and get more speed out of your computer, you might want to try CPU-Tweaker 1.1. This is a free and lightweight tool that allows you to tweak the CPU timings and improve the performance of your processor.
-CPU-Tweaker 1.1 is designed for CPUs with an integrated memory controller (IMC), such as Intel Core i3, i5, i7, and AMD Phenom and Ryzen processors. It can read and change the principal and secondary timings of your memory, as well as the voltage and frequency settings of your CPU.
-CPU-Tweaker 1.1 Setup Free DOWNLOAD › https://tiurll.com/2uCivC
-CPU-Tweaker 1.1 is very easy to use, as it has a user-friendly interface that shows you all the relevant information about your CPU and memory. You can adjust the values by using sliders or typing them manually, and apply the changes with a single click. You can also save and load different profiles for different overclocking scenarios.
-CPU-Tweaker 1.1 also has a built-in stress test feature that lets you check the stability of your overclock settings. You can run the test for a specified amount of time or until an error occurs. If your computer freezes or crashes during the test, you can restart it and reset the settings to normal.
-CPU-Tweaker 1.1 is a portable program that does not require installation. You can run it from any folder or USB drive. It works on Windows XP, Vista, 7, 8, and 10, both 32-bit and 64-bit versions.
-If you want to download CPU-Tweaker 1.1 for free, you can visit its official website at https://www.tweakers.fr/cpu-tweaker.html or one of the trusted software download sites such as MajorGeeks or Softpedia .
-Before you use CPU-Tweaker 1.1, make sure you have a backup of your important data and a reliable cooling system for your CPU. Overclocking can cause overheating, instability, data loss, or hardware damage if done improperly. Use CPU-Tweaker 1.1 at your own risk and responsibility.
-
-CPU-Tweaker 1.1 is not the only overclocking software available for Windows, but it has some advantages over other similar tools. For example, CPU-Tweaker 1.1 is more lightweight and consumes less system resources than other programs. It also has a simpler and cleaner interface that makes it easier to use and understand.
-CPU-Tweaker 1.1 is also more compatible with different types of CPUs and memory modules than other tools. It can work with both Intel and AMD processors, as well as DDR2, DDR3, and DDR4 RAM. It can also detect and support the latest CPU models and features, such as Turbo Boost and Hyper-Threading.
-
-CPU-Tweaker 1.1 is a great tool for beginners and advanced users alike who want to overclock their CPU and get more performance out of their computer. It is free, portable, easy to use, and effective. However, it is not a magic solution that can make your computer run faster without any drawbacks. Overclocking can have negative effects on your system stability, temperature, power consumption, and lifespan. Therefore, you should always be careful and cautious when using CPU-Tweaker 1.1 or any other overclocking software.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/Code Soft Tp 3160 Driver BEST.md b/spaces/inreVtussa/clothingai/Examples/Code Soft Tp 3160 Driver BEST.md
deleted file mode 100644
index 8559de8c471a68aed0e9a171ce208fab63fbf7ea..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Code Soft Tp 3160 Driver BEST.md
+++ /dev/null
@@ -1,26 +0,0 @@
-Code Soft Tp 3160 Driver Download File ✒ https://tiurll.com/2uCmrx
-
-, the hard driver and the soft driver normally do not play well together. If you want to use both together, it will probably be necessary to go through some effort to ensure they coexist peacefully.
-
-Because of the architecture of hard disks (as opposed to, say, other DIMMs), the BIOS has to take the hard disk device into the online state on bootup. This enables the BIOS to read the hard disk's partition table and make partition bootable. It also makes it possible for the BIOS to partition disks as it boots.
-
-Now let's take a look at how to put the disk back into the standby mode. The standby mode is similar to the online state. For example, the hard disk is ready for use again once the power is turned back on after having been turned off.
-
-You can enter the standby mode in two ways: by pressing the "ESC" key on your keyboard as the computer starts and the hard disk is booting, or by pressing the "power" button on the back of the hard disk drive itself.
-
-If you have only one hard disk installed in the computer, it will boot up to a disk check prompt. Let's examine the case where your hard disk drive has failed.
-
-If your computer is booting and you can still access the BIOS setup screen, you can enter the standby mode by pressing the "ESC" key. Alternatively, you can press the power button on the back of the hard disk drive itself to enter the standby mode.
-
-If your computer is booting and you cannot access the BIOS setup screen, press the power button on the back of the hard disk drive itself to enter the standby mode. Then press the ESC key to make your computer boot up normally.
-
-Once you have entered the standby mode, you should return to the installation disk menu. If you have only one hard disk in the computer, the hard disk will be in the online state.
-
-Since you can boot from the installation disk, the only disk that will be in the standby state is the one that you are installing on. So make sure that the disk you want to install to is not in the online state and you can proceed with installation.
-
-You should note that you can only enter the standby mode on a working hard disk. You cannot enter the standby mode on a failed hard disk.
-
-You can run a hard disk check on a hard disk in the standby state. If you see errors or problems with the disk 4fefd39f24
-
-
-
diff --git a/spaces/ispast/Genshin_MB_VITS_TTS/text/__init__.py b/spaces/ispast/Genshin_MB_VITS_TTS/text/__init__.py
deleted file mode 100644
index 48ae82f3e40ecd1bf17a7de78d87790327af3362..0000000000000000000000000000000000000000
--- a/spaces/ispast/Genshin_MB_VITS_TTS/text/__init__.py
+++ /dev/null
@@ -1,56 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence
-
-
-def cleaned_text_to_sequence(cleaned_text):
-  '''Converts a string of already-cleaned text to a sequence of IDs corresponding to its symbols.
-    Args:
-      cleaned_text: cleaned string to convert to a sequence
-    Returns:
-      List of integers corresponding to the symbols in the text
- '''
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
- return sequence
-
-
-def sequence_to_text(sequence):
- '''Converts a sequence of IDs back to a string'''
- result = ''
- for symbol_id in sequence:
- s = _id_to_symbol[symbol_id]
- result += s
- return result
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
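-
-
-# A hypothetical usage sketch; the cleaner name below is an assumption and must match a function
-# actually defined in text/cleaners.py for this Space:
-#
-#   from text import text_to_sequence, sequence_to_text
-#
-#   ids = text_to_sequence("input text", ["basic_cleaners"])
-#   print(sequence_to_text(ids))  # round-trips the cleaned text, minus any unknown symbols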
diff --git a/spaces/jbilcke-hf/ai-comic-factory/LICENCE.md b/spaces/jbilcke-hf/ai-comic-factory/LICENCE.md
deleted file mode 100644
index 537fde8423156f05dc00b52a4fc8eebd451f66e9..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/ai-comic-factory/LICENCE.md
+++ /dev/null
@@ -1,170 +0,0 @@
-Apache License
-==============
-
-_Version 2.0, January 2004_
-_<http://www.apache.org/licenses/>_
-
-### Terms and Conditions for use, reproduction, and distribution
-
-#### 1. Definitions
-
-“License” shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-“Licensor” shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-“Legal Entity” shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, “control” means **(i)** the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
-outstanding shares, or **(iii)** beneficial ownership of such entity.
-
-“You” (or “Your”) shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-“Source” form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-“Object” form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-“Work” shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-“Derivative Works” shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-“Contribution” shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-“submitted” means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as “Not a Contribution.”
-
-“Contributor” shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-#### 2. Grant of Copyright License
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-#### 3. Grant of Patent License
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-#### 4. Redistribution
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-* **(a)** You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-* **(b)** You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-* **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-#### 5. Submission of Contributions
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-#### 6. Trademarks
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-#### 7. Disclaimer of Warranty
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-#### 8. Limitation of Liability
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-#### 9. Accepting Warranty or Additional Liability
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-_END OF TERMS AND CONDITIONS_
\ No newline at end of file
diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/input.tsx b/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/input.tsx
deleted file mode 100644
index 0757ddebdca3800bbd4a46fe1c2c17dff86c5e2f..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/ai-comic-factory/src/components/ui/input.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import * as React from "react"
-
-import { cn } from "@/lib/utils"
-
-export interface InputProps
-  extends React.InputHTMLAttributes<HTMLInputElement> {}
-
-const Input = React.forwardRef<HTMLInputElement, InputProps>(
-  ({ className, type, ...props }, ref) => {
-    return (
-      <input
-        type={type}
-        className={cn(
-          "flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50",
-          className
-        )}
-        ref={ref}
-        {...props}
-      />
-    )
-  }
-)
-Input.displayName = "Input"
-
-export { Input }
diff --git a/spaces/jie1/jie_test4/app.py b/spaces/jie1/jie_test4/app.py
deleted file mode 100644
index ad1c650687829e4e1f8cb67bcfd7075b666aef38..0000000000000000000000000000000000000000
--- a/spaces/jie1/jie_test4/app.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import gradio as gr
-from Sort_Scores import *
-from Preinput_Merge import *
-from Sort_Dlkcat import *
-from Merge_Dlsc import *
-from Sort_Sco_Kcat import *
-from Plt import *
-
-with gr.Blocks(css=".gradio-container {background-image: url('file=background.jpeg')}") as demo:
- gr.Markdown("Welcome to this demo.")
- with gr.Tab("HelloWorld"):
- gr.Markdown("Welcome to this demo.")
- gr.Markdown("This is a successful test")
- gr.Markdown("This demo can do a few useful things")
- gr.Markdown("In Sort, you can sort a scores file, a dlkcat file, or the merged scores/dlkcat file")
- gr.Markdown("In Pre Merge, you can merge a sequence file with a smi file; after merging, dlkcat values can be computed, and if the sequence file needs its line breaks stripped beforehand, that can be handled as well")
- gr.Markdown("In Merge Dlsc, you can merge a scores file with a dlkcat file; note that the sequences in the two files must match")
- gr.Markdown("See the README.md under Files and versions for detailed instructions")
- with gr.Tab("Sort"):
- file1_input = gr.File(label="Upload the relevant file")
- file1_output1 = gr.File()
- file1_output2 = gr.Textbox()
- with gr.Row():
- file1_button1 = gr.Button("Sort Scores")
- file1_button2 = gr.Button("Sort Dlkcat")
- file1_button3 = gr.Button("Sort Mergekcat")
-
- with gr.Tab("Pre Merge"):
- with gr.Row():
- file2_input1 = gr.File(label="strip_file")
- file2_input2 = gr.File(label="smi_file")
- file2_input3 = gr.File(label="seq_file")
- file2_output = gr.File()
- file2_button1 = gr.Button("Strip")
- with gr.Row():
- file2_button2 = gr.Button("Merge")
- file2_button3 = gr.Button("Merge All")
-
- with gr.Tab("Merge Dlsc"):
- with gr.Row():
- file4_input1 = gr.File(label="sc_file")
- file4_input2 = gr.File(label="cat_file")
- file4_output = gr.File()
- file4_button = gr.Button("Merge")
-
- with gr.Tab("Plt Picture"):
- file5_input = gr.File(label="log_file")
- file5_output = gr.File()
- file5_button = gr.Button("Plt")
-
- with gr.Accordion("Open for More!"):
- gr.Markdown("Look at me...")
-
- file1_button1.click(Sort_Scores, inputs=file1_input, outputs=file1_output2)
- file1_button2.click(Sort_Dlkcat, inputs=file1_input, outputs=file1_output1)
- file1_button3.click(Sort_Sco_Kcat, inputs=file1_input, outputs=file1_output1)
-
- file2_button1.click(Strip, inputs=file2_input1, outputs=file2_output)
- file2_button2.click(Merge, inputs=[file2_input2, file2_input3], outputs=file2_output)
- file2_button3.click(Merge_All, inputs=[file2_input2, file2_input3], outputs=file2_output)
-
- file4_button.click(Merge_Dlsc, inputs=[file4_input1, file4_input2], outputs=file4_output)
-
- file5_button.click(Plt, inputs=file5_input, outputs=file5_output)
-
-if __name__ == "__main__":
- demo.launch()
-
diff --git a/spaces/jkang/demo-image-pyxelate/gradio_pyxelate.py b/spaces/jkang/demo-image-pyxelate/gradio_pyxelate.py
deleted file mode 100644
index a88ebd04c2aea2d824b0296b48dcfd1213caa593..0000000000000000000000000000000000000000
--- a/spaces/jkang/demo-image-pyxelate/gradio_pyxelate.py
+++ /dev/null
@@ -1,87 +0,0 @@
-'''Pyxelate Demo
-
-- Based on https://huggingface.co/spaces/akhaliq/Pyxelate
-- Credits to akhaliq
-
-2021-12-16 first created for testing
-'''
-
-import os
-from glob import glob
-from loguru import logger
-
-import gradio as gr
-from skimage import io as sio
-from pyxelate import Pyx, Pal
-
-
-# ----------- Settings -----------
-examples = sorted(glob(os.path.join('examples', '*.jpg')))
-examples = [[image_file, 5, 5, 'none', 'none'] for image_file in examples]
-
-# ----------- Logging -----------
-logger.add('app.log', mode='a')
-logger.info('===== APP RESTARTED =====')
-
-# ----------- Params -----------
-DOWNSAMPLE_MIN = 1
-DOWNSAMPLE_MAX = 10
-COLOR_MIN = 1
-COLOR_MAX = 20
-PALETTE_CHOICES = [
- 'none', # if not chosen
- 'TELETEXT', 'BBC_MICRO', 'CGA_MODE4_PAL1', 'CGA_MODE5_PAL1',
- 'CGA_MODE4_PAL2', 'ZX_SPECTRUM', 'APPLE_II_LO', 'APPLE_II_HI',
- 'COMMODORE_64', 'GAMEBOY_COMBO_UP', 'GAMEBOY_COMBO_DOWN', 'GAMEBOY_COMBO_LEFT',
- 'GAMEBOY_COMBO_RIGHT', 'GAMEBOY_A_UP', 'GAMEBOY_A_DOWN', 'GAMEBOY_A_LEFT',
- 'GAMEBOY_A_RIGHT', 'GAMEBOY_B_UP', 'GAMEBOY_B_DOWN', 'GAMEBOY_B_LEFT',
- 'GAMEBOY_B_RIGHT', 'GAMEBOY_ORIGINAL', 'GAMEBOY_POCKET', 'GAMEBOY_VIRTUALBOY',
- 'MICROSOFT_WINDOWS_16', 'MICROSOFT_WINDOWS_20', 'MICROSOFT_WINDOWS_PAINT',
- 'PICO_8', 'MSX', 'MONO_OBRADINN_IBM', 'MONO_OBRADINN_MAC', 'MONO_BJG', 'MONO_BW',
- 'MONO_PHOSPHOR_AMBER', 'MONO_PHOSPHOR_LTAMBER', 'MONO_PHOSPHOR_GREEN1',
- 'MONO_PHOSPHOR_GREEN2', 'MONO_PHOSPHOR_GREEN3', 'MONO_PHOSPHOR_APPLE', 'APPLE_II_MONO',
- 'MONO_PHOSPHOR_APPLEC', 'APPLE_II_MONOC'
-]
-DITHER_CHOICES = ['none', 'naive', 'bayer', 'floyd', 'atkinson']
-
-
-def predict(image_obj, sampling_param, color_param, palette_param, dither_param):
- img = sio.imread(image_obj.name)
- logger.info('--- image loaded')
-
- if palette_param != 'none':
- color_param = 'none'
- palette = Pal[palette_param]
- else:
- palette = color_param
-
- pyx = Pyx(factor=sampling_param, palette=palette, dither=dither_param)
- pyx.fit(img)
- img_out = pyx.transform(img)
- logger.info('--- output generated')
- return img_out
-
-iface = gr.Interface(
- predict,
- title='A demo that pixelates images.',
- description='Given an image, it can be scaled down and recolored to produce a pixel-art version.',
- inputs=[
- gr.inputs.Image(label='Provide an input image', type='file'),
- gr.inputs.Slider(label='Downsampling factor (larger = bigger pixels = lower resolution)',
- minimum=DOWNSAMPLE_MIN, maximum=DOWNSAMPLE_MAX, step=1, default=5),
- gr.inputs.Slider(label='Number of colors to use (when set, choose "none" for the palette below)',
- minimum=COLOR_MIN, maximum=COLOR_MAX, step=1, default=5),
- gr.inputs.Dropdown(label='Color palette (if chosen, the color count above is ignored)',
- choices=PALETTE_CHOICES, default='none', type='value'),
- gr.inputs.Dropdown(label='Smoothness of the output image ("dithering")',
- choices=DITHER_CHOICES, default='none', type='value'),
- ],
- outputs=[
- gr.outputs.Image(label='Output image')
- ],
- examples=examples,
- enable_queue=True,
- article='Credits to GitHub',
-)
-
-iface.launch(debug=True)
\ No newline at end of file
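For reference, a short sketch of the pyxelate flow used in predict() above, outside of Gradio; the example image path is hypothetical:

from skimage import io as sio
from pyxelate import Pyx

img = sio.imread("examples/example.jpg")        # hypothetical input path
pyx = Pyx(factor=5, palette=8, dither="naive")  # downsample 5x, quantize to 8 colors
pyx.fit(img)                                    # learn the palette from the image
sio.imsave("example_pixelated.png", pyx.transform(img))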
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/click/termui.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/click/termui.py
deleted file mode 100644
index db7a4b286174fdf26f3251631a2066eda2fa5bea..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/click/termui.py
+++ /dev/null
@@ -1,784 +0,0 @@
-import inspect
-import io
-import itertools
-import sys
-import typing as t
-from gettext import gettext as _
-
-from ._compat import isatty
-from ._compat import strip_ansi
-from .exceptions import Abort
-from .exceptions import UsageError
-from .globals import resolve_color_default
-from .types import Choice
-from .types import convert_type
-from .types import ParamType
-from .utils import echo
-from .utils import LazyFile
-
-if t.TYPE_CHECKING:
- from ._termui_impl import ProgressBar
-
-V = t.TypeVar("V")
-
-# The prompt functions to use. The doc tools currently override these
-# functions to customize how they work.
-visible_prompt_func: t.Callable[[str], str] = input
-
-_ansi_colors = {
- "black": 30,
- "red": 31,
- "green": 32,
- "yellow": 33,
- "blue": 34,
- "magenta": 35,
- "cyan": 36,
- "white": 37,
- "reset": 39,
- "bright_black": 90,
- "bright_red": 91,
- "bright_green": 92,
- "bright_yellow": 93,
- "bright_blue": 94,
- "bright_magenta": 95,
- "bright_cyan": 96,
- "bright_white": 97,
-}
-_ansi_reset_all = "\033[0m"
-
-
-def hidden_prompt_func(prompt: str) -> str:
- import getpass
-
- return getpass.getpass(prompt)
-
-
-def _build_prompt(
- text: str,
- suffix: str,
- show_default: bool = False,
- default: t.Optional[t.Any] = None,
- show_choices: bool = True,
- type: t.Optional[ParamType] = None,
-) -> str:
- prompt = text
- if type is not None and show_choices and isinstance(type, Choice):
- prompt += f" ({', '.join(map(str, type.choices))})"
- if default is not None and show_default:
- prompt = f"{prompt} [{_format_default(default)}]"
- return f"{prompt}{suffix}"
-
-
-def _format_default(default: t.Any) -> t.Any:
- if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"):
- return default.name
-
- return default
-
-
-def prompt(
- text: str,
- default: t.Optional[t.Any] = None,
- hide_input: bool = False,
- confirmation_prompt: t.Union[bool, str] = False,
- type: t.Optional[t.Union[ParamType, t.Any]] = None,
- value_proc: t.Optional[t.Callable[[str], t.Any]] = None,
- prompt_suffix: str = ": ",
- show_default: bool = True,
- err: bool = False,
- show_choices: bool = True,
-) -> t.Any:
- """Prompts a user for input. This is a convenience function that can
- be used to prompt a user for input later.
-
- If the user aborts the input by sending an interrupt signal, this
- function will catch it and raise a :exc:`Abort` exception.
-
- :param text: the text to show for the prompt.
- :param default: the default value to use if no input happens. If this
- is not given it will prompt until it's aborted.
- :param hide_input: if this is set to true then the input value will
- be hidden.
- :param confirmation_prompt: Prompt a second time to confirm the
- value. Can be set to a string instead of ``True`` to customize
- the message.
- :param type: the type to use to check the value against.
- :param value_proc: if this parameter is provided it's a function that
- is invoked instead of the type conversion to
- convert a value.
- :param prompt_suffix: a suffix that should be added to the prompt.
- :param show_default: shows or hides the default value in the prompt.
- :param err: if set to true the file defaults to ``stderr`` instead of
- ``stdout``, the same as with echo.
- :param show_choices: Show or hide choices if the passed type is a Choice.
- For example if type is a Choice of either day or week,
- show_choices is true and text is "Group by" then the
- prompt will be "Group by (day, week): ".
-
- .. versionadded:: 8.0
- ``confirmation_prompt`` can be a custom string.
-
- .. versionadded:: 7.0
- Added the ``show_choices`` parameter.
-
- .. versionadded:: 6.0
- Added unicode support for cmd.exe on Windows.
-
- .. versionadded:: 4.0
- Added the `err` parameter.
-
- """
-
- def prompt_func(text: str) -> str:
- f = hidden_prompt_func if hide_input else visible_prompt_func
- try:
- # Write the prompt separately so that we get nice
- # coloring through colorama on Windows
- echo(text.rstrip(" "), nl=False, err=err)
- # Echo a space to stdout to work around an issue where
- # readline causes backspace to clear the whole line.
- return f(" ")
- except (KeyboardInterrupt, EOFError):
- # getpass doesn't print a newline if the user aborts input with ^C.
- # Allegedly this behavior is inherited from getpass(3).
- # A doc bug has been filed at https://bugs.python.org/issue24711
- if hide_input:
- echo(None, err=err)
- raise Abort() from None
-
- if value_proc is None:
- value_proc = convert_type(type, default)
-
- prompt = _build_prompt(
- text, prompt_suffix, show_default, default, show_choices, type
- )
-
- if confirmation_prompt:
- if confirmation_prompt is True:
- confirmation_prompt = _("Repeat for confirmation")
-
- confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix)
-
- while True:
- while True:
- value = prompt_func(prompt)
- if value:
- break
- elif default is not None:
- value = default
- break
- try:
- result = value_proc(value)
- except UsageError as e:
- if hide_input:
- echo(_("Error: The value you entered was invalid."), err=err)
- else:
- echo(_("Error: {e.message}").format(e=e), err=err) # noqa: B306
- continue
- if not confirmation_prompt:
- return result
- while True:
- value2 = prompt_func(confirmation_prompt)
- is_empty = not value and not value2
- if value2 or is_empty:
- break
- if value == value2:
- return result
- echo(_("Error: The two entered values do not match."), err=err)
-
-
-def confirm(
- text: str,
- default: t.Optional[bool] = False,
- abort: bool = False,
- prompt_suffix: str = ": ",
- show_default: bool = True,
- err: bool = False,
-) -> bool:
- """Prompts for confirmation (yes/no question).
-
- If the user aborts the input by sending an interrupt signal, this
- function will catch it and raise an :exc:`Abort` exception.
-
- :param text: the question to ask.
- :param default: The default value to use when no input is given. If
- ``None``, repeat until input is given.
- :param abort: if this is set to `True` a negative answer aborts
- execution by raising :exc:`Abort`.
- :param prompt_suffix: a suffix that should be added to the prompt.
- :param show_default: shows or hides the default value in the prompt.
- :param err: if set to true the file defaults to ``stderr`` instead of
- ``stdout``, the same as with echo.
-
- .. versionchanged:: 8.0
- Repeat until input is given if ``default`` is ``None``.
-
- .. versionadded:: 4.0
- Added the ``err`` parameter.
- """
- prompt = _build_prompt(
- text,
- prompt_suffix,
- show_default,
- "y/n" if default is None else ("Y/n" if default else "y/N"),
- )
-
- while True:
- try:
- # Write the prompt separately so that we get nice
- # coloring through colorama on Windows
- echo(prompt.rstrip(" "), nl=False, err=err)
- # Echo a space to stdout to work around an issue where
- # readline causes backspace to clear the whole line.
- value = visible_prompt_func(" ").lower().strip()
- except (KeyboardInterrupt, EOFError):
- raise Abort() from None
- if value in ("y", "yes"):
- rv = True
- elif value in ("n", "no"):
- rv = False
- elif default is not None and value == "":
- rv = default
- else:
- echo(_("Error: invalid input"), err=err)
- continue
- break
- if abort and not rv:
- raise Abort()
- return rv
-
-
-def echo_via_pager(
- text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str],
- color: t.Optional[bool] = None,
-) -> None:
- """This function takes a text and shows it via an environment specific
- pager on stdout.
-
- .. versionchanged:: 3.0
- Added the `color` flag.
-
- :param text_or_generator: the text to page, or alternatively, a
- generator emitting the text to page.
- :param color: controls if the pager supports ANSI colors or not. The
- default is autodetection.
- """
- color = resolve_color_default(color)
-
- if inspect.isgeneratorfunction(text_or_generator):
- i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)()
- elif isinstance(text_or_generator, str):
- i = [text_or_generator]
- else:
- i = iter(t.cast(t.Iterable[str], text_or_generator))
-
- # convert every element of i to a text type if necessary
- text_generator = (el if isinstance(el, str) else str(el) for el in i)
-
- from ._termui_impl import pager
-
- return pager(itertools.chain(text_generator, "\n"), color)
-
-
-def progressbar(
- iterable: t.Optional[t.Iterable[V]] = None,
- length: t.Optional[int] = None,
- label: t.Optional[str] = None,
- show_eta: bool = True,
- show_percent: t.Optional[bool] = None,
- show_pos: bool = False,
- item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,
- fill_char: str = "#",
- empty_char: str = "-",
- bar_template: str = "%(label)s [%(bar)s] %(info)s",
- info_sep: str = " ",
- width: int = 36,
- file: t.Optional[t.TextIO] = None,
- color: t.Optional[bool] = None,
- update_min_steps: int = 1,
-) -> "ProgressBar[V]":
- """This function creates an iterable context manager that can be used
- to iterate over something while showing a progress bar. It will
- either iterate over the `iterable` or `length` items (that are counted
- up). While iteration happens, this function will print a rendered
- progress bar to the given `file` (defaults to stdout) and will attempt
- to calculate remaining time and more. By default, this progress bar
- will not be rendered if the file is not a terminal.
-
- The context manager creates the progress bar. When the context
- manager is entered the progress bar is already created. With every
- iteration over the progress bar, the iterable passed to the bar is
- advanced and the bar is updated. When the context manager exits,
- a newline is printed and the progress bar is finalized on screen.
-
- Note: The progress bar is currently designed for use cases where the
- total progress can be expected to take at least several seconds.
- Because of this, the ProgressBar class object won't display
- progress that is considered too fast, and progress where the time
- between steps is less than a second.
-
- No printing must happen or the progress bar will be unintentionally
- destroyed.
-
- Example usage::
-
- with progressbar(items) as bar:
- for item in bar:
- do_something_with(item)
-
- Alternatively, if no iterable is specified, one can manually update the
- progress bar through the `update()` method instead of directly
- iterating over the progress bar. The update method accepts the number
- of steps to increment the bar with::
-
- with progressbar(length=chunks.total_bytes) as bar:
- for chunk in chunks:
- process_chunk(chunk)
- bar.update(chunks.bytes)
-
- The ``update()`` method also takes an optional value specifying the
- ``current_item`` at the new position. This is useful when used
- together with ``item_show_func`` to customize the output for each
- manual step::
-
- with click.progressbar(
- length=total_size,
- label='Unzipping archive',
- item_show_func=lambda a: a.filename
- ) as bar:
- for archive in zip_file:
- archive.extract()
- bar.update(archive.size, archive)
-
- :param iterable: an iterable to iterate over. If not provided the length
- is required.
- :param length: the number of items to iterate over. By default the
- progressbar will attempt to ask the iterator about its
- length, which might or might not work. If an iterable is
- also provided this parameter can be used to override the
- length. If an iterable is not provided the progress bar
- will iterate over a range of that length.
- :param label: the label to show next to the progress bar.
- :param show_eta: enables or disables the estimated time display. This is
- automatically disabled if the length cannot be
- determined.
- :param show_percent: enables or disables the percentage display. The
- default is `True` if the iterable has a length or
- `False` if not.
- :param show_pos: enables or disables the absolute position display. The
- default is `False`.
- :param item_show_func: A function called with the current item which
- can return a string to show next to the progress bar. If the
- function returns ``None`` nothing is shown. The current item can
- be ``None``, such as when entering and exiting the bar.
- :param fill_char: the character to use to show the filled part of the
- progress bar.
- :param empty_char: the character to use to show the non-filled part of
- the progress bar.
- :param bar_template: the format string to use as template for the bar.
- The parameters in it are ``label`` for the label,
- ``bar`` for the progress bar and ``info`` for the
- info section.
- :param info_sep: the separator between multiple info items (eta etc.)
- :param width: the width of the progress bar in characters, 0 means full
- terminal width
- :param file: The file to write to. If this is not a terminal then
- only the label is printed.
- :param color: controls if the terminal supports ANSI colors or not. The
- default is autodetection. This is only needed if ANSI
- codes are included anywhere in the progress bar output
- which is not the case by default.
- :param update_min_steps: Render only when this many updates have
- completed. This allows tuning for very fast iterators.
-
- .. versionchanged:: 8.0
- Output is shown even if execution time is less than 0.5 seconds.
-
- .. versionchanged:: 8.0
- ``item_show_func`` shows the current item, not the previous one.
-
- .. versionchanged:: 8.0
- Labels are echoed if the output is not a TTY. Reverts a change
- in 7.0 that removed all output.
-
- .. versionadded:: 8.0
- Added the ``update_min_steps`` parameter.
-
- .. versionchanged:: 4.0
- Added the ``color`` parameter. Added the ``update`` method to
- the object.
-
- .. versionadded:: 2.0
- """
- from ._termui_impl import ProgressBar
-
- color = resolve_color_default(color)
- return ProgressBar(
- iterable=iterable,
- length=length,
- show_eta=show_eta,
- show_percent=show_percent,
- show_pos=show_pos,
- item_show_func=item_show_func,
- fill_char=fill_char,
- empty_char=empty_char,
- bar_template=bar_template,
- info_sep=info_sep,
- file=file,
- label=label,
- width=width,
- color=color,
- update_min_steps=update_min_steps,
- )
-
-
-def clear() -> None:
- """Clears the terminal screen. This will have the effect of clearing
- the whole visible space of the terminal and moving the cursor to the
- top left. This does not do anything if not connected to a terminal.
-
- .. versionadded:: 2.0
- """
- if not isatty(sys.stdout):
- return
-
- # ANSI escape \033[2J clears the screen, \033[1;1H moves the cursor
- echo("\033[2J\033[1;1H", nl=False)
-
-
-def _interpret_color(
- color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0
-) -> str:
- if isinstance(color, int):
- return f"{38 + offset};5;{color:d}"
-
- if isinstance(color, (tuple, list)):
- r, g, b = color
- return f"{38 + offset};2;{r:d};{g:d};{b:d}"
-
- return str(_ansi_colors[color] + offset)
-
-
-def style(
- text: t.Any,
- fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
- bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
- bold: t.Optional[bool] = None,
- dim: t.Optional[bool] = None,
- underline: t.Optional[bool] = None,
- overline: t.Optional[bool] = None,
- italic: t.Optional[bool] = None,
- blink: t.Optional[bool] = None,
- reverse: t.Optional[bool] = None,
- strikethrough: t.Optional[bool] = None,
- reset: bool = True,
-) -> str:
- """Styles a text with ANSI styles and returns the new string. By
- default the styling is self contained which means that at the end
- of the string a reset code is issued. This can be prevented by
- passing ``reset=False``.
-
- Examples::
-
- click.echo(click.style('Hello World!', fg='green'))
- click.echo(click.style('ATTENTION!', blink=True))
- click.echo(click.style('Some things', reverse=True, fg='cyan'))
- click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))
-
- Supported color names:
-
- * ``black`` (might be a gray)
- * ``red``
- * ``green``
- * ``yellow`` (might be an orange)
- * ``blue``
- * ``magenta``
- * ``cyan``
- * ``white`` (might be light gray)
- * ``bright_black``
- * ``bright_red``
- * ``bright_green``
- * ``bright_yellow``
- * ``bright_blue``
- * ``bright_magenta``
- * ``bright_cyan``
- * ``bright_white``
- * ``reset`` (reset the color code only)
-
- If the terminal supports it, color may also be specified as:
-
- - An integer in the interval [0, 255]. The terminal must support
- 8-bit/256-color mode.
- - An RGB tuple of three integers in [0, 255]. The terminal must
- support 24-bit/true-color mode.
-
- See https://en.wikipedia.org/wiki/ANSI_color and
- https://gist.github.com/XVilka/8346728 for more information.
-
- :param text: the string to style with ansi codes.
- :param fg: if provided this will become the foreground color.
- :param bg: if provided this will become the background color.
- :param bold: if provided this will enable or disable bold mode.
- :param dim: if provided this will enable or disable dim mode. This is
- badly supported.
- :param underline: if provided this will enable or disable underline.
- :param overline: if provided this will enable or disable overline.
- :param italic: if provided this will enable or disable italic.
- :param blink: if provided this will enable or disable blinking.
- :param reverse: if provided this will enable or disable inverse
- rendering (foreground becomes background and the
- other way round).
- :param strikethrough: if provided this will enable or disable
- striking through text.
- :param reset: by default a reset-all code is added at the end of the
- string which means that styles do not carry over. This
- can be disabled to compose styles.
-
- .. versionchanged:: 8.0
- A non-string ``message`` is converted to a string.
-
- .. versionchanged:: 8.0
- Added support for 256 and RGB color codes.
-
- .. versionchanged:: 8.0
- Added the ``strikethrough``, ``italic``, and ``overline``
- parameters.
-
- .. versionchanged:: 7.0
- Added support for bright colors.
-
- .. versionadded:: 2.0
- """
- if not isinstance(text, str):
- text = str(text)
-
- bits = []
-
- if fg:
- try:
- bits.append(f"\033[{_interpret_color(fg)}m")
- except KeyError:
- raise TypeError(f"Unknown color {fg!r}") from None
-
- if bg:
- try:
- bits.append(f"\033[{_interpret_color(bg, 10)}m")
- except KeyError:
- raise TypeError(f"Unknown color {bg!r}") from None
-
- if bold is not None:
- bits.append(f"\033[{1 if bold else 22}m")
- if dim is not None:
- bits.append(f"\033[{2 if dim else 22}m")
- if underline is not None:
- bits.append(f"\033[{4 if underline else 24}m")
- if overline is not None:
- bits.append(f"\033[{53 if overline else 55}m")
- if italic is not None:
- bits.append(f"\033[{3 if italic else 23}m")
- if blink is not None:
- bits.append(f"\033[{5 if blink else 25}m")
- if reverse is not None:
- bits.append(f"\033[{7 if reverse else 27}m")
- if strikethrough is not None:
- bits.append(f"\033[{9 if strikethrough else 29}m")
- bits.append(text)
- if reset:
- bits.append(_ansi_reset_all)
- return "".join(bits)
-
-
-def unstyle(text: str) -> str:
- """Removes ANSI styling information from a string. Usually it's not
- necessary to use this function as Click's echo function will
- automatically remove styling if necessary.
-
- .. versionadded:: 2.0
-
- :param text: the text to remove style information from.
- """
- return strip_ansi(text)
-
-
-def secho(
- message: t.Optional[t.Any] = None,
- file: t.Optional[t.IO[t.AnyStr]] = None,
- nl: bool = True,
- err: bool = False,
- color: t.Optional[bool] = None,
- **styles: t.Any,
-) -> None:
- """This function combines :func:`echo` and :func:`style` into one
- call. As such the following two calls are the same::
-
- click.secho('Hello World!', fg='green')
- click.echo(click.style('Hello World!', fg='green'))
-
- All keyword arguments are forwarded to the underlying functions
- depending on which one they go with.
-
- Non-string types will be converted to :class:`str`. However,
- :class:`bytes` are passed directly to :meth:`echo` without applying
- style. If you want to style bytes that represent text, call
- :meth:`bytes.decode` first.
-
- .. versionchanged:: 8.0
- A non-string ``message`` is converted to a string. Bytes are
- passed through without style applied.
-
- .. versionadded:: 2.0
- """
- if message is not None and not isinstance(message, (bytes, bytearray)):
- message = style(message, **styles)
-
- return echo(message, file=file, nl=nl, err=err, color=color)
-
-
-def edit(
- text: t.Optional[t.AnyStr] = None,
- editor: t.Optional[str] = None,
- env: t.Optional[t.Mapping[str, str]] = None,
- require_save: bool = True,
- extension: str = ".txt",
- filename: t.Optional[str] = None,
-) -> t.Optional[t.AnyStr]:
- r"""Edits the given text in the defined editor. If an editor is given
- (should be the full path to the executable but the regular operating
- system search path is used for finding the executable) it overrides
- the detected editor. Optionally, some environment variables can be
- used. If the editor is closed without changes, `None` is returned. In
- case a file is edited directly the return value is always `None` and
- `require_save` and `extension` are ignored.
-
- If the editor cannot be opened a :exc:`UsageError` is raised.
-
- Note for Windows: to simplify cross-platform usage, the newlines are
- automatically converted from POSIX to Windows and vice versa. As such,
- the message here will have ``\n`` as newline markers.
-
- :param text: the text to edit.
- :param editor: optionally the editor to use. Defaults to automatic
- detection.
- :param env: environment variables to forward to the editor.
- :param require_save: if this is true, then not saving in the editor
- will make the return value become `None`.
- :param extension: the extension to tell the editor about. This defaults
- to `.txt` but changing this might change syntax
- highlighting.
- :param filename: if provided it will edit this file instead of the
- provided text contents. It will not use a temporary
- file as an indirection in that case.
- """
- from ._termui_impl import Editor
-
- ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension)
-
- if filename is None:
- return ed.edit(text)
-
- ed.edit_file(filename)
- return None
-
-
-def launch(url: str, wait: bool = False, locate: bool = False) -> int:
- """This function launches the given URL (or filename) in the default
- viewer application for this file type. If this is an executable, it
- might launch the executable in a new session. The return value is
- the exit code of the launched application. Usually, ``0`` indicates
- success.
-
- Examples::
-
- click.launch('https://click.palletsprojects.com/')
- click.launch('/my/downloaded/file', locate=True)
-
- .. versionadded:: 2.0
-
- :param url: URL or filename of the thing to launch.
- :param wait: Wait for the program to exit before returning. This
- only works if the launched program blocks. In particular,
- ``xdg-open`` on Linux does not block.
- :param locate: if this is set to `True` then instead of launching the
- application associated with the URL it will attempt to
- launch a file manager with the file located. This
- might have weird effects if the URL does not point to
- the filesystem.
- """
- from ._termui_impl import open_url
-
- return open_url(url, wait=wait, locate=locate)
-
-
-# If this is provided, getchar() calls into this instead. This is used
-# for unittesting purposes.
-_getchar: t.Optional[t.Callable[[bool], str]] = None
-
-
-def getchar(echo: bool = False) -> str:
- """Fetches a single character from the terminal and returns it. This
- will always return a unicode character and under certain rare
- circumstances this might return more than one character. The
- situations which more than one character is returned is when for
- situations in which more than one character is returned are when, for
- whatever reason, multiple characters end up in the terminal buffer or
- standard input is not actually a terminal.
- Note that this will always read from the terminal, even if something
- is piped into the standard input.
-
- Note for Windows: in rare cases when typing non-ASCII characters, this
- function might wait for a second character and then return both at once.
- This is because certain Unicode characters look like special-key markers.
-
- .. versionadded:: 2.0
-
- :param echo: if set to `True`, the character read will also show up on
- the terminal. The default is to not show it.
- """
- global _getchar
-
- if _getchar is None:
- from ._termui_impl import getchar as f
-
- _getchar = f
-
- return _getchar(echo)
-
-
-def raw_terminal() -> t.ContextManager[int]:
- from ._termui_impl import raw_terminal as f
-
- return f()
-
-
-def pause(info: t.Optional[str] = None, err: bool = False) -> None:
- """This command stops execution and waits for the user to press any
- key to continue. This is similar to the Windows batch "pause"
- command. If the program is not run through a terminal, this command
- will instead do nothing.
-
- .. versionadded:: 2.0
-
- .. versionadded:: 4.0
- Added the `err` parameter.
-
- :param info: The message to print before pausing. Defaults to
- ``"Press any key to continue..."``.
- :param err: if set to true the message goes to ``stderr`` instead of
- ``stdout``, the same as with echo.
- """
- if not isatty(sys.stdin) or not isatty(sys.stdout):
- return
-
- if info is None:
- info = _("Press any key to continue...")
-
- try:
- if info:
- echo(info, nl=False, err=err)
- try:
- getchar()
- except (KeyboardInterrupt, EOFError):
- pass
- finally:
- if info:
- echo(err=err)
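A brief sketch of how the helpers defined above are typically used through click's public API (prompt, confirm, secho and progressbar are re-exported by the click package):

import click

name = click.prompt("Your name", default="world")
if click.confirm(f"Greet {name}?", default=True):
    click.secho(f"Hello, {name}!", fg="green", bold=True)

with click.progressbar(range(100), label="Working") as bar:
    for _ in bar:
        pass  # one unit of work per iteration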
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/registry.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/registry.py
deleted file mode 100644
index 851bc65bc8fa1ea01a48d425563bce06ccfe8ecd..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/registry.py
+++ /dev/null
@@ -1,297 +0,0 @@
-from __future__ import annotations
-
-import importlib
-import types
-import warnings
-
-__all__ = ["registry", "get_filesystem_class", "default"]
-
-# internal, mutable
-_registry: dict[str, type] = {}
-
-# external, immutable
-registry = types.MappingProxyType(_registry)
-default = "file"
-
-
-def register_implementation(name, cls, clobber=False, errtxt=None):
- """Add implementation class to the registry
-
- Parameters
- ----------
- name: str
- Protocol name to associate with the class
- cls: class or str
- if a class: an fsspec-compliant implementation class (normally inheriting
- from ``fsspec.AbstractFileSystem``); it gets added straight to the registry.
- If a str: the full path to an implementation class like package.module.class,
- which gets added to known_implementations,
- so the import is deferred until the filesystem is actually used.
- clobber: bool (optional)
- Whether to overwrite a protocol with the same name; if False, will raise
- instead.
- errtxt: str (optional)
- If given, then a failure to import the given class will result in this
- error text being shown.
- """
- if isinstance(cls, str):
- if name in known_implementations and clobber is False:
- if cls != known_implementations[name]["class"]:
- raise ValueError(
- "Name (%s) already in the known_implementations and clobber "
- "is False" % name
- )
- else:
- known_implementations[name] = {
- "class": cls,
- "err": errtxt or "%s import failed for protocol %s" % (cls, name),
- }
-
- else:
- if name in registry and clobber is False:
- if _registry[name] is not cls:
- raise ValueError(
- "Name (%s) already in the registry and clobber is False" % name
- )
- else:
- _registry[name] = cls
-
-
-# Protocols mapped to the class which implements them. This dict can be
-# updated with register_implementation.
-known_implementations = {
- "file": {"class": "fsspec.implementations.local.LocalFileSystem"},
- "memory": {"class": "fsspec.implementations.memory.MemoryFileSystem"},
- "dropbox": {
- "class": "dropboxdrivefs.DropboxDriveFileSystem",
- "err": (
- 'DropboxFileSystem requires "dropboxdrivefs",'
- '"requests" and "dropbox" to be installed'
- ),
- },
- "http": {
- "class": "fsspec.implementations.http.HTTPFileSystem",
- "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed',
- },
- "https": {
- "class": "fsspec.implementations.http.HTTPFileSystem",
- "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed',
- },
- "zip": {"class": "fsspec.implementations.zip.ZipFileSystem"},
- "tar": {"class": "fsspec.implementations.tar.TarFileSystem"},
- "gcs": {
- "class": "gcsfs.GCSFileSystem",
- "err": "Please install gcsfs to access Google Storage",
- },
- "gs": {
- "class": "gcsfs.GCSFileSystem",
- "err": "Please install gcsfs to access Google Storage",
- },
- "gdrive": {
- "class": "gdrivefs.GoogleDriveFileSystem",
- "err": "Please install gdrivefs for access to Google Drive",
- },
- "sftp": {
- "class": "fsspec.implementations.sftp.SFTPFileSystem",
- "err": 'SFTPFileSystem requires "paramiko" to be installed',
- },
- "ssh": {
- "class": "fsspec.implementations.sftp.SFTPFileSystem",
- "err": 'SFTPFileSystem requires "paramiko" to be installed',
- },
- "ftp": {"class": "fsspec.implementations.ftp.FTPFileSystem"},
- "hdfs": {
- "class": "fsspec.implementations.arrow.HadoopFileSystem",
- "err": "pyarrow and local java libraries required for HDFS",
- },
- "arrow_hdfs": {
- "class": "fsspec.implementations.arrow.HadoopFileSystem",
- "err": "pyarrow and local java libraries required for HDFS",
- },
- "webhdfs": {
- "class": "fsspec.implementations.webhdfs.WebHDFS",
- "err": 'webHDFS access requires "requests" to be installed',
- },
- "s3": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"},
- "s3a": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"},
- "wandb": {"class": "wandbfs.WandbFS", "err": "Install wandbfs to access wandb"},
- "oci": {
- "class": "ocifs.OCIFileSystem",
- "err": "Install ocifs to access OCI Object Storage",
- },
- "ocilake": {
- "class": "ocifs.OCIFileSystem",
- "err": "Install ocifs to access OCI Data Lake",
- },
- "asynclocal": {
- "class": "morefs.asyn_local.AsyncLocalFileSystem",
- "err": "Install 'morefs[asynclocalfs]' to use AsyncLocalFileSystem",
- },
- "adl": {
- "class": "adlfs.AzureDatalakeFileSystem",
- "err": "Install adlfs to access Azure Datalake Gen1",
- },
- "abfs": {
- "class": "adlfs.AzureBlobFileSystem",
- "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage",
- },
- "az": {
- "class": "adlfs.AzureBlobFileSystem",
- "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage",
- },
- "cached": {"class": "fsspec.implementations.cached.CachingFileSystem"},
- "blockcache": {"class": "fsspec.implementations.cached.CachingFileSystem"},
- "filecache": {"class": "fsspec.implementations.cached.WholeFileCacheFileSystem"},
- "simplecache": {"class": "fsspec.implementations.cached.SimpleCacheFileSystem"},
- "dask": {
- "class": "fsspec.implementations.dask.DaskWorkerFileSystem",
- "err": "Install dask distributed to access worker file system",
- },
- "dbfs": {
- "class": "fsspec.implementations.dbfs.DatabricksFileSystem",
- "err": "Install the requests package to use the DatabricksFileSystem",
- },
- "github": {
- "class": "fsspec.implementations.github.GithubFileSystem",
- "err": "Install the requests package to use the github FS",
- },
- "git": {
- "class": "fsspec.implementations.git.GitFileSystem",
- "err": "Install pygit2 to browse local git repos",
- },
- "smb": {
- "class": "fsspec.implementations.smb.SMBFileSystem",
- "err": 'SMB requires "smbprotocol" or "smbprotocol[kerberos]" installed',
- },
- "jupyter": {
- "class": "fsspec.implementations.jupyter.JupyterFileSystem",
- "err": "Jupyter FS requires requests to be installed",
- },
- "jlab": {
- "class": "fsspec.implementations.jupyter.JupyterFileSystem",
- "err": "Jupyter FS requires requests to be installed",
- },
- "libarchive": {
- "class": "fsspec.implementations.libarchive.LibArchiveFileSystem",
- "err": "LibArchive requires to be installed",
- },
- "reference": {"class": "fsspec.implementations.reference.ReferenceFileSystem"},
- "generic": {"class": "fsspec.generic.GenericFileSystem"},
- "oss": {
- "class": "ossfs.OSSFileSystem",
- "err": "Install ossfs to access Alibaba Object Storage System",
- },
- "webdav": {
- "class": "webdav4.fsspec.WebdavFileSystem",
- "err": "Install webdav4 to access WebDAV",
- },
- "dvc": {
- "class": "dvc.api.DVCFileSystem",
- "err": "Install dvc to access DVCFileSystem",
- },
- "hf": {
- "class": "huggingface_hub.HfFileSystem",
- "err": "Install huggingface_hub to access HfFileSystem",
- },
- "root": {
- "class": "fsspec_xrootd.XRootDFileSystem",
- "err": "Install fsspec-xrootd to access xrootd storage system."
- + " Note: 'root' is the protocol name for xrootd storage systems,"
- + " not referring to root directories",
- },
- "dir": {"class": "fsspec.implementations.dirfs.DirFileSystem"},
- "box": {
- "class": "boxfs.BoxFileSystem",
- "err": "Please install boxfs to access BoxFileSystem",
- },
- "lakefs": {
- "class": "lakefs_spec.LakeFSFileSystem",
- "err": "Please install lakefs-spec to access LakeFSFileSystem",
- },
-}
-
-
-def get_filesystem_class(protocol):
- """Fetch named protocol implementation from the registry
-
- The dict ``known_implementations`` maps protocol names to the locations
- of classes implementing the corresponding file-system. When used for the
- first time, appropriate imports will happen and the class will be placed in
- the registry. All subsequent calls will fetch directly from the registry.
-
- Some protocol implementations require additional dependencies, and so the
- import may fail. In this case, the string in the "err" field of the
- ``known_implementations`` will be given as the error message.
- """
- if not protocol:
- protocol = default
-
- if protocol not in registry:
- if protocol not in known_implementations:
- raise ValueError("Protocol not known: %s" % protocol)
- bit = known_implementations[protocol]
- try:
- register_implementation(protocol, _import_class(bit["class"]))
- except ImportError as e:
- raise ImportError(bit["err"]) from e
- cls = registry[protocol]
- if getattr(cls, "protocol", None) in ("abstract", None):
- cls.protocol = protocol
-
- return cls
-
-
-s3_msg = """Your installed version of s3fs is very old and known to cause
-severe performance issues, see also https://github.com/dask/dask/issues/10276
-
-To fix, you should specify a lower version bound on s3fs, or
-update the current installation.
-"""
-
-
-def _import_class(cls, minv=None):
- """Take a string FQP and return the imported class or identifier
-
- cls is of the form "package.module.klass" or "package.module:subobject.klass"
- """
- if ":" in cls:
- mod, name = cls.rsplit(":", 1)
- s3 = mod == "s3fs"
- mod = importlib.import_module(mod)
- if s3 and mod.__version__.split(".") < ["0", "5"]:
- warnings.warn(s3_msg)
- for part in name.split("."):
- mod = getattr(mod, part)
- return mod
- else:
- mod, name = cls.rsplit(".", 1)
- s3 = mod == "s3fs"
- mod = importlib.import_module(mod)
- if s3 and mod.__version__.split(".") < ["0", "5"]:
- warnings.warn(s3_msg)
- return getattr(mod, name)
-
-
-def filesystem(protocol, **storage_options):
- """Instantiate filesystems for given protocol and arguments
-
- ``storage_options`` are specific to the protocol being chosen, and are
- passed directly to the class.
- """
- if protocol == "arrow_hdfs":
- warnings.warn(
- "The 'arrow_hdfs' protocol has been deprecated and will be "
- "removed in the future. Specify it as 'hdfs'.",
- DeprecationWarning,
- )
-
- cls = get_filesystem_class(protocol)
- return cls(**storage_options)
-
-
-def available_protocols():
- """Return a list of the implemented protocols.
-
- Note that any given protocol may require extra packages to be importable.
- """
- return list(known_implementations)
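A short sketch of the registry flow above: register a protocol by dotted path (its import is deferred), then instantiate a filesystem through fsspec.filesystem(). The myproto protocol and mypackage class are hypothetical:

import fsspec
from fsspec.registry import register_implementation, available_protocols

register_implementation(
    "myproto",                    # hypothetical protocol name
    "mypackage.fs.MyFileSystem",  # hypothetical class path; imported only on first use
    errtxt="Install mypackage to use myproto://",
)
print("myproto" in available_protocols())  # True

fs = fsspec.filesystem("memory")           # built-in protocol from known_implementations
fs.pipe("/demo/hello.txt", b"hi there")
print(fs.cat("/demo/hello.txt"))           # b'hi there'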
diff --git a/spaces/joushe/moe-tts/text/sanskrit.py b/spaces/joushe/moe-tts/text/sanskrit.py
deleted file mode 100644
index 0223aaac384a2f850f5bc20651fc18eb964607d0..0000000000000000000000000000000000000000
--- a/spaces/joushe/moe-tts/text/sanskrit.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import re
-from indic_transliteration import sanscript
-
-
-# List of (iast, ipa) pairs:
-_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('a', 'ə'),
- ('ā', 'aː'),
- ('ī', 'iː'),
- ('ū', 'uː'),
- ('ṛ', 'ɹ`'),
- ('ṝ', 'ɹ`ː'),
- ('ḷ', 'l`'),
- ('ḹ', 'l`ː'),
- ('e', 'eː'),
- ('o', 'oː'),
- ('k', 'k⁼'),
- ('k⁼h', 'kʰ'),
- ('g', 'g⁼'),
- ('g⁼h', 'gʰ'),
- ('ṅ', 'ŋ'),
- ('c', 'ʧ⁼'),
- ('ʧ⁼h', 'ʧʰ'),
- ('j', 'ʥ⁼'),
- ('ʥ⁼h', 'ʥʰ'),
- ('ñ', 'n^'),
- ('ṭ', 't`⁼'),
- ('t`⁼h', 't`ʰ'),
- ('ḍ', 'd`⁼'),
- ('d`⁼h', 'd`ʰ'),
- ('ṇ', 'n`'),
- ('t', 't⁼'),
- ('t⁼h', 'tʰ'),
- ('d', 'd⁼'),
- ('d⁼h', 'dʰ'),
- ('p', 'p⁼'),
- ('p⁼h', 'pʰ'),
- ('b', 'b⁼'),
- ('b⁼h', 'bʰ'),
- ('y', 'j'),
- ('ś', 'ʃ'),
- ('ṣ', 's`'),
- ('r', 'ɾ'),
- ('l̤', 'l`'),
- ('h', 'ɦ'),
- ("'", ''),
- ('~', '^'),
- ('ṃ', '^')
-]]
-
-
-def devanagari_to_ipa(text):
- text = text.replace('ॐ', 'ओम्')
- text = re.sub(r'\s*।\s*$', '.', text)
- text = re.sub(r'\s*।\s*', ', ', text)
- text = re.sub(r'\s*॥', '.', text)
- text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
- for regex, replacement in _iast_to_ipa:
- text = re.sub(regex, replacement, text)
- text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)
- [:-1]+'h'+x.group(1)+'*', text)
- return text
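A hypothetical call into the converter above (requires indic_transliteration to be installed; the text.sanskrit import path follows this Space's layout):

from text.sanskrit import devanagari_to_ipa

print(devanagari_to_ipa("ॐ नमः शिवाय"))  # Devanagari in, approximate IPA out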
diff --git a/spaces/justest/ai-support/README.md b/spaces/justest/ai-support/README.md
deleted file mode 100644
index b4b34ff25b551529947161a19a670e4c5718b0dd..0000000000000000000000000000000000000000
--- a/spaces/justest/ai-support/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: ai-support
-emoji: 💻🐳
-colorFrom: red
-colorTo: blue
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/kadirnar/AnimeSR/README.md b/spaces/kadirnar/AnimeSR/README.md
deleted file mode 100644
index 83ccae7d07e0ad2ef254cf3d46e632614b911d69..0000000000000000000000000000000000000000
--- a/spaces/kadirnar/AnimeSR/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: AnimeSR
-emoji: 👀
-colorFrom: green
-colorTo: purple
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: apache-2.0
-tags:
-- making-demos
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/kaicheng/ChatGPT_ad/modules/config.py b/spaces/kaicheng/ChatGPT_ad/modules/config.py
deleted file mode 100644
index c9224996dd7056508519be8cbe906746f362abb0..0000000000000000000000000000000000000000
--- a/spaces/kaicheng/ChatGPT_ad/modules/config.py
+++ /dev/null
@@ -1,190 +0,0 @@
-from collections import defaultdict
-from contextlib import contextmanager
-import os
-import logging
-import sys
-import commentjson as json
-
-from . import shared
-from . import presets
-
-
-__all__ = [
- "my_api_key",
- "authflag",
- "auth_list",
- "dockerflag",
- "retrieve_proxy",
- "log_level",
- "advance_docs",
- "update_doc_config",
- "usage_limit",
- "multi_api_key",
- "server_name",
- "server_port",
- "share",
- "hide_history_when_not_logged_in",
- "default_chuanhu_assistant_model"
-]
-
-# Add a single unified config file to avoid the confusion caused by having too many files (lowest priority)
-# It also lays the groundwork for config support of future custom features
-if os.path.exists("config.json"):
- with open("config.json", "r", encoding='utf-8') as f:
- config = json.load(f)
-else:
- config = {}
-
-lang_config = config.get("language", "auto")
-language = os.environ.get("LANGUAGE", lang_config)
-
-hide_history_when_not_logged_in = config.get("hide_history_when_not_logged_in", False)
-
-if os.path.exists("api_key.txt"):
- logging.info("api_key.txt detected, migrating...")
- with open("api_key.txt", "r", encoding="utf-8") as f:
- config["openai_api_key"] = f.read().strip()
- os.rename("api_key.txt", "api_key(deprecated).txt")
- with open("config.json", "w", encoding='utf-8') as f:
- json.dump(config, f, indent=4, ensure_ascii=False)
-
-if os.path.exists("auth.json"):
- logging.info("auth.json detected, migrating...")
- auth_list = []
- with open("auth.json", "r", encoding='utf-8') as f:
- auth = json.load(f)
- for _ in auth:
- if auth[_]["username"] and auth[_]["password"]:
- auth_list.append((auth[_]["username"], auth[_]["password"]))
- else:
- logging.error("Please check the usernames and passwords in auth.json!")
- sys.exit(1)
- config["users"] = auth_list
- os.rename("auth.json", "auth(deprecated).json")
- with open("config.json", "w", encoding='utf-8') as f:
- json.dump(config, f, indent=4, ensure_ascii=False)
-
-## Handle Docker: if we are running in Docker
-dockerflag = config.get("dockerflag", False)
-if os.environ.get("dockerrun") == "yes":
- dockerflag = True
-
-## Handle the api-key and the list of allowed users
-my_api_key = config.get("openai_api_key", "")
-my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
-
-xmchat_api_key = config.get("xmchat_api_key", "")
-os.environ["XMCHAT_API_KEY"] = xmchat_api_key
-
-minimax_api_key = config.get("minimax_api_key", "")
-os.environ["MINIMAX_API_KEY"] = minimax_api_key
-minimax_group_id = config.get("minimax_group_id", "")
-os.environ["MINIMAX_GROUP_ID"] = minimax_group_id
-
-
-usage_limit = os.environ.get("USAGE_LIMIT", config.get("usage_limit", 120))
-
-## Multi-account mechanism
-multi_api_key = config.get("multi_api_key", False) # whether the multi-account mechanism is enabled
-if multi_api_key:
- api_key_list = config.get("api_key_list", [])
- if len(api_key_list) == 0:
- logging.error("Multi-account mode is enabled but api_key_list is empty, please check config.json")
- sys.exit(1)
- shared.state.set_api_key_queue(api_key_list)
-
-auth_list = config.get("users", []) # effectively the list of users
-authflag = len(auth_list) > 0 # whether authentication is enabled, determined by the length of auth_list
-
-# Handle a custom api_host: read the environment variable first and wire it up automatically if present
-api_host = os.environ.get("OPENAI_API_BASE", config.get("openai_api_base", None))
-if api_host is not None:
- shared.state.set_api_host(api_host)
-
-default_chuanhu_assistant_model = config.get("default_chuanhu_assistant_model", "gpt-3.5-turbo")
-for x in ["GOOGLE_CSE_ID", "GOOGLE_API_KEY", "WOLFRAM_ALPHA_APPID", "SERPAPI_API_KEY"]:
- if config.get(x, None) is not None:
- os.environ[x] = config[x]
-
-@contextmanager
-def retrieve_openai_api(api_key = None):
- old_api_key = os.environ.get("OPENAI_API_KEY", "")
- if api_key is None:
- os.environ["OPENAI_API_KEY"] = my_api_key
- yield my_api_key
- else:
- os.environ["OPENAI_API_KEY"] = api_key
- yield api_key
- os.environ["OPENAI_API_KEY"] = old_api_key
-
-## Handle logging
-log_level = config.get("log_level", "INFO")
-logging.basicConfig(
- level=log_level,
- format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
-)
-
-## Handle proxies:
-http_proxy = config.get("http_proxy", "")
-https_proxy = config.get("https_proxy", "")
-http_proxy = os.environ.get("HTTP_PROXY", http_proxy)
-https_proxy = os.environ.get("HTTPS_PROXY", https_proxy)
-
-# Reset the system variables; leave the environment variables unset when not needed to avoid global proxy errors
-os.environ["HTTP_PROXY"] = ""
-os.environ["HTTPS_PROXY"] = ""
-
-local_embedding = config.get("local_embedding", False) # whether to use local embeddings
-
-@contextmanager
-def retrieve_proxy(proxy=None):
- """
- 1. If proxy is None, set the environment variables and return the currently configured proxy
- 2. If proxy is not None, update the current proxy configuration, but do not update the environment variables
- """
- global http_proxy, https_proxy
- if proxy is not None:
- http_proxy = proxy
- https_proxy = proxy
- yield http_proxy, https_proxy
- else:
- old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"]
- os.environ["HTTP_PROXY"] = http_proxy
- os.environ["HTTPS_PROXY"] = https_proxy
- yield http_proxy, https_proxy # return new proxy
-
- # return old proxy
- os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var
-
-
-## Handle advance docs
-advance_docs = defaultdict(lambda: defaultdict(dict))
-advance_docs.update(config.get("advance_docs", {}))
-def update_doc_config(two_column_pdf):
- global advance_docs
- advance_docs["pdf"]["two_column"] = two_column_pdf
-
- logging.info(f"更新后的文件参数为:{advance_docs}")
-
-## Handle gradio.launch parameters
-server_name = config.get("server_name", None)
-server_port = config.get("server_port", None)
-if server_name is None:
- if dockerflag:
- server_name = "0.0.0.0"
- else:
- server_name = "127.0.0.1"
-if server_port is None:
- if dockerflag:
- server_port = 7860
-
-assert server_port is None or type(server_port) == int, "要求port设置为int类型"
-
-# Set the default model
-default_model = config.get("default_model", "")
-try:
- presets.DEFAULT_MODEL = presets.MODELS.index(default_model)
-except ValueError:
- pass
-
-share = config.get("share", False)
diff --git a/spaces/kangvcar/RealChar/CHANGELOG.md b/spaces/kangvcar/RealChar/CHANGELOG.md
deleted file mode 100644
index 8adabf357fb0f496f0e407a91aca915e78deaded..0000000000000000000000000000000000000000
--- a/spaces/kangvcar/RealChar/CHANGELOG.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# ChangeLog
-
-## [v0.0.1] - 2023-07-19
-Release Highlights:
-
-### Product releases and updates:
-- iOS App TestFlight public beta (link https://testflight.apple.com/join/JA6p9sZQ)
-- Rewrite Web codebase from vanilla JavaScript to use React framework w/ Javascript
-- Support Unicode in chat messages
-- Various UI refinements
-
-### Integration updates:
-- Support Azure OpenAI
-
-### Observability and quality updates:
-- Support Integration with LangSmith
-- Reduce Docker rebuild time to ~2 seconds
-- Support string based user ID
-- Support Session ID, Platform, Action Type in database records.
-
-### New Tutorial:
-[How to make your own AI character and run it locally](https://youtu.be/meg5Q8vdWeQ)
diff --git a/spaces/kcagle/AutoGPT/autogpt/speech/gtts.py b/spaces/kcagle/AutoGPT/autogpt/speech/gtts.py
deleted file mode 100644
index 1c3e9cae0567428582891b11eca42f82a64f5c8e..0000000000000000000000000000000000000000
--- a/spaces/kcagle/AutoGPT/autogpt/speech/gtts.py
+++ /dev/null
@@ -1,22 +0,0 @@
-""" GTTS Voice. """
-import os
-
-import gtts
-from playsound import playsound
-
-from autogpt.speech.base import VoiceBase
-
-
-class GTTSVoice(VoiceBase):
- """GTTS Voice."""
-
- def _setup(self) -> None:
- pass
-
- def _speech(self, text: str, _: int = 0) -> bool:
- """Play the given text."""
- tts = gtts.gTTS(text)
- tts.save("speech.mp3")
- playsound("speech.mp3", True)
- os.remove("speech.mp3")
- return True
diff --git a/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer_train.py b/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer_train.py
deleted file mode 100644
index 2743d590d882f209734b68921b84a9d23492942c..0000000000000000000000000000000000000000
--- a/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer_train.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from synthesizer.hparams import hparams
-from synthesizer.train import train
-from utils.argutils import print_args
-import argparse
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("run_id", type=str, help= \
- "Name for this model instance. If a model state from the same run ID was previously "
- "saved, the training will restart from there. Pass -f to overwrite saved states and "
- "restart from scratch.")
- parser.add_argument("syn_dir", type=str, default=argparse.SUPPRESS, help= \
- "Path to the synthesizer directory that contains the ground truth mel spectrograms, "
- "the wavs and the embeds.")
- parser.add_argument("-m", "--models_dir", type=str, default="synthesizer/saved_models/", help=\
- "Path to the output directory that will contain the saved model weights and the logs.")
- parser.add_argument("-s", "--save_every", type=int, default=1000, help= \
- "Number of steps between updates of the model on the disk. Set to 0 to never save the "
- "model.")
- parser.add_argument("-b", "--backup_every", type=int, default=25000, help= \
- "Number of steps between backups of the model. Set to 0 to never make backups of the "
- "model.")
- parser.add_argument("-f", "--force_restart", action="store_true", help= \
- "Do not load any saved model and restart from scratch.")
- parser.add_argument("--hparams", default="",
- help="Hyperparameter overrides as a comma-separated list of name=value "
- "pairs")
- args = parser.parse_args()
- print_args(args, parser)
-
- args.hparams = hparams.parse(args.hparams)
-
- # Run the training
- train(**vars(args))
diff --git a/spaces/kevinwang676/M4Singer/utils/audio.py b/spaces/kevinwang676/M4Singer/utils/audio.py
deleted file mode 100644
index aba7ab926cf793d085bbdc70c97f376001183fe1..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/M4Singer/utils/audio.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import subprocess
-import matplotlib
-
-matplotlib.use('Agg')
-import librosa
-import librosa.filters
-import numpy as np
-from scipy import signal
-from scipy.io import wavfile
-
-
-def save_wav(wav, path, sr, norm=False):
- if norm:
- wav = wav / np.abs(wav).max()
- wav *= 32767
- # proposed by @dsmiller
- wavfile.write(path, sr, wav.astype(np.int16))
-
-
-def get_hop_size(hparams):
- hop_size = hparams['hop_size']
- if hop_size is None:
- assert hparams['frame_shift_ms'] is not None
- hop_size = int(hparams['frame_shift_ms'] / 1000 * hparams['audio_sample_rate'])
- return hop_size
-
-
-###########################################################################################
-def _stft(y, hparams):
- return librosa.stft(y=y, n_fft=hparams['fft_size'], hop_length=get_hop_size(hparams),
- win_length=hparams['win_size'], pad_mode='constant')
-
-
-def _istft(y, hparams):
- return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams['win_size'])
-
-
-def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
- '''compute right padding (final frame) or both sides padding (first and final frames)
- '''
- assert pad_sides in (1, 2)
- # return int(fsize // 2)
- pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]
- if pad_sides == 1:
- return 0, pad
- else:
- return pad // 2, pad // 2 + pad % 2
-
-
-# Conversions
-def amp_to_db(x):
- return 20 * np.log10(np.maximum(1e-5, x))
-
-
-def normalize(S, hparams):
- return (S - hparams['min_level_db']) / -hparams['min_level_db']
diff --git a/spaces/kevinwang676/VoiceChanger/infer_pack/modules.py b/spaces/kevinwang676/VoiceChanger/infer_pack/modules.py
deleted file mode 100644
index 960481cedad9a6106f2bf0b9e86e82b120f7b33f..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VoiceChanger/infer_pack/modules.py
+++ /dev/null
@@ -1,522 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-from infer_pack.transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(
- self,
- in_channels,
- hidden_channels,
- out_channels,
- kernel_size,
- n_layers,
- p_dropout,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
- assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(
- nn.Conv1d(
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
- for _ in range(n_layers - 1):
- self.conv_layers.append(
- nn.Conv1d(
- hidden_channels,
- hidden_channels,
- kernel_size,
- padding=kernel_size // 2,
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
- Dilated and Depth-Separable Convolution
- """
-
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size**i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(
- nn.Conv1d(
- channels,
- channels,
- kernel_size,
- groups=channels,
- dilation=dilation,
- padding=padding,
- )
- )
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(
- self,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- p_dropout=0,
- ):
- super(WN, self).__init__()
- assert kernel_size % 2 == 1
- self.hidden_channels = hidden_channels
- self.kernel_size = (kernel_size,)
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(
- gin_channels, 2 * hidden_channels * n_layers, 1
- )
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
-
- for i in range(n_layers):
- dilation = dilation_rate**i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(
- hidden_channels,
- 2 * hidden_channels,
- kernel_size,
- dilation=dilation,
- padding=padding,
- )
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:, self.hidden_channels :, :]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2]),
- )
- ),
- ]
- )
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- ]
- )
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- ]
- )
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels, 1))
- self.logs = nn.Parameter(torch.zeros(channels, 1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1, 2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False,
- ):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=p_dropout,
- gin_channels=gin_channels,
- )
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1, 2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class ConvFlow(nn.Module):
- def __init__(
- self,
- in_channels,
- filter_channels,
- kernel_size,
- n_layers,
- num_bins=10,
- tail_bound=5.0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
- self.proj = nn.Conv1d(
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
- )
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
- unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
- self.filter_channels
- )
- unnormalized_derivatives = h[..., 2 * self.num_bins :]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(
- x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails="linear",
- tail_bound=self.tail_bound,
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
- if not reverse:
- return x, logdet
- else:
- return x
diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/repeat.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/repeat.py
deleted file mode 100644
index 7a8af6ce850e930feb2bf0cd0e9bc7a8d21520e4..0000000000000000000000000000000000000000
--- a/spaces/kira4424/Tacotron-zero-short-voice-clone/ppg_extractor/encoder/repeat.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-# Copyright 2019 Shigeki Karita
-# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
-
-"""Repeat the same layer definition."""
-
-import torch
-
-
-class MultiSequential(torch.nn.Sequential):
- """Multi-input multi-output torch.nn.Sequential."""
-
- def forward(self, *args):
- """Repeat."""
- for m in self:
- args = m(*args)
- return args
-
-
-def repeat(N, fn):
- """Repeat module N times.
-
- :param int N: repeat time
- :param function fn: function to generate module
- :return: repeated modules
- :rtype: MultiSequential
- """
- return MultiSequential(*[fn(n) for n in range(N)])
diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh b/spaces/koajoel/PolyFormer/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh
deleted file mode 100644
index e3efeb21d302ef8d9eae8f1d4b06434c593705f6..0000000000000000000000000000000000000000
--- a/spaces/koajoel/PolyFormer/fairseq/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-echo 'Cloning Moses github repository (for tokenization scripts)...'
-git clone https://github.com/moses-smt/mosesdecoder.git
-
-SCRIPTS=mosesdecoder/scripts
-TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
-CLEAN=$SCRIPTS/training/clean-corpus-n.perl
-REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl
-
-URLS=(
- "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz"
- "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz"
- "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz"
- "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz"
- "http://data.statmt.org/wmt17/translation-task/dev.tgz"
- "http://statmt.org/wmt14/test-full.tgz"
-)
-CORPORA=(
- "training/europarl-v7.de-en"
- "commoncrawl.de-en"
- "training-parallel-nc-v13/news-commentary-v13.de-en"
- "rapid2016.de-en"
-)
-
-if [ ! -d "$SCRIPTS" ]; then
- echo "Please set SCRIPTS variable correctly to point to Moses scripts."
- exit
-fi
-
-src=en
-tgt=de
-lang=en-de
-prep=wmt18_en_de
-tmp=$prep/tmp
-orig=orig
-dev=dev/newstest2012
-codes=32000
-bpe=bpe.32k
-
-mkdir -p $orig $tmp $prep $bpe
-
-cd $orig
-
-for ((i=0;i<${#URLS[@]};++i)); do
- url=${URLS[i]}
- file=$(basename $url)
- if [ -f $file ]; then
- echo "$file already exists, skipping download"
- else
- wget "$url"
- if [ -f $file ]; then
- echo "$url successfully downloaded."
- else
- echo "$url not successfully downloaded."
- exit 1
- fi
- if [ ${file: -4} == ".tgz" ]; then
- tar zxvf $file
- elif [ ${file: -4} == ".tar" ]; then
- tar xvf $file
- fi
- fi
-done
-cd ..
-
-echo "pre-processing train data..."
-for l in $src $tgt; do
- rm -rf $tmp/train.tags.$lang.tok.$l
- for f in "${CORPORA[@]}"; do
- cat $orig/$f.$l | \
- perl $REM_NON_PRINT_CHAR | \
- perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/train.tags.$lang.tok.$l
- done
-done
-
-echo "pre-processing test data..."
-for l in $src $tgt; do
- if [ "$l" == "$src" ]; then
- t="src"
- else
- t="ref"
- fi
- grep '<seg id' $orig/test-full/newstest2014-deen-$t.$l.sgm | \
- sed -e 's/<seg id="[0-9]*">\s*//g' | \
- sed -e 's/\s*<\/seg>\s*//g' | \
- sed -e "s/\’/\'/g" | \
- perl $TOKENIZER -threads 8 -l $l -no-escape > $tmp/test.$l
- echo ""
-done
-
-# apply length filtering before BPE
-perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train 1 100
-
-# use newstest2012 for valid
-echo "pre-processing valid data..."
-for l in $src $tgt; do
- rm -rf $tmp/valid.$l
- cat $orig/$dev.$l | \
- perl $REM_NON_PRINT_CHAR | \
- perl $TOKENIZER -threads 8 -l $l -no-escape >> $tmp/valid.$l
-done
-
-mkdir output
-mv $tmp/{train,valid,test}.{$src,$tgt} output
-
-#BPE
-git clone https://github.com/glample/fastBPE.git
-pushd fastBPE
-g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast
-popd
-fastBPE/fast learnbpe $codes output/train.$src output/train.$tgt > $bpe/codes
-for split in {train,valid,test}; do for lang in {en,de}; do fastBPE/fast applybpe $bpe/$split.$lang output/$split.$lang $bpe/codes; done; done
diff --git a/spaces/kukuhtw/AutoGPT/README.md b/spaces/kukuhtw/AutoGPT/README.md
deleted file mode 100644
index 5bf09b995f04f7af05d1314906b1b1ff39c20ddc..0000000000000000000000000000000000000000
--- a/spaces/kukuhtw/AutoGPT/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AutoGPT
-emoji: 🦾
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.27.0
-app_file: ui/app.py
-pinned: false
-license: mit
-duplicated_from: aliabid94/AutoGPT
----
-
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/resolver.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/resolver.py
deleted file mode 100644
index 531ce93fccc2d3be442556de644cdc78d31d9c6e..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/resolver.py
+++ /dev/null
@@ -1,160 +0,0 @@
-import asyncio
-import socket
-from typing import Any, Dict, List, Optional, Type, Union
-
-from .abc import AbstractResolver
-from .helpers import get_running_loop
-
-__all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver")
-
-try:
- import aiodns
-
- # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
-except ImportError: # pragma: no cover
- aiodns = None
-
-aiodns_default = False
-
-
-class ThreadedResolver(AbstractResolver):
- """Threaded resolver.
-
- Uses an Executor for synchronous getaddrinfo() calls.
- concurrent.futures.ThreadPoolExecutor is used by default.
- """
-
- def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
- self._loop = get_running_loop(loop)
-
- async def resolve(
- self, hostname: str, port: int = 0, family: int = socket.AF_INET
- ) -> List[Dict[str, Any]]:
- infos = await self._loop.getaddrinfo(
- hostname,
- port,
- type=socket.SOCK_STREAM,
- family=family,
- flags=socket.AI_ADDRCONFIG,
- )
-
- hosts = []
- for family, _, proto, _, address in infos:
- if family == socket.AF_INET6:
- if len(address) < 3:
- # IPv6 is not supported by Python build,
- # or IPv6 is not enabled in the host
- continue
- if address[3]: # type: ignore[misc]
- # This is essential for link-local IPv6 addresses.
- # LL IPv6 is a VERY rare case. Strictly speaking, we should use
- # getnameinfo() unconditionally, but skipping it here is a reasonable performance trade-off.
- host, _port = socket.getnameinfo(
- address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
- )
- port = int(_port)
- else:
- host, port = address[:2]
- else: # IPv4
- assert family == socket.AF_INET
- host, port = address # type: ignore[misc]
- hosts.append(
- {
- "hostname": hostname,
- "host": host,
- "port": port,
- "family": family,
- "proto": proto,
- "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
- }
- )
-
- return hosts
-
- async def close(self) -> None:
- pass
-
-
-class AsyncResolver(AbstractResolver):
- """Use the `aiodns` package to make asynchronous DNS lookups"""
-
- def __init__(
- self,
- loop: Optional[asyncio.AbstractEventLoop] = None,
- *args: Any,
- **kwargs: Any
- ) -> None:
- if aiodns is None:
- raise RuntimeError("Resolver requires aiodns library")
-
- self._loop = get_running_loop(loop)
- self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)
-
- if not hasattr(self._resolver, "gethostbyname"):
- # aiodns 1.1 is not available, fallback to DNSResolver.query
- self.resolve = self._resolve_with_query # type: ignore
-
- async def resolve(
- self, host: str, port: int = 0, family: int = socket.AF_INET
- ) -> List[Dict[str, Any]]:
- try:
- resp = await self._resolver.gethostbyname(host, family)
- except aiodns.error.DNSError as exc:
- msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
- raise OSError(msg) from exc
- hosts = []
- for address in resp.addresses:
- hosts.append(
- {
- "hostname": host,
- "host": address,
- "port": port,
- "family": family,
- "proto": 0,
- "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
- }
- )
-
- if not hosts:
- raise OSError("DNS lookup failed")
-
- return hosts
-
- async def _resolve_with_query(
- self, host: str, port: int = 0, family: int = socket.AF_INET
- ) -> List[Dict[str, Any]]:
- if family == socket.AF_INET6:
- qtype = "AAAA"
- else:
- qtype = "A"
-
- try:
- resp = await self._resolver.query(host, qtype)
- except aiodns.error.DNSError as exc:
- msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
- raise OSError(msg) from exc
-
- hosts = []
- for rr in resp:
- hosts.append(
- {
- "hostname": host,
- "host": rr.host,
- "port": port,
- "family": family,
- "proto": 0,
- "flags": socket.AI_NUMERICHOST,
- }
- )
-
- if not hosts:
- raise OSError("DNS lookup failed")
-
- return hosts
-
- async def close(self) -> None:
- self._resolver.cancel()
-
-
-_DefaultType = Type[Union[AsyncResolver, ThreadedResolver]]
-DefaultResolver: _DefaultType = AsyncResolver if aiodns_default else ThreadedResolver
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/github.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/github.py
deleted file mode 100644
index b148124d7481bb867cb100ad1ab2213e6acadf56..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/github.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import requests
-
-from ..spec import AbstractFileSystem
-from ..utils import infer_storage_options
-from .memory import MemoryFile
-
-# TODO: add GIST backend, would be very similar
-
-
-class GithubFileSystem(AbstractFileSystem):
- """Interface to files in github
-
- An instance of this class provides the files residing within a remote github
- repository. You may specify a point in the repos history, by SHA, branch
- or tag (default is current master).
-
- Given that code files tend to be small, and that github does not support
- retrieving partial content, we always fetch whole files.
-
- When using fsspec.open, allows URIs of the form:
-
- - "github://path/file", in which case you must specify org, repo and
- may specify sha in the extra args
- - 'github://org:repo@/precip/catalog.yml', where the org and repo are
- part of the URI
- - 'github://org:repo@sha/precip/catalog.yml', where the sha is also included
-
- ``sha`` can be the full or abbreviated hex of the commit you want to fetch
- from, or a branch or tag name (so long as it doesn't contain special characters
- like "/", "?", which would have to be HTTP-encoded).
-
- For authorised access, you must provide username and token, which can be made
- at https://github.com/settings/tokens
- """
-
- url = "https://api.github.com/repos/{org}/{repo}/git/trees/{sha}"
- rurl = "https://raw.githubusercontent.com/{org}/{repo}/{sha}/{path}"
- protocol = "github"
-
- def __init__(self, org, repo, sha=None, username=None, token=None, **kwargs):
- super().__init__(**kwargs)
- self.org = org
- self.repo = repo
- if (username is None) ^ (token is None):
- raise ValueError("Auth required both username and token")
- self.username = username
- self.token = token
- if sha is None:
- # look up default branch (not necessarily "master")
- u = "https://api.github.com/repos/{org}/{repo}"
- r = requests.get(u.format(org=org, repo=repo), **self.kw)
- r.raise_for_status()
- sha = r.json()["default_branch"]
-
- self.root = sha
- self.ls("")
-
- @property
- def kw(self):
- if self.username:
- return {"auth": (self.username, self.token)}
- return {}
-
- @classmethod
- def repos(cls, org_or_user, is_org=True):
- """List repo names for given org or user
-
- This may become the top level of the FS
-
- Parameters
- ----------
- org_or_user: str
- Name of the github org or user to query
- is_org: bool (default True)
- Whether the name is an organisation (True) or user (False)
-
- Returns
- -------
- List of string
- """
- r = requests.get(
- "https://api.github.com/{part}/{org}/repos".format(
- part=["users", "orgs"][is_org], org=org_or_user
- )
- )
- r.raise_for_status()
- return [repo["name"] for repo in r.json()]
-
- @property
- def tags(self):
- """Names of tags in the repo"""
- r = requests.get(
- "https://api.github.com/repos/{org}/{repo}/tags"
- "".format(org=self.org, repo=self.repo),
- **self.kw,
- )
- r.raise_for_status()
- return [t["name"] for t in r.json()]
-
- @property
- def branches(self):
- """Names of branches in the repo"""
- r = requests.get(
- "https://api.github.com/repos/{org}/{repo}/branches"
- "".format(org=self.org, repo=self.repo),
- **self.kw,
- )
- r.raise_for_status()
- return [t["name"] for t in r.json()]
-
- @property
- def refs(self):
- """Named references, tags and branches"""
- return {"tags": self.tags, "branches": self.branches}
-
- def ls(self, path, detail=False, sha=None, _sha=None, **kwargs):
- """List files at given path
-
- Parameters
- ----------
- path: str
- Location to list, relative to repo root
- detail: bool
- If True, returns list of dicts, one per file; if False, returns
- list of full filenames only
- sha: str (optional)
- List at the given point in the repo history, branch or tag name or commit
- SHA
- _sha: str (optional)
- List this specific tree object (used internally to descend into trees)
- """
- path = self._strip_protocol(path)
- if path == "":
- _sha = sha or self.root
- if _sha is None:
- parts = path.rstrip("/").split("/")
- so_far = ""
- _sha = sha or self.root
- for part in parts:
- out = self.ls(so_far, True, sha=sha, _sha=_sha)
- so_far += "/" + part if so_far else part
- out = [o for o in out if o["name"] == so_far]
- if not out:
- raise FileNotFoundError(path)
- out = out[0]
- if out["type"] == "file":
- if detail:
- return [out]
- else:
- return path
- _sha = out["sha"]
- if path not in self.dircache or sha not in [self.root, None]:
- r = requests.get(
- self.url.format(org=self.org, repo=self.repo, sha=_sha), **self.kw
- )
- if r.status_code == 404:
- raise FileNotFoundError(path)
- r.raise_for_status()
- types = {"blob": "file", "tree": "directory"}
- out = [
- {
- "name": path + "/" + f["path"] if path else f["path"],
- "mode": f["mode"],
- "type": types[f["type"]],
- "size": f.get("size", 0),
- "sha": f["sha"],
- }
- for f in r.json()["tree"]
- if f["type"] in types
- ]
- if sha in [self.root, None]:
- self.dircache[path] = out
- else:
- out = self.dircache[path]
- if detail:
- return out
- else:
- return sorted([f["name"] for f in out])
-
- def invalidate_cache(self, path=None):
- self.dircache.clear()
-
- @classmethod
- def _strip_protocol(cls, path):
- opts = infer_storage_options(path)
- if "username" not in opts:
- return super()._strip_protocol(path)
- return opts["path"].lstrip("/")
-
- @staticmethod
- def _get_kwargs_from_urls(path):
- opts = infer_storage_options(path)
- if "username" not in opts:
- return {}
- out = {"org": opts["username"], "repo": opts["password"]}
- if opts["host"]:
- out["sha"] = opts["host"]
- return out
-
- def _open(
- self,
- path,
- mode="rb",
- block_size=None,
- autocommit=True,
- cache_options=None,
- sha=None,
- **kwargs,
- ):
- if mode != "rb":
- raise NotImplementedError
- url = self.rurl.format(
- org=self.org, repo=self.repo, path=path, sha=sha or self.root
- )
- r = requests.get(url, **self.kw)
- if r.status_code == 404:
- raise FileNotFoundError(path)
- r.raise_for_status()
- return MemoryFile(None, None, r.content)
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/exceptions.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/exceptions.py
deleted file mode 100644
index 87db3df3a6dde1bbc0aae1128ca21f365e774666..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/exceptions.py
+++ /dev/null
@@ -1,396 +0,0 @@
-"""
-Validation errors, and some surrounding helpers.
-"""
-from __future__ import annotations
-
-from collections import defaultdict, deque
-from pprint import pformat
-from textwrap import dedent, indent
-import heapq
-import itertools
-
-import attr
-
-from jsonschema import _utils
-
-WEAK_MATCHES: frozenset[str] = frozenset(["anyOf", "oneOf"])
-STRONG_MATCHES: frozenset[str] = frozenset()
-
-_unset = _utils.Unset()
-
-
-class _Error(Exception):
- def __init__(
- self,
- message,
- validator=_unset,
- path=(),
- cause=None,
- context=(),
- validator_value=_unset,
- instance=_unset,
- schema=_unset,
- schema_path=(),
- parent=None,
- type_checker=_unset,
- ):
- super(_Error, self).__init__(
- message,
- validator,
- path,
- cause,
- context,
- validator_value,
- instance,
- schema,
- schema_path,
- parent,
- )
- self.message = message
- self.path = self.relative_path = deque(path)
- self.schema_path = self.relative_schema_path = deque(schema_path)
- self.context = list(context)
- self.cause = self.__cause__ = cause
- self.validator = validator
- self.validator_value = validator_value
- self.instance = instance
- self.schema = schema
- self.parent = parent
- self._type_checker = type_checker
-
- for error in context:
- error.parent = self
-
- def __repr__(self):
- return f"<{self.__class__.__name__}: {self.message!r}>"
-
- def __str__(self):
- essential_for_verbose = (
- self.validator, self.validator_value, self.instance, self.schema,
- )
- if any(m is _unset for m in essential_for_verbose):
- return self.message
-
- schema_path = _utils.format_as_index(
- container=self._word_for_schema_in_error_message,
- indices=list(self.relative_schema_path)[:-1],
- )
- instance_path = _utils.format_as_index(
- container=self._word_for_instance_in_error_message,
- indices=self.relative_path,
- )
- prefix = 16 * " "
-
- return dedent(
- f"""\
- {self.message}
-
- Failed validating {self.validator!r} in {schema_path}:
- {indent(pformat(self.schema, width=72), prefix).lstrip()}
-
- On {instance_path}:
- {indent(pformat(self.instance, width=72), prefix).lstrip()}
- """.rstrip(),
- )
-
- @classmethod
- def create_from(cls, other):
- return cls(**other._contents())
-
- @property
- def absolute_path(self):
- parent = self.parent
- if parent is None:
- return self.relative_path
-
- path = deque(self.relative_path)
- path.extendleft(reversed(parent.absolute_path))
- return path
-
- @property
- def absolute_schema_path(self):
- parent = self.parent
- if parent is None:
- return self.relative_schema_path
-
- path = deque(self.relative_schema_path)
- path.extendleft(reversed(parent.absolute_schema_path))
- return path
-
- @property
- def json_path(self):
- path = "$"
- for elem in self.absolute_path:
- if isinstance(elem, int):
- path += "[" + str(elem) + "]"
- else:
- path += "." + elem
- return path
-
- def _set(self, type_checker=None, **kwargs):
- if type_checker is not None and self._type_checker is _unset:
- self._type_checker = type_checker
-
- for k, v in kwargs.items():
- if getattr(self, k) is _unset:
- setattr(self, k, v)
-
- def _contents(self):
- attrs = (
- "message", "cause", "context", "validator", "validator_value",
- "path", "schema_path", "instance", "schema", "parent",
- )
- return dict((attr, getattr(self, attr)) for attr in attrs)
-
- def _matches_type(self):
- try:
- expected = self.schema["type"]
- except (KeyError, TypeError):
- return False
-
- if isinstance(expected, str):
- return self._type_checker.is_type(self.instance, expected)
-
- return any(
- self._type_checker.is_type(self.instance, expected_type)
- for expected_type in expected
- )
-
-
-class ValidationError(_Error):
- """
- An instance was invalid under a provided schema.
- """
-
- _word_for_schema_in_error_message = "schema"
- _word_for_instance_in_error_message = "instance"
-
-
-class SchemaError(_Error):
- """
- A schema was invalid under its corresponding metaschema.
- """
-
- _word_for_schema_in_error_message = "metaschema"
- _word_for_instance_in_error_message = "schema"
-
-
-@attr.s(hash=True)
-class RefResolutionError(Exception):
- """
- A ref could not be resolved.
- """
-
- _cause = attr.ib()
-
- def __str__(self):
- return str(self._cause)
-
-
-class UndefinedTypeCheck(Exception):
- """
- A type checker was asked to check a type it did not have registered.
- """
-
- def __init__(self, type):
- self.type = type
-
- def __str__(self):
- return f"Type {self.type!r} is unknown to this type checker"
-
-
-class UnknownType(Exception):
- """
- A validator was asked to validate an instance against an unknown type.
- """
-
- def __init__(self, type, instance, schema):
- self.type = type
- self.instance = instance
- self.schema = schema
-
- def __str__(self):
- prefix = 16 * " "
-
- return dedent(
- f"""\
- Unknown type {self.type!r} for validator with schema:
- {indent(pformat(self.schema, width=72), prefix).lstrip()}
-
- While checking instance:
- {indent(pformat(self.instance, width=72), prefix).lstrip()}
- """.rstrip(),
- )
-
-
-class FormatError(Exception):
- """
- Validating a format failed.
- """
-
- def __init__(self, message, cause=None):
- super(FormatError, self).__init__(message, cause)
- self.message = message
- self.cause = self.__cause__ = cause
-
- def __str__(self):
- return self.message
-
-
-class ErrorTree:
- """
- ErrorTrees make it easier to check which validations failed.
- """
-
- _instance = _unset
-
- def __init__(self, errors=()):
- self.errors = {}
- self._contents = defaultdict(self.__class__)
-
- for error in errors:
- container = self
- for element in error.path:
- container = container[element]
- container.errors[error.validator] = error
-
- container._instance = error.instance
-
- def __contains__(self, index):
- """
- Check whether ``instance[index]`` has any errors.
- """
-
- return index in self._contents
-
- def __getitem__(self, index):
- """
- Retrieve the child tree one level down at the given ``index``.
-
- If the index is not in the instance that this tree corresponds
- to and is not known by this tree, whatever error would be raised
- by ``instance.__getitem__`` will be propagated (usually this is
- some subclass of `LookupError`).
- """
-
- if self._instance is not _unset and index not in self:
- self._instance[index]
- return self._contents[index]
-
- def __setitem__(self, index, value):
- """
- Add an error to the tree at the given ``index``.
- """
- self._contents[index] = value
-
- def __iter__(self):
- """
- Iterate (non-recursively) over the indices in the instance with errors.
- """
-
- return iter(self._contents)
-
- def __len__(self):
- """
- Return the `total_errors`.
- """
- return self.total_errors
-
- def __repr__(self):
- total = len(self)
- errors = "error" if total == 1 else "errors"
- return f"<{self.__class__.__name__} ({total} total {errors})>"
-
- @property
- def total_errors(self):
- """
- The total number of errors in the entire tree, including children.
- """
-
- child_errors = sum(len(tree) for _, tree in self._contents.items())
- return len(self.errors) + child_errors
-
-
-def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
- """
- Create a key function that can be used to sort errors by relevance.
-
- Arguments:
- weak (set):
- a collection of validation keywords to consider to be
- "weak". If there are two errors at the same level of the
- instance and one is in the set of weak validation keywords,
- the other error will take priority. By default, :kw:`anyOf`
- and :kw:`oneOf` are considered weak keywords and will be
- superseded by other same-level validation errors.
-
- strong (set):
- a collection of validation keywords to consider to be
- "strong"
- """
- def relevance(error):
- validator = error.validator
- return (
- -len(error.path),
- validator not in weak,
- validator in strong,
- not error._matches_type(),
- )
- return relevance
-
-
-relevance = by_relevance()
-
-
-def best_match(errors, key=relevance):
- """
- Try to find an error that appears to be the best match among given errors.
-
- In general, errors that are higher up in the instance (i.e. for which
- `ValidationError.path` is shorter) are considered better matches,
- since they indicate "more" is wrong with the instance.
-
- If the resulting match is either :kw:`oneOf` or :kw:`anyOf`, the
- *opposite* assumption is made -- i.e. the deepest error is picked,
- since these keywords only need to match once, and any other errors
- may not be relevant.
-
- Arguments:
- errors (collections.abc.Iterable):
-
- the errors to select from. Do not provide a mixture of
- errors from different validation attempts (i.e. from
- different instances or schemas), since it won't produce
- sensible output.
-
- key (collections.abc.Callable):
-
- the key to use when sorting errors. See `relevance` and
- transitively `by_relevance` for more details (the default is
- to sort with the defaults of that function). Changing the
- default is only useful if you want to change the function
- that rates errors but still want the error context descent
- done by this function.
-
- Returns:
- the best matching error, or ``None`` if the iterable was empty
-
- .. note::
-
- This function is a heuristic. Its return value may change for a given
- set of inputs from version to version if better heuristics are added.
- """
- errors = iter(errors)
- best = next(errors, None)
- if best is None:
- return
- best = max(itertools.chain([best], errors), key=key)
-
- while best.context:
- # Calculate the minimum via nsmallest, because we don't recurse if
- # all nested errors have the same relevance (i.e. if min == max == all)
- smallest = heapq.nsmallest(2, best.context, key=key)
- if len(smallest) == 2 and key(smallest[0]) == key(smallest[1]):
- return best
- best = smallest[0]
- return best
diff --git a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/data/transforms.py b/spaces/leafShen/CodeFormer/CodeFormer/basicsr/data/transforms.py
deleted file mode 100644
index aead9dc73ed063e1c5865040eaa2652b26aa3ad3..0000000000000000000000000000000000000000
--- a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/data/transforms.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import cv2
-import random
-
-
-def mod_crop(img, scale):
- """Mod crop images, used during testing.
-
- Args:
- img (ndarray): Input image.
- scale (int): Scale factor.
-
- Returns:
- ndarray: Result image.
- """
- img = img.copy()
- if img.ndim in (2, 3):
- h, w = img.shape[0], img.shape[1]
- h_remainder, w_remainder = h % scale, w % scale
- img = img[:h - h_remainder, :w - w_remainder, ...]
- else:
- raise ValueError(f'Wrong img ndim: {img.ndim}.')
- return img
-
-
-def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path):
- """Paired random crop.
-
- It crops lists of lq and gt images with corresponding locations.
-
- Args:
- img_gts (list[ndarray] | ndarray): GT images. Note that all images
- should have the same shape. If the input is an ndarray, it will
- be transformed to a list containing itself.
- img_lqs (list[ndarray] | ndarray): LQ images. Note that all images
- should have the same shape. If the input is an ndarray, it will
- be transformed to a list containing itself.
- gt_patch_size (int): GT patch size.
- scale (int): Scale factor.
- gt_path (str): Path to ground-truth.
-
- Returns:
- list[ndarray] | ndarray: GT images and LQ images. If returned results
- only have one element, just return ndarray.
- """
-
- if not isinstance(img_gts, list):
- img_gts = [img_gts]
- if not isinstance(img_lqs, list):
- img_lqs = [img_lqs]
-
- h_lq, w_lq, _ = img_lqs[0].shape
- h_gt, w_gt, _ = img_gts[0].shape
- lq_patch_size = gt_patch_size // scale
-
- if h_gt != h_lq * scale or w_gt != w_lq * scale:
- raise ValueError(f'Scale mismatch. GT ({h_gt}, {w_gt}) is not {scale}x '
- f'the size of LQ ({h_lq}, {w_lq}).')
- if h_lq < lq_patch_size or w_lq < lq_patch_size:
- raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
- f'({lq_patch_size}, {lq_patch_size}). '
- f'Please remove {gt_path}.')
-
- # randomly choose top and left coordinates for lq patch
- top = random.randint(0, h_lq - lq_patch_size)
- left = random.randint(0, w_lq - lq_patch_size)
-
- # crop lq patch
- img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]
-
- # crop corresponding gt patch
- top_gt, left_gt = int(top * scale), int(left * scale)
- img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]
- if len(img_gts) == 1:
- img_gts = img_gts[0]
- if len(img_lqs) == 1:
- img_lqs = img_lqs[0]
- return img_gts, img_lqs
-
-
-def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
- """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
-
- We use vertical flip and transpose for rotation implementation.
- All the images in the list use the same augmentation.
-
- Args:
- imgs (list[ndarray] | ndarray): Images to be augmented. If the input
- is an ndarray, it will be transformed to a list.
- hflip (bool): Horizontal flip. Default: True.
- rotation (bool): Rotation. Default: True.
- flows (list[ndarray]): Flows to be augmented. If the input is an
- ndarray, it will be transformed to a list.
- Dimension is (h, w, 2). Default: None.
- return_status (bool): Return the status of flip and rotation.
- Default: False.
-
- Returns:
- list[ndarray] | ndarray: Augmented images and flows. If returned
- results only have one element, just return ndarray.
-
- """
- hflip = hflip and random.random() < 0.5
- vflip = rotation and random.random() < 0.5
- rot90 = rotation and random.random() < 0.5
-
- def _augment(img):
- if hflip: # horizontal
- cv2.flip(img, 1, img)
- if vflip: # vertical
- cv2.flip(img, 0, img)
- if rot90:
- img = img.transpose(1, 0, 2)
- return img
-
- def _augment_flow(flow):
- if hflip: # horizontal
- cv2.flip(flow, 1, flow)
- flow[:, :, 0] *= -1
- if vflip: # vertical
- cv2.flip(flow, 0, flow)
- flow[:, :, 1] *= -1
- if rot90:
- flow = flow.transpose(1, 0, 2)
- flow = flow[:, :, [1, 0]]
- return flow
-
- if not isinstance(imgs, list):
- imgs = [imgs]
- imgs = [_augment(img) for img in imgs]
- if len(imgs) == 1:
- imgs = imgs[0]
-
- if flows is not None:
- if not isinstance(flows, list):
- flows = [flows]
- flows = [_augment_flow(flow) for flow in flows]
- if len(flows) == 1:
- flows = flows[0]
- return imgs, flows
- else:
- if return_status:
- return imgs, (hflip, vflip, rot90)
- else:
- return imgs
-
-
-def img_rotate(img, angle, center=None, scale=1.0):
- """Rotate image.
-
- Args:
- img (ndarray): Image to be rotated.
- angle (float): Rotation angle in degrees. Positive values mean
- counter-clockwise rotation.
- center (tuple[int]): Rotation center. If the center is None,
- initialize it as the center of the image. Default: None.
- scale (float): Isotropic scale factor. Default: 1.0.
- """
- (h, w) = img.shape[:2]
-
- if center is None:
- center = (w // 2, h // 2)
-
- matrix = cv2.getRotationMatrix2D(center, angle, scale)
- rotated_img = cv2.warpAffine(img, matrix, (w, h))
- return rotated_img
diff --git a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/utils/realesrgan_utils.py b/spaces/leafShen/CodeFormer/CodeFormer/basicsr/utils/realesrgan_utils.py
deleted file mode 100644
index 5a7b159b697d9e1ca0c51900ec1fe01f9feeb18f..0000000000000000000000000000000000000000
--- a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/utils/realesrgan_utils.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import cv2
-import math
-import numpy as np
-import os
-import queue
-import threading
-import torch
-from basicsr.utils.download_util import load_file_from_url
-from torch.nn import functional as F
-
-# ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-
-class RealESRGANer():
- """A helper class for upsampling images with RealESRGAN.
-
- Args:
- scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4.
- model_path (str): The path to the pretrained model. It can be urls (will first download it automatically).
- model (nn.Module): The defined network. Default: None.
- tile (int): Since very large images can cause out-of-GPU-memory issues, this tile option first crops the
- input images into tiles and processes each of them separately. Finally, the tiles are merged back into
- one image. 0 means tiling is not used. Default: 0.
- tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10.
- pre_pad (int): Pad the input images to avoid border artifacts. Default: 10.
- half (bool): Whether to use half precision during inference. Default: False.
- """
-
- def __init__(self,
- scale,
- model_path,
- model=None,
- tile=0,
- tile_pad=10,
- pre_pad=10,
- half=False,
- device=None,
- gpu_id=None):
- self.scale = scale
- self.tile_size = tile
- self.tile_pad = tile_pad
- self.pre_pad = pre_pad
- self.mod_scale = None
- self.half = half
-
- # initialize model
- if gpu_id:
- self.device = torch.device(
- f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu') if device is None else device
- else:
- self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
- # if the model_path starts with https, it will first download models to the folder: weights/realesrgan
- if model_path.startswith('https://'):
- model_path = load_file_from_url(
- url=model_path, model_dir=os.path.join('weights/realesrgan'), progress=True, file_name=None)
- loadnet = torch.load(model_path, map_location=torch.device('cpu'))
- # prefer to use params_ema
- if 'params_ema' in loadnet:
- keyname = 'params_ema'
- else:
- keyname = 'params'
- model.load_state_dict(loadnet[keyname], strict=True)
- model.eval()
- self.model = model.to(self.device)
- if self.half:
- self.model = self.model.half()
-
- def pre_process(self, img):
- """Pre-process, such as pre-pad and mod pad, so that the image sizes are divisible by the required factor
- """
- img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
- self.img = img.unsqueeze(0).to(self.device)
- if self.half:
- self.img = self.img.half()
-
- # pre_pad
- self.img_pre_pad = self.img.clone()
- if self.pre_pad != 0:
- self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect')
- # mod pad for divisible borders
- if self.scale == 2:
- self.mod_scale = 2
- elif self.scale == 1:
- self.mod_scale = 4
- if self.mod_scale is not None:
- self.mod_pad_h, self.mod_pad_w = 0, 0
- _, _, h, w = self.img.size()
- if (h % self.mod_scale != 0):
- self.mod_pad_h = (self.mod_scale - h % self.mod_scale)
- if (w % self.mod_scale != 0):
- self.mod_pad_w = (self.mod_scale - w % self.mod_scale)
- self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect')
-
- def process(self):
- # model inference
- self.output = self.model(self.img)
-
- def tile_process(self):
- """It will first crop input images to tiles, and then process each tile.
- Finally, all the processed tiles are merged into one image.
-
- Modified from: https://github.com/ata4/esrgan-launcher
- """
- batch, channel, height, width = self.img.shape
- output_height = height * self.scale
- output_width = width * self.scale
- output_shape = (batch, channel, output_height, output_width)
-
- # start with black image
- self.output = self.img.new_zeros(output_shape)
- tiles_x = math.ceil(width / self.tile_size)
- tiles_y = math.ceil(height / self.tile_size)
-
- # loop over all tiles
- for y in range(tiles_y):
- for x in range(tiles_x):
- # extract tile from input image
- ofs_x = x * self.tile_size
- ofs_y = y * self.tile_size
- # input tile area on total image
- input_start_x = ofs_x
- input_end_x = min(ofs_x + self.tile_size, width)
- input_start_y = ofs_y
- input_end_y = min(ofs_y + self.tile_size, height)
-
- # input tile area on total image with padding
- input_start_x_pad = max(input_start_x - self.tile_pad, 0)
- input_end_x_pad = min(input_end_x + self.tile_pad, width)
- input_start_y_pad = max(input_start_y - self.tile_pad, 0)
- input_end_y_pad = min(input_end_y + self.tile_pad, height)
-
- # input tile dimensions
- input_tile_width = input_end_x - input_start_x
- input_tile_height = input_end_y - input_start_y
- tile_idx = y * tiles_x + x + 1
- input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]
-
- # upscale tile
- try:
- with torch.no_grad():
- output_tile = self.model(input_tile)
- except RuntimeError as error:
- print('Error', error)
- # print(f'\tTile {tile_idx}/{tiles_x * tiles_y}')
-
- # output tile area on total image
- output_start_x = input_start_x * self.scale
- output_end_x = input_end_x * self.scale
- output_start_y = input_start_y * self.scale
- output_end_y = input_end_y * self.scale
-
- # output tile area without padding
- output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
- output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
- output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
- output_end_y_tile = output_start_y_tile + input_tile_height * self.scale
-
- # put tile into output image
- self.output[:, :, output_start_y:output_end_y,
- output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
- output_start_x_tile:output_end_x_tile]
-
- def post_process(self):
- # remove extra pad
- if self.mod_scale is not None:
- _, _, h, w = self.output.size()
- self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale]
- # remove prepad
- if self.pre_pad != 0:
- _, _, h, w = self.output.size()
- self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale]
- return self.output
-
- @torch.no_grad()
- def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'):
- h_input, w_input = img.shape[0:2]
- # img: numpy
- img = img.astype(np.float32)
- if np.max(img) > 256: # 16-bit image
- max_range = 65535
- print('\tInput is a 16-bit image')
- else:
- max_range = 255
- img = img / max_range
- if len(img.shape) == 2: # gray image
- img_mode = 'L'
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
- elif img.shape[2] == 4: # RGBA image with alpha channel
- img_mode = 'RGBA'
- alpha = img[:, :, 3]
- img = img[:, :, 0:3]
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- if alpha_upsampler == 'realesrgan':
- alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB)
- else:
- img_mode = 'RGB'
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
- # ------------------- process image (without the alpha channel) ------------------- #
- try:
- with torch.no_grad():
- self.pre_process(img)
- if self.tile_size > 0:
- self.tile_process()
- else:
- self.process()
- output_img_t = self.post_process()
- output_img = output_img_t.data.squeeze().float().cpu().clamp_(0, 1).numpy()
- output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
- if img_mode == 'L':
- output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)
- del output_img_t
- torch.cuda.empty_cache()
- except RuntimeError as error:
- output_img = cv2.resize(self.img_pre_pad, (w_input * self.scale, h_input * self.scale), interpolation=cv2.INTER_LINEAR)
- print(f"Failed inference for RealESRGAN: {error}")
-
- # ------------------- process the alpha channel if necessary ------------------- #
- if img_mode == 'RGBA':
- if alpha_upsampler == 'realesrgan':
- self.pre_process(alpha)
- if self.tile_size > 0:
- self.tile_process()
- else:
- self.process()
- output_alpha = self.post_process()
- output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy()
- output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0))
- output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY)
- else: # use the cv2 resize for alpha channel
- h, w = alpha.shape[0:2]
- output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR)
-
- # merge the alpha channel
- output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA)
- output_img[:, :, 3] = output_alpha
-
- # ------------------------------ return ------------------------------ #
- if max_range == 65535: # 16-bit image
- output = (output_img * 65535.0).round().astype(np.uint16)
- else:
- output = (output_img * 255.0).round().astype(np.uint8)
-
- if outscale is not None and outscale != float(self.scale):
- output = cv2.resize(
- output, (
- int(w_input * outscale),
- int(h_input * outscale),
- ), interpolation=cv2.INTER_LANCZOS4)
-
- return output, img_mode
-
-
-class PrefetchReader(threading.Thread):
- """Prefetch images.
-
- Args:
-        img_list (list[str]): A list of image paths to be read.
-        num_prefetch_queue (int): Size of the prefetch queue.
- """
-
- def __init__(self, img_list, num_prefetch_queue):
- super().__init__()
- self.que = queue.Queue(num_prefetch_queue)
- self.img_list = img_list
-
- def run(self):
- for img_path in self.img_list:
- img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
- self.que.put(img)
-
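-        # a None sentinel tells consumers (see __next__) that all images have been read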
- self.que.put(None)
-
- def __next__(self):
- next_item = self.que.get()
- if next_item is None:
- raise StopIteration
- return next_item
-
- def __iter__(self):
- return self
-
-
-class IOConsumer(threading.Thread):
-
- def __init__(self, opt, que, qid):
- super().__init__()
- self._queue = que
- self.qid = qid
- self.opt = opt
-
- def run(self):
- while True:
- msg = self._queue.get()
- if isinstance(msg, str) and msg == 'quit':
- break
-
- output = msg['output']
- save_path = msg['save_path']
- cv2.imwrite(save_path, output)
- print(f'IO worker {self.qid} is done.')
\ No newline at end of file
diff --git a/spaces/leave7/kazunaAI2.0/data_utils.py b/spaces/leave7/kazunaAI2.0/data_utils.py
deleted file mode 100644
index 9dfba4a9dfbfbd2b6ed5e771a5ffee4f70419ba3..0000000000000000000000000000000000000000
--- a/spaces/leave7/kazunaAI2.0/data_utils.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-
-import commons
-from mel_processing import spectrogram_torch, spec_to_mel_torch
-from utils import load_wav_to_torch, load_filepaths_and_text, transform
-
-# import h5py
-
-
-"""Multi speaker version"""
-
-
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
- """
-    1) loads audio and speaker_id pairs
-    2) loads precomputed content features and f0
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths, hparams):
- self.audiopaths = load_filepaths_and_text(audiopaths)
- self.max_wav_value = hparams.data.max_wav_value
- self.sampling_rate = hparams.data.sampling_rate
- self.filter_length = hparams.data.filter_length
- self.hop_length = hparams.data.hop_length
- self.win_length = hparams.data.win_length
- self.sampling_rate = hparams.data.sampling_rate
- self.use_sr = hparams.train.use_sr
- self.spec_len = hparams.train.max_speclen
- self.spk_map = hparams.spk
-
- random.seed(1234)
- random.shuffle(self.audiopaths)
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
- raise ValueError("{} SR doesn't match target {} SR".format(
- sampling_rate, self.sampling_rate))
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if os.path.exists(spec_filename):
- spec = torch.load(spec_filename)
- else:
- spec = spectrogram_torch(audio_norm, self.filter_length,
- self.sampling_rate, self.hop_length, self.win_length,
- center=False)
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
-
- spk = filename.split(os.sep)[-2]
- spk = torch.LongTensor([self.spk_map[spk]])
-
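-        # load the precomputed content features (*.soft.pt) and upsample them 2x in time to roughly match the spectrogram frame rate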
- c = torch.load(filename + ".soft.pt").squeeze(0)
- c = torch.repeat_interleave(c, repeats=2, dim=1)
-
- f0 = np.load(filename + ".f0.npy")
- f0 = torch.FloatTensor(f0)
- lmin = min(c.size(-1), spec.size(-1), f0.shape[0])
- assert abs(c.size(-1) - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape, filename)
- assert abs(lmin - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape)
- assert abs(lmin - c.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape)
- spec, c, f0 = spec[:, :lmin], c[:, :lmin], f0[:lmin]
- audio_norm = audio_norm[:, :lmin * self.hop_length]
- _spec, _c, _audio_norm, _f0 = spec, c, audio_norm, f0
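-        # tile short utterances by repetition until a spec_len-frame crop can be taken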
- while spec.size(-1) < self.spec_len:
- spec = torch.cat((spec, _spec), -1)
- c = torch.cat((c, _c), -1)
- f0 = torch.cat((f0, _f0), -1)
- audio_norm = torch.cat((audio_norm, _audio_norm), -1)
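-        # take a random, frame-aligned crop of spec_len frames (and the matching waveform samples)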
- start = random.randint(0, spec.size(-1) - self.spec_len)
- end = start + self.spec_len
- spec = spec[:, start:end]
- c = c[:, start:end]
- f0 = f0[start:end]
- audio_norm = audio_norm[:, start * self.hop_length:end * self.hop_length]
-
- return c, f0, spec, audio_norm, spk
-
- def __getitem__(self, index):
- return self.get_audio(self.audiopaths[index][0])
-
- def __len__(self):
- return len(self.audiopaths)
-
-
-class EvalDataLoader(torch.utils.data.Dataset):
- """
-    1) loads audio and speaker_id pairs
-    2) loads precomputed content features and f0
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths, hparams):
- self.audiopaths = load_filepaths_and_text(audiopaths)
- self.max_wav_value = hparams.data.max_wav_value
- self.sampling_rate = hparams.data.sampling_rate
- self.filter_length = hparams.data.filter_length
- self.hop_length = hparams.data.hop_length
- self.win_length = hparams.data.win_length
- self.sampling_rate = hparams.data.sampling_rate
- self.use_sr = hparams.train.use_sr
- self.audiopaths = self.audiopaths[:5]
- self.spk_map = hparams.spk
-
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
- raise ValueError("{} SR doesn't match target {} SR".format(
- sampling_rate, self.sampling_rate))
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if os.path.exists(spec_filename):
- spec = torch.load(spec_filename)
- else:
- spec = spectrogram_torch(audio_norm, self.filter_length,
- self.sampling_rate, self.hop_length, self.win_length,
- center=False)
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
-
- spk = filename.split(os.sep)[-2]
- spk = torch.LongTensor([self.spk_map[spk]])
-
- c = torch.load(filename + ".soft.pt").squeeze(0)
-
- c = torch.repeat_interleave(c, repeats=2, dim=1)
-
- f0 = np.load(filename + ".f0.npy")
- f0 = torch.FloatTensor(f0)
- lmin = min(c.size(-1), spec.size(-1), f0.shape[0])
- assert abs(c.size(-1) - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape)
- assert abs(f0.shape[0] - spec.shape[-1]) < 4, (c.size(-1), spec.size(-1), f0.shape)
- spec, c, f0 = spec[:, :lmin], c[:, :lmin], f0[:lmin]
- audio_norm = audio_norm[:, :lmin * self.hop_length]
-
- return c, f0, spec, audio_norm, spk
-
- def __getitem__(self, index):
- return self.get_audio(self.audiopaths[index][0])
-
- def __len__(self):
- return len(self.audiopaths)
-
diff --git a/spaces/lewisliuX123/wechatllama2/docker/build.debian.sh b/spaces/lewisliuX123/wechatllama2/docker/build.debian.sh
deleted file mode 100644
index a5285f39813426a2d63eb01982229b23ec09dba2..0000000000000000000000000000000000000000
--- a/spaces/lewisliuX123/wechatllama2/docker/build.debian.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-CHATGPT_ON_WECHAT_TAG=1.0.2
-
-docker build -f Dockerfile.debian \
- --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \
- -t zhayujie/chatgpt-on-wechat .
-
-docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-debian
\ No newline at end of file
diff --git a/spaces/lewiswu1209/MockingBird/vocoder/hifigan/env.py b/spaces/lewiswu1209/MockingBird/vocoder/hifigan/env.py
deleted file mode 100644
index 8f0d306d518d0d86a40d7ee992fbad6f04fe875f..0000000000000000000000000000000000000000
--- a/spaces/lewiswu1209/MockingBird/vocoder/hifigan/env.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import os
-import shutil
-
-def build_env(config, config_name, path):
- t_path = os.path.join(path, config_name)
- if config != t_path:
- os.makedirs(path, exist_ok=True)
- shutil.copyfile(config, os.path.join(path, config_name))
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Ankhon Dekhi Movie Download 720p Torrents.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Ankhon Dekhi Movie Download 720p Torrents.md
deleted file mode 100644
index c8f70e8cb68bfbea68f58db951158120038cde82..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Ankhon Dekhi Movie Download 720p Torrents.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Ankhon Dekhi Movie Download 720p Torrents Download Zip ✵ https://bytlly.com/2uGvOV
-
-Ankhon Dekhi Full Movie Download 720p Kickass Torrent Ankhon Dekhi (2014) Full Hindi . Full Hindi Movie Watch Online And Download HD . 1fdad05405
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/City Car Driving 1.4.1 Crack [BETTER].md b/spaces/lincquiQcaudo/Top-20-Diffusion/City Car Driving 1.4.1 Crack [BETTER].md
deleted file mode 100644
index 8a6e9c4edd81d0c5990d8ef3842bf2b0178135e4..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/City Car Driving 1.4.1 Crack [BETTER].md
+++ /dev/null
@@ -1,6 +0,0 @@
-city car driving 1.4.1 crack DOWNLOAD 🗸 https://bytlly.com/2uGyKj
-
-City Car Driving Home Edition 1. Setelah proses instal selesai masukan serial number. Copy cracknya ke folder tempat city car driving yang. Terlebih dahulu ... 1fdad05405
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free Download !!LINK!!.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free Download !!LINK!!.md
deleted file mode 100644
index 2ee319080ebf62bf788fa0e699a9d400ad834df3..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free Download !!LINK!!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free Download DOWNLOAD ►►►►► https://bytlly.com/2uGxxV
-
-Free download cracked Adobe Acrobat XI Pro Final full version with torrent ... Cockos Reaper V4.57 (x86-x64) Cracked-F4CG [TorDigger] Free ... 4d29de3e1b
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Isumsoft Windows Password Refixer Ultimate Crack !EXCLUSIVE!.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Isumsoft Windows Password Refixer Ultimate Crack !EXCLUSIVE!.md
deleted file mode 100644
index 83b636e04a82f7b95973dfe768b196ae32b45f0d..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Isumsoft Windows Password Refixer Ultimate Crack !EXCLUSIVE!.md
+++ /dev/null
@@ -1,12 +0,0 @@
-isumsoft windows password refixer ultimate crack Download ->->->-> https://bytlly.com/2uGxCK
-
-12 Apr 2020 - Windows Password Recovery Tool Ultimate Crack Free Download The world's first home windows password recovery software to reset ... Windows Password Recovery Tool - free download Windows ...
-13 Jul 2017 ...
-Download Windows Password Recovery Tool - https://goo.gl/NVqnvF.
-In this video I will show you how to recover a deleted password on Windows 7 ...
-6 Jul 2015 ...
-Download Windows Password Recovery Tool 1.0 [Crack] - https://goo.gl/vx2fT5 Windows Password Recovery Tool 1.0 is the first and only, fastest and most complete tool to recover forgotten passwords ...
-Windows Password Recovery Tool - download free ... 8a78ff9644
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator FSX 737 Pilot In Command [English] [P Bot.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator FSX 737 Pilot In Command [English] [P Bot.md
deleted file mode 100644
index e9ba78702c347ce4e50b01e945b94b6ce384f880..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Microsoft Flight Simulator FSX 737 Pilot In Command [English] [P Bot.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Microsoft Flight Simulator FSX 737 Pilot In Command [English] [P bot Download File ⚹ https://bytlly.com/2uGx2Z
-
-112 accident investigations, including Air Algerie, a Boeing 737 ... A longer chain of events, as analyzed downstream (or at the bot- ... taxied into gate C-3, and because the pilot in command did not ... conducted in simulators instead of an actual airplane. ... British Military Aircraft Accidents: The Last 25 Years, Ian Allen. 1fdad05405
-
-
-
diff --git a/spaces/lixq/bingo61/Dockerfile b/spaces/lixq/bingo61/Dockerfile
deleted file mode 100644
index 3aa2b29b5fc4fa8b8238955acd7f1fde13ce5e1a..0000000000000000000000000000000000000000
--- a/spaces/lixq/bingo61/Dockerfile
+++ /dev/null
@@ -1,36 +0,0 @@
-FROM node:18
-
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-ENV BING_HEADER ""
-
-# Set home to the user's home directory
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME
-
-# Switch to the "user" user
-USER user
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Install app dependencies
-# A wildcard is used to ensure both package.json AND package-lock.json are copied
-# where available (npm@5+)
-COPY --chown=user package*.json $HOME/app/
-
-RUN npm install
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app/
-
-RUN npm run build
-
-ENV PORT 7860
-EXPOSE 7860
-
-CMD npm start
diff --git a/spaces/lqy09/GT/public/GTest/main.html b/spaces/lqy09/GT/public/GTest/main.html
deleted file mode 100644
index d90601a2a2c1179c82e048cfb8ec166b783dbc57..0000000000000000000000000000000000000000
--- a/spaces/lqy09/GT/public/GTest/main.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
- GTest
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/luost26/DiffAb/diffab/tools/relax/pyrosetta_relaxer.py b/spaces/luost26/DiffAb/diffab/tools/relax/pyrosetta_relaxer.py
deleted file mode 100644
index 2696f313850d2faa1695883904884e9ccb9cd964..0000000000000000000000000000000000000000
--- a/spaces/luost26/DiffAb/diffab/tools/relax/pyrosetta_relaxer.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# pyright: reportMissingImports=false
-import os
-import time
-import pyrosetta
-from pyrosetta.rosetta.protocols.relax import FastRelax
-from pyrosetta.rosetta.core.pack.task import TaskFactory
-from pyrosetta.rosetta.core.pack.task import operation
-from pyrosetta.rosetta.core.select import residue_selector as selections
-from pyrosetta.rosetta.core.select.movemap import MoveMapFactory, move_map_action
-pyrosetta.init(' '.join([
- '-mute', 'all',
- '-use_input_sc',
- '-ignore_unrecognized_res',
- '-ignore_zero_occupancy', 'false',
- '-load_PDB_components', 'false',
- '-relax:default_repeats', '2',
- '-no_fconfig',
-]))
-
-from diffab.tools.relax.base import RelaxTask
-
-
-def current_milli_time():
- return round(time.time() * 1000)
-
-
-def parse_residue_position(p):
- icode = None
- if not p[-1].isnumeric(): # Has ICODE
-        icode = p[-1]
-        p = p[:-1]  # drop the insertion code so the residue number parses as an int below
-
- for i, c in enumerate(p):
- if c.isnumeric():
- break
- chain = p[:i]
- resseq = int(p[i:])
-
- if icode is not None:
- return chain, resseq, icode
- else:
- return chain, resseq
-
-
-def get_scorefxn(scorefxn_name:str):
- """
- Gets the scorefxn with appropriate corrections.
- Taken from: https://gist.github.com/matteoferla/b33585f3aeab58b8424581279e032550
- """
- import pyrosetta
-
- corrections = {
- 'beta_july15': False,
- 'beta_nov16': False,
- 'gen_potential': False,
- 'restore_talaris_behavior': False,
- }
- if 'beta_july15' in scorefxn_name or 'beta_nov15' in scorefxn_name:
- # beta_july15 is ref2015
- corrections['beta_july15'] = True
- elif 'beta_nov16' in scorefxn_name:
- corrections['beta_nov16'] = True
- elif 'genpot' in scorefxn_name:
- corrections['gen_potential'] = True
- pyrosetta.rosetta.basic.options.set_boolean_option('corrections:beta_july15', True)
- elif 'talaris' in scorefxn_name: #2013 and 2014
- corrections['restore_talaris_behavior'] = True
- else:
- pass
- for corr, value in corrections.items():
- pyrosetta.rosetta.basic.options.set_boolean_option(f'corrections:{corr}', value)
- return pyrosetta.create_score_function(scorefxn_name)
-
-
-class RelaxRegion(object):
-
- def __init__(self, scorefxn='ref2015', max_iter=1000, subset='nbrs', move_bb=True):
- super().__init__()
- self.scorefxn = get_scorefxn(scorefxn)
- self.fast_relax = FastRelax()
- self.fast_relax.set_scorefxn(self.scorefxn)
- self.fast_relax.max_iter(max_iter)
- assert subset in ('all', 'target', 'nbrs')
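-        # subset: 'target' relaxes only the flexible span, 'nbrs' additionally includes neighboring residues, 'all' applies no repacking restriction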
- self.subset = subset
- self.move_bb = move_bb
-
- def __call__(self, pdb_path, flexible_residue_first, flexible_residue_last):
- pose = pyrosetta.pose_from_pdb(pdb_path)
- start_t = current_milli_time()
- original_pose = pose.clone()
-
- tf = TaskFactory()
- tf.push_back(operation.InitializeFromCommandline())
- tf.push_back(operation.RestrictToRepacking()) # Only allow residues to repack. No design at any position.
-
- # Create selector for the region to be relaxed
- # Turn off design and repacking on irrelevant positions
- if flexible_residue_first[-1] == ' ':
- flexible_residue_first = flexible_residue_first[:-1]
- if flexible_residue_last[-1] == ' ':
- flexible_residue_last = flexible_residue_last[:-1]
- if self.subset != 'all':
- gen_selector = selections.ResidueIndexSelector()
- gen_selector.set_index_range(
- pose.pdb_info().pdb2pose(*flexible_residue_first),
- pose.pdb_info().pdb2pose(*flexible_residue_last),
- )
- nbr_selector = selections.NeighborhoodResidueSelector()
- nbr_selector.set_focus_selector(gen_selector)
- nbr_selector.set_include_focus_in_subset(True)
-
- if self.subset == 'nbrs':
- subset_selector = nbr_selector
- elif self.subset == 'target':
- subset_selector = gen_selector
-
- prevent_repacking_rlt = operation.PreventRepackingRLT()
- prevent_subset_repacking = operation.OperateOnResidueSubset(
- prevent_repacking_rlt,
- subset_selector,
- flip_subset=True,
- )
- tf.push_back(prevent_subset_repacking)
-
- scorefxn = self.scorefxn
- fr = self.fast_relax
-
- pose = original_pose.clone()
- pos_list = pyrosetta.rosetta.utility.vector1_unsigned_long()
- for pos in range(pose.pdb_info().pdb2pose(*flexible_residue_first), pose.pdb_info().pdb2pose(*flexible_residue_last)+1):
- pos_list.append(pos)
- # basic_idealize(pose, pos_list, scorefxn, fast=True)
-
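-        # MoveMap: allow backbone motion only for the flexible region (if enabled) and side-chain motion for the selected subset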
- mmf = MoveMapFactory()
- if self.move_bb:
- mmf.add_bb_action(move_map_action.mm_enable, gen_selector)
- mmf.add_chi_action(move_map_action.mm_enable, subset_selector)
- mm = mmf.create_movemap_from_pose(pose)
-
- fr.set_movemap(mm)
- fr.set_task_factory(tf)
- fr.apply(pose)
-
- e_before = scorefxn(original_pose)
- e_relax = scorefxn(pose)
- # print('\n\n[Finished in %.2f secs]' % ((current_milli_time() - start_t) / 1000))
- # print(' > Energy (before): %.4f' % scorefxn(original_pose))
- # print(' > Energy (optimized): %.4f' % scorefxn(pose))
- return pose, e_before, e_relax
-
-
-def run_pyrosetta(task: RelaxTask):
- if not task.can_proceed() :
- return task
- if task.update_if_finished('rosetta'):
- return task
-
- minimizer = RelaxRegion()
- pose_min, _, _ = minimizer(
- pdb_path = task.current_path,
- flexible_residue_first = task.flexible_residue_first,
- flexible_residue_last = task.flexible_residue_last,
- )
-
- out_path = task.set_current_path_tag('rosetta')
- pose_min.dump_pdb(out_path)
- task.mark_success()
- return task
-
-
-def run_pyrosetta_fixbb(task: RelaxTask):
- if not task.can_proceed() :
- return task
- if task.update_if_finished('fixbb'):
- return task
-
- minimizer = RelaxRegion(move_bb=False)
- pose_min, _, _ = minimizer(
- pdb_path = task.current_path,
- flexible_residue_first = task.flexible_residue_first,
- flexible_residue_last = task.flexible_residue_last,
- )
-
- out_path = task.set_current_path_tag('fixbb')
- pose_min.dump_pdb(out_path)
- task.mark_success()
- return task
-
-
-
diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/losses.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/losses.py
deleted file mode 100644
index dbd513dde6c65389672378c07e1e470e6464603d..0000000000000000000000000000000000000000
--- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/losses/losses.py
+++ /dev/null
@@ -1,423 +0,0 @@
-import math
-import torch
-from torch import autograd as autograd
-from torch import nn as nn
-from torch.nn import functional as F
-
-from basicsr.archs.vgg_arch import VGGFeatureExtractor
-from basicsr.utils.registry import LOSS_REGISTRY
-from .loss_util import weighted_loss
-
-_reduction_modes = ['none', 'mean', 'sum']
-
-
-@weighted_loss
-def l1_loss(pred, target):
- return F.l1_loss(pred, target, reduction='none')
-
-
-@weighted_loss
-def mse_loss(pred, target):
- return F.mse_loss(pred, target, reduction='none')
-
-
-@weighted_loss
-def charbonnier_loss(pred, target, eps=1e-12):
- return torch.sqrt((pred - target)**2 + eps)
-
-
-@LOSS_REGISTRY.register()
-class L1Loss(nn.Module):
- """L1 (mean absolute error, MAE) loss.
-
- Args:
- loss_weight (float): Loss weight for L1 loss. Default: 1.0.
- reduction (str): Specifies the reduction to apply to the output.
- Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
- """
-
- def __init__(self, loss_weight=1.0, reduction='mean'):
- super(L1Loss, self).__init__()
- if reduction not in ['none', 'mean', 'sum']:
- raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}')
-
- self.loss_weight = loss_weight
- self.reduction = reduction
-
- def forward(self, pred, target, weight=None, **kwargs):
- """
- Args:
- pred (Tensor): of shape (N, C, H, W). Predicted tensor.
- target (Tensor): of shape (N, C, H, W). Ground truth tensor.
- weight (Tensor, optional): of shape (N, C, H, W). Element-wise
- weights. Default: None.
- """
- return self.loss_weight * l1_loss(pred, target, weight, reduction=self.reduction)
-
-
-@LOSS_REGISTRY.register()
-class MSELoss(nn.Module):
- """MSE (L2) loss.
-
- Args:
- loss_weight (float): Loss weight for MSE loss. Default: 1.0.
- reduction (str): Specifies the reduction to apply to the output.
- Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
- """
-
- def __init__(self, loss_weight=1.0, reduction='mean'):
- super(MSELoss, self).__init__()
- if reduction not in ['none', 'mean', 'sum']:
- raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}')
-
- self.loss_weight = loss_weight
- self.reduction = reduction
-
- def forward(self, pred, target, weight=None, **kwargs):
- """
- Args:
- pred (Tensor): of shape (N, C, H, W). Predicted tensor.
- target (Tensor): of shape (N, C, H, W). Ground truth tensor.
- weight (Tensor, optional): of shape (N, C, H, W). Element-wise
- weights. Default: None.
- """
- return self.loss_weight * mse_loss(pred, target, weight, reduction=self.reduction)
-
-
-@LOSS_REGISTRY.register()
-class CharbonnierLoss(nn.Module):
- """Charbonnier loss (one variant of Robust L1Loss, a differentiable
- variant of L1Loss).
-
- Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
- Super-Resolution".
-
- Args:
- loss_weight (float): Loss weight for L1 loss. Default: 1.0.
- reduction (str): Specifies the reduction to apply to the output.
- Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
- eps (float): A value used to control the curvature near zero.
- Default: 1e-12.
- """
-
- def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12):
- super(CharbonnierLoss, self).__init__()
- if reduction not in ['none', 'mean', 'sum']:
- raise ValueError(f'Unsupported reduction mode: {reduction}. ' f'Supported ones are: {_reduction_modes}')
-
- self.loss_weight = loss_weight
- self.reduction = reduction
- self.eps = eps
-
- def forward(self, pred, target, weight=None, **kwargs):
- """
- Args:
- pred (Tensor): of shape (N, C, H, W). Predicted tensor.
- target (Tensor): of shape (N, C, H, W). Ground truth tensor.
- weight (Tensor, optional): of shape (N, C, H, W). Element-wise
- weights. Default: None.
- """
- return self.loss_weight * charbonnier_loss(pred, target, weight, eps=self.eps, reduction=self.reduction)
-
-
-@LOSS_REGISTRY.register()
-class WeightedTVLoss(L1Loss):
- """Weighted TV loss.
-
- Args:
- loss_weight (float): Loss weight. Default: 1.0.
- """
-
- def __init__(self, loss_weight=1.0):
- super(WeightedTVLoss, self).__init__(loss_weight=loss_weight)
-
-    def forward(self, pred, weight=None):
-        # slice the weight map only when it is provided; passing None through keeps the unweighted behaviour
-        y_weight = None if weight is None else weight[:, :, :-1, :]
-        x_weight = None if weight is None else weight[:, :, :, :-1]
-        y_diff = super(WeightedTVLoss, self).forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=y_weight)
-        x_diff = super(WeightedTVLoss, self).forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=x_weight)
-
-        loss = x_diff + y_diff
-
-        return loss
-
-
-@LOSS_REGISTRY.register()
-class PerceptualLoss(nn.Module):
- """Perceptual loss with commonly used style loss.
-
- Args:
- layer_weights (dict): The weight for each layer of vgg feature.
- Here is an example: {'conv5_4': 1.}, which means the conv5_4
- feature layer (before relu5_4) will be extracted with weight
-            1.0 in calculating losses.
- vgg_type (str): The type of vgg network used as feature extractor.
- Default: 'vgg19'.
- use_input_norm (bool): If True, normalize the input image in vgg.
- Default: True.
- range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].
- Default: False.
-        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
-            loss will be calculated and multiplied by this weight.
-            Default: 1.0.
-        style_weight (float): If `style_weight > 0`, the style loss will be
-            calculated and multiplied by this weight.
-            Default: 0.
- criterion (str): Criterion used for perceptual loss. Default: 'l1'.
- """
-
- def __init__(self,
- layer_weights,
- vgg_type='vgg19',
- use_input_norm=True,
- range_norm=False,
- perceptual_weight=1.0,
- style_weight=0.,
- criterion='l1'):
- super(PerceptualLoss, self).__init__()
- self.perceptual_weight = perceptual_weight
- self.style_weight = style_weight
- self.layer_weights = layer_weights
- self.vgg = VGGFeatureExtractor(
- layer_name_list=list(layer_weights.keys()),
- vgg_type=vgg_type,
- use_input_norm=use_input_norm,
- range_norm=range_norm)
-
- self.criterion_type = criterion
- if self.criterion_type == 'l1':
- self.criterion = torch.nn.L1Loss()
-        elif self.criterion_type == 'l2':
-            # torch has no L2Loss class; MSELoss is the L2 criterion
-            self.criterion = torch.nn.MSELoss()
- elif self.criterion_type == 'fro':
- self.criterion = None
- else:
- raise NotImplementedError(f'{criterion} criterion has not been supported.')
-
- def forward(self, x, gt):
- """Forward function.
-
- Args:
- x (Tensor): Input tensor with shape (n, c, h, w).
- gt (Tensor): Ground-truth tensor with shape (n, c, h, w).
-
- Returns:
- Tensor: Forward results.
- """
- # extract vgg features
- x_features = self.vgg(x)
- gt_features = self.vgg(gt.detach())
-
- # calculate perceptual loss
- if self.perceptual_weight > 0:
- percep_loss = 0
- for k in x_features.keys():
- if self.criterion_type == 'fro':
- percep_loss += torch.norm(x_features[k] - gt_features[k], p='fro') * self.layer_weights[k]
- else:
- percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k]
- percep_loss *= self.perceptual_weight
- else:
- percep_loss = None
-
- # calculate style loss
- if self.style_weight > 0:
- style_loss = 0
- for k in x_features.keys():
- if self.criterion_type == 'fro':
- style_loss += torch.norm(
- self._gram_mat(x_features[k]) - self._gram_mat(gt_features[k]), p='fro') * self.layer_weights[k]
- else:
- style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat(
- gt_features[k])) * self.layer_weights[k]
- style_loss *= self.style_weight
- else:
- style_loss = None
-
- return percep_loss, style_loss
-
- def _gram_mat(self, x):
- """Calculate Gram matrix.
-
- Args:
- x (torch.Tensor): Tensor with shape of (n, c, h, w).
-
- Returns:
- torch.Tensor: Gram matrix.
- """
- n, c, h, w = x.size()
- features = x.view(n, c, w * h)
- features_t = features.transpose(1, 2)
- gram = features.bmm(features_t) / (c * h * w)
- return gram
-
-
-@LOSS_REGISTRY.register()
-class GANLoss(nn.Module):
- """Define GAN loss.
-
- Args:
- gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.
- real_label_val (float): The value for real label. Default: 1.0.
- fake_label_val (float): The value for fake label. Default: 0.0.
- loss_weight (float): Loss weight. Default: 1.0.
- Note that loss_weight is only for generators; and it is always 1.0
- for discriminators.
- """
-
- def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0):
- super(GANLoss, self).__init__()
- self.gan_type = gan_type
- self.loss_weight = loss_weight
- self.real_label_val = real_label_val
- self.fake_label_val = fake_label_val
-
- if self.gan_type == 'vanilla':
- self.loss = nn.BCEWithLogitsLoss()
- elif self.gan_type == 'lsgan':
- self.loss = nn.MSELoss()
- elif self.gan_type == 'wgan':
- self.loss = self._wgan_loss
- elif self.gan_type == 'wgan_softplus':
- self.loss = self._wgan_softplus_loss
- elif self.gan_type == 'hinge':
- self.loss = nn.ReLU()
- else:
- raise NotImplementedError(f'GAN type {self.gan_type} is not implemented.')
-
- def _wgan_loss(self, input, target):
- """wgan loss.
-
- Args:
- input (Tensor): Input tensor.
- target (bool): Target label.
-
- Returns:
- Tensor: wgan loss.
- """
- return -input.mean() if target else input.mean()
-
- def _wgan_softplus_loss(self, input, target):
- """wgan loss with soft plus. softplus is a smooth approximation to the
- ReLU function.
-
- In StyleGAN2, it is called:
- Logistic loss for discriminator;
- Non-saturating loss for generator.
-
- Args:
- input (Tensor): Input tensor.
- target (bool): Target label.
-
- Returns:
- Tensor: wgan loss.
- """
- return F.softplus(-input).mean() if target else F.softplus(input).mean()
-
- def get_target_label(self, input, target_is_real):
- """Get target label.
-
- Args:
- input (Tensor): Input tensor.
- target_is_real (bool): Whether the target is real or fake.
-
- Returns:
- (bool | Tensor): Target tensor. Return bool for wgan, otherwise,
- return Tensor.
- """
-
- if self.gan_type in ['wgan', 'wgan_softplus']:
- return target_is_real
- target_val = (self.real_label_val if target_is_real else self.fake_label_val)
- return input.new_ones(input.size()) * target_val
-
- def forward(self, input, target_is_real, is_disc=False):
- """
- Args:
- input (Tensor): The input for the loss module, i.e., the network
- prediction.
-            target_is_real (bool): Whether the target is real or fake.
- is_disc (bool): Whether the loss for discriminators or not.
- Default: False.
-
- Returns:
- Tensor: GAN loss value.
- """
- target_label = self.get_target_label(input, target_is_real)
- if self.gan_type == 'hinge':
- if is_disc: # for discriminators in hinge-gan
- input = -input if target_is_real else input
- loss = self.loss(1 + input).mean()
- else: # for generators in hinge-gan
- loss = -input.mean()
- else: # other gan types
- loss = self.loss(input, target_label)
-
- # loss_weight is always 1.0 for discriminators
- return loss if is_disc else loss * self.loss_weight
-
-
-def r1_penalty(real_pred, real_img):
- """R1 regularization for discriminator. The core idea is to
- penalize the gradient on real data alone: when the
- generator distribution produces the true data distribution
- and the discriminator is equal to 0 on the data manifold, the
- gradient penalty ensures that the discriminator cannot create
- a non-zero gradient orthogonal to the data manifold without
- suffering a loss in the GAN game.
-
- Ref:
- Eq. 9 in Which training methods for GANs do actually converge.
- """
- grad_real = autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0]
- grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
- return grad_penalty
-
-
-def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
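-    # Path-length regularization as used in StyleGAN2: penalize deviation of the generator's Jacobian scale from its running mean.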
- noise = torch.randn_like(fake_img) / math.sqrt(fake_img.shape[2] * fake_img.shape[3])
- grad = autograd.grad(outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0]
- path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
-
- path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
-
- path_penalty = (path_lengths - path_mean).pow(2).mean()
-
- return path_penalty, path_lengths.detach().mean(), path_mean.detach()
-
-
-def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None):
- """Calculate gradient penalty for wgan-gp.
-
- Args:
- discriminator (nn.Module): Network for the discriminator.
- real_data (Tensor): Real input data.
- fake_data (Tensor): Fake input data.
- weight (Tensor): Weight tensor. Default: None.
-
- Returns:
- Tensor: A tensor for gradient penalty.
- """
-
- batch_size = real_data.size(0)
- alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1))
-
- # interpolate between real_data and fake_data
- interpolates = alpha * real_data + (1. - alpha) * fake_data
- interpolates = autograd.Variable(interpolates, requires_grad=True)
-
- disc_interpolates = discriminator(interpolates)
- gradients = autograd.grad(
- outputs=disc_interpolates,
- inputs=interpolates,
- grad_outputs=torch.ones_like(disc_interpolates),
- create_graph=True,
- retain_graph=True,
- only_inputs=True)[0]
-
- if weight is not None:
- gradients = gradients * weight
-
- gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
- if weight is not None:
- gradients_penalty /= torch.mean(weight)
-
- return gradients_penalty
diff --git a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/base_dataset.py b/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/base_dataset.py
deleted file mode 100644
index 57595dd0bf9dd20e333bd78a6a97013b9a6d0a43..0000000000000000000000000000000000000000
--- a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/base_dataset.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import torch.utils.data as data
-from PIL import Image
-import torchvision.transforms as transforms
-import numpy as np
-import random
-
-
-class BaseDataset(data.Dataset):
- def __init__(self):
- super(BaseDataset, self).__init__()
-
- @staticmethod
- def modify_commandline_options(parser, is_train):
- return parser
-
- def initialize(self, opt):
- pass
-
-
-def get_params(opt, size):
- w, h = size
- new_h = h
- new_w = w
- if opt.preprocess_mode == "resize_and_crop":
- new_h = new_w = opt.load_size
- elif opt.preprocess_mode == "scale_width_and_crop":
- new_w = opt.load_size
- new_h = opt.load_size * h // w
- elif opt.preprocess_mode == "scale_shortside_and_crop":
- ss, ls = min(w, h), max(w, h) # shortside and longside
- width_is_shorter = w == ss
- ls = int(opt.load_size * ls / ss)
- new_w, new_h = (ss, ls) if width_is_shorter else (ls, ss)
-
- x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
- y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
-
- flip = random.random() > 0.5
- return {"crop_pos": (x, y), "flip": flip}
-
-
-def get_transform(opt, params, method=Image.BICUBIC, normalize=True, toTensor=True):
- transform_list = []
- if "resize" in opt.preprocess_mode:
- osize = [opt.load_size, opt.load_size]
- transform_list.append(transforms.Resize(osize, interpolation=method))
- elif "scale_width" in opt.preprocess_mode:
- transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method)))
- elif "scale_shortside" in opt.preprocess_mode:
- transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, method)))
-
- if "crop" in opt.preprocess_mode:
- transform_list.append(transforms.Lambda(lambda img: __crop(img, params["crop_pos"], opt.crop_size)))
-
- if opt.preprocess_mode == "none":
- base = 32
- transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
-
- if opt.preprocess_mode == "fixed":
- w = opt.crop_size
- h = round(opt.crop_size / opt.aspect_ratio)
- transform_list.append(transforms.Lambda(lambda img: __resize(img, w, h, method)))
-
- if opt.isTrain and not opt.no_flip:
- transform_list.append(transforms.Lambda(lambda img: __flip(img, params["flip"])))
-
- if toTensor:
- transform_list += [transforms.ToTensor()]
-
- if normalize:
- transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
- return transforms.Compose(transform_list)
-
-
-def normalize():
- return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
-
-
-def __resize(img, w, h, method=Image.BICUBIC):
- return img.resize((w, h), method)
-
-
-def __make_power_2(img, base, method=Image.BICUBIC):
- ow, oh = img.size
- h = int(round(oh / base) * base)
- w = int(round(ow / base) * base)
- if (h == oh) and (w == ow):
- return img
- return img.resize((w, h), method)
-
-
-def __scale_width(img, target_width, method=Image.BICUBIC):
- ow, oh = img.size
- if ow == target_width:
- return img
- w = target_width
- h = int(target_width * oh / ow)
- return img.resize((w, h), method)
-
-
-def __scale_shortside(img, target_width, method=Image.BICUBIC):
- ow, oh = img.size
- ss, ls = min(ow, oh), max(ow, oh) # shortside and longside
- width_is_shorter = ow == ss
- if ss == target_width:
- return img
- ls = int(target_width * ls / ss)
- nw, nh = (ss, ls) if width_is_shorter else (ls, ss)
- return img.resize((nw, nh), method)
-
-
-def __crop(img, pos, size):
- ow, oh = img.size
- x1, y1 = pos
- tw = th = size
- return img.crop((x1, y1, x1 + tw, y1 + th))
-
-
-def __flip(img, flip):
- if flip:
- return img.transpose(Image.FLIP_LEFT_RIGHT)
- return img
diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/app.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/app.py
deleted file mode 100644
index beffa167a27090b0f69a751f2e9198370aec5d6f..0000000000000000000000000000000000000000
--- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/app.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import gradio as gr
-import os
-import cv2
-import shutil
-import sys
-from subprocess import call
-import torch
-import numpy as np
-from skimage import color
-import torchvision.transforms as transforms
-from PIL import Image
-import torch
-
-os.system("pip install dlib")
-os.system('bash setup.sh')
-
-def lab2rgb(L, AB):
- """Convert an Lab tensor image to a RGB numpy output
- Parameters:
- L (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
- AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
-
- Returns:
- rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
- """
- AB2 = AB * 110.0
- L2 = (L + 1.0) * 50.0
- Lab = torch.cat([L2, AB2], dim=1)
- Lab = Lab[0].data.cpu().float().numpy()
- Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
- rgb = color.lab2rgb(Lab) * 255
- return rgb
-
-def get_transform(model_name,params=None, grayscale=False, method=Image.BICUBIC):
- #params
- preprocess = 'resize'
- load_size = 256
- crop_size = 256
- transform_list = []
- if grayscale:
- transform_list.append(transforms.Grayscale(1))
- if model_name == "Pix2Pix Unet 256":
- osize = [load_size, load_size]
- transform_list.append(transforms.Resize(osize, method))
- # if 'crop' in preprocess:
- # if params is None:
- # transform_list.append(transforms.RandomCrop(crop_size))
-
- return transforms.Compose(transform_list)
-
-def inferRestoration(img, model_name):
- #if model_name == "Pix2Pix":
- model = torch.hub.load('manhkhanhad/ImageRestorationInfer', 'pix2pixRestoration_unet256')
- transform_list = [
- transforms.ToTensor(),
- transforms.Resize([256,256], Image.BICUBIC),
- transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
- ]
- transform = transforms.Compose(transform_list)
- img = transform(img)
- img = torch.unsqueeze(img, 0)
- result = model(img)
- result = result[0].detach()
- result = (result +1)/2.0
-
- result = transforms.ToPILImage()(result)
- return result
-
-def inferColorization(img,model_name):
- #print(model_name)
- if model_name == "Pix2Pix Resnet 9block":
- model = torch.hub.load('manhkhanhad/ImageRestorationInfer', 'pix2pixColorization_resnet9b')
- elif model_name == "Pix2Pix Unet 256":
- model = torch.hub.load('manhkhanhad/ImageRestorationInfer', 'pix2pixColorization_unet256')
- elif model_name == "Deoldify":
- model = torch.hub.load('manhkhanhad/ImageRestorationInfer', 'DeOldifyColorization')
- transform_list = [
- transforms.ToTensor(),
- transforms.Normalize((0.5,), (0.5,))
- ]
- transform = transforms.Compose(transform_list)
- #a = transforms.ToTensor()(a)
- img = img.convert('L')
- img = transform(img)
- img = torch.unsqueeze(img, 0)
- result = model(img)
-
- result = result[0].detach()
- result = (result +1)/2.0
-
- #img = transforms.Grayscale(3)(img)
- #img = transforms.ToTensor()(img)
- #img = torch.unsqueeze(img, 0)
- #result = model(img)
- #result = torch.clip(result, min=0, max=1)
- image_pil = transforms.ToPILImage()(result)
- return image_pil
-
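-    # NOTE: the Lab-based path below is unreachable because the function always returns above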
- transform_seq = get_transform(model_name)
- img = transform_seq(img)
- # if model_name == "Pix2Pix Unet 256":
- # img.resize((256,256))
- img = np.array(img)
- lab = color.rgb2lab(img).astype(np.float32)
- lab_t = transforms.ToTensor()(lab)
- A = lab_t[[0], ...] / 50.0 - 1.0
- B = lab_t[[1, 2], ...] / 110.0
- #data = {'A': A, 'B': B, 'A_paths': "", 'B_paths': ""}
- L = torch.unsqueeze(A, 0)
- #print(L.shape)
- ab = model(L)
- Lab = lab2rgb(L, ab).astype(np.uint8)
- image_pil = Image.fromarray(Lab)
- #image_pil.save('test.png')
- #print(Lab.shape)
- return image_pil
-
-def colorizaition(image,model_name):
- image = Image.fromarray(image)
- result = inferColorization(image,model_name)
- return result
-
-
-def run_cmd(command):
- try:
- call(command, shell=True)
- except KeyboardInterrupt:
- print("Process interrupted")
- sys.exit(1)
-
-def run(image,Restoration_mode, Colorizaition_mode):
- if Restoration_mode == "BOPBTL":
- if os.path.isdir("Temp"):
- shutil.rmtree("Temp")
-
- os.makedirs("Temp")
- os.makedirs("Temp/input")
- print(type(image))
- cv2.imwrite("Temp/input/input_img.png", image)
-
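-        # run the BOPBTL (Bringing Old Photos Back to Life) restoration pipeline as a subprocess on CPU (--GPU -1) with scratch removal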
- command = ("python run.py --input_folder "
- + "Temp/input"
- + " --output_folder "
- + "Temp"
- + " --GPU "
- + "-1"
- + " --with_scratch")
- run_cmd(command)
-
- result_restoration = Image.open("Temp/final_output/input_img.png")
- shutil.rmtree("Temp")
-
- elif Restoration_mode == "Pix2Pix":
- result_restoration = inferRestoration(image, Restoration_mode)
- print("Restoration_mode",Restoration_mode)
-
- result_colorization = inferColorization(result_restoration,Colorizaition_mode)
-
- return result_colorization
-
-examples = [['example/1.jpeg',"BOPBTL","Deoldify"],['example/2.jpg',"BOPBTL","Deoldify"],['example/3.jpg',"BOPBTL","Deoldify"],['example/4.jpg',"BOPBTL","Deoldify"]]
-iface = gr.Interface(run,
- [gr.inputs.Image(),gr.inputs.Radio(["BOPBTL", "Pix2Pix"]),gr.inputs.Radio(["Deoldify", "Pix2Pix Resnet 9block","Pix2Pix Unet 256"])],
- outputs="image",
- examples=examples).launch(debug=True,share=False)
\ No newline at end of file
diff --git a/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py b/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py
deleted file mode 100644
index 47ab4b1594faf1e9f1ce962fb980d80295b1f079..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Base classes for the datasets that also provide non-audio metadata,
-e.g. description, text transcription etc.
-"""
-from dataclasses import dataclass
-import logging
-import math
-import re
-import typing as tp
-
-import torch
-
-from .audio_dataset import AudioDataset, AudioMeta
-from ..environment import AudioCraftEnvironment
-from ..modules.conditioners import SegmentWithAttributes, ConditioningAttributes
-
-
-logger = logging.getLogger(__name__)
-
-
-def _clusterify_meta(meta: AudioMeta) -> AudioMeta:
- """Monkey-patch meta to match cluster specificities."""
- meta.path = AudioCraftEnvironment.apply_dataset_mappers(meta.path)
- if meta.info_path is not None:
- meta.info_path.zip_path = AudioCraftEnvironment.apply_dataset_mappers(meta.info_path.zip_path)
- return meta
-
-
-def clusterify_all_meta(meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]:
- """Monkey-patch all meta to match cluster specificities."""
- return [_clusterify_meta(m) for m in meta]
-
-
-@dataclass
-class AudioInfo(SegmentWithAttributes):
- """Dummy SegmentInfo with empty attributes.
-
- The InfoAudioDataset is expected to return metadata that inherits
- from SegmentWithAttributes class and can return conditioning attributes.
-
-    This basically guarantees that all datasets will be compatible with the
-    current solvers that contain conditioners requiring this.
- """
- audio_tokens: tp.Optional[torch.Tensor] = None # populated when using cached batch for training a LM.
-
- def to_condition_attributes(self) -> ConditioningAttributes:
- return ConditioningAttributes()
-
-
-class InfoAudioDataset(AudioDataset):
- """AudioDataset that always returns metadata as SegmentWithAttributes along with the audio waveform.
-
- See `audiocraft.data.audio_dataset.AudioDataset` for initialization arguments.
- """
- def __init__(self, meta: tp.List[AudioMeta], **kwargs):
- super().__init__(clusterify_all_meta(meta), **kwargs)
-
- def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentWithAttributes]]:
- if not self.return_info:
- wav = super().__getitem__(index)
- assert isinstance(wav, torch.Tensor)
- return wav
- wav, meta = super().__getitem__(index)
- return wav, AudioInfo(**meta.to_dict())
-
-
-def get_keyword_or_keyword_list(value: tp.Optional[str]) -> tp.Union[tp.Optional[str], tp.Optional[tp.List[str]]]:
- """Preprocess a single keyword or possible a list of keywords."""
- if isinstance(value, list):
- return get_keyword_list(value)
- else:
- return get_keyword(value)
-
-
-def get_string(value: tp.Optional[str]) -> tp.Optional[str]:
- """Preprocess a single keyword."""
- if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
- return None
- else:
- return value.strip()
-
-
-def get_keyword(value: tp.Optional[str]) -> tp.Optional[str]:
- """Preprocess a single keyword."""
- if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
- return None
- else:
- return value.strip().lower()
-
-
-def get_keyword_list(values: tp.Union[str, tp.List[str]]) -> tp.Optional[tp.List[str]]:
- """Preprocess a list of keywords."""
- if isinstance(values, str):
- values = [v.strip() for v in re.split(r'[,\s]', values)]
- elif isinstance(values, float) and math.isnan(values):
- values = []
- if not isinstance(values, list):
- logger.debug(f"Unexpected keyword list {values}")
- values = [str(values)]
-
- kws = [get_keyword(v) for v in values]
- kw_list = [k for k in kws if k is not None]
- if len(kw_list) == 0:
- return None
- else:
- return kw_list
diff --git a/spaces/matthoffner/open-codetree/components/Loader.tsx b/spaces/matthoffner/open-codetree/components/Loader.tsx
deleted file mode 100644
index 4be0f62969723895e409c78b9b143380418bd20f..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/open-codetree/components/Loader.tsx
+++ /dev/null
@@ -1,64 +0,0 @@
-import React from "react";
-
-interface LoaderProps {
- size?: number;
- color?: string;
-}
-
-const Loader = ({ size = 50, color = "#FFFFFF" }: LoaderProps) => {
- return (
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- );
-};
-
-export default Loader;
diff --git a/spaces/matthoffner/starchat-ui/types/plugin.ts b/spaces/matthoffner/starchat-ui/types/plugin.ts
deleted file mode 100644
index 43da6c07b0f5c6ee022225babe72cb58ff0939f4..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/starchat-ui/types/plugin.ts
+++ /dev/null
@@ -1,39 +0,0 @@
-import { KeyValuePair } from './data';
-
-export interface Plugin {
- id: PluginID;
- name: PluginName;
- requiredKeys: KeyValuePair[];
-}
-
-export interface PluginKey {
- pluginId: PluginID;
- requiredKeys: KeyValuePair[];
-}
-
-export enum PluginID {
- GOOGLE_SEARCH = 'google-search',
-}
-
-export enum PluginName {
- GOOGLE_SEARCH = 'Google Search',
-}
-
-export const Plugins: Record<PluginID, Plugin> = {
- [PluginID.GOOGLE_SEARCH]: {
- id: PluginID.GOOGLE_SEARCH,
- name: PluginName.GOOGLE_SEARCH,
- requiredKeys: [
- {
- key: 'GOOGLE_API_KEY',
- value: '',
- },
- {
- key: 'GOOGLE_CSE_ID',
- value: '',
- },
- ],
- },
-};
-
-export const PluginList = Object.values(Plugins);
diff --git a/spaces/maxmax20160403/sovits5.0/hubert/__init__.py b/spaces/maxmax20160403/sovits5.0/hubert/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/mayura25/handwritten_digit_recognition/README.md b/spaces/mayura25/handwritten_digit_recognition/README.md
deleted file mode 100644
index 9bf847eaf8b1b71d2a2232c858c63e37bac55d0f..0000000000000000000000000000000000000000
--- a/spaces/mayura25/handwritten_digit_recognition/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Handwritten Digit Recognition
-emoji: 🌍
-colorFrom: gray
-colorTo: red
-sdk: gradio
-sdk_version: 4.1.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/model/fiber.py b/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/model/fiber.py
deleted file mode 100644
index 38db33b0d27d70116c92650176170e9b3cf9a9c7..0000000000000000000000000000000000000000
--- a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/model/fiber.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-#
-# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
-# SPDX-License-Identifier: MIT
-
-
-from collections import namedtuple
-from itertools import product
-from typing import Dict
-
-import torch
-from torch import Tensor
-
-from se3_transformer.runtime.utils import degree_to_dim
-
-FiberEl = namedtuple('FiberEl', ['degree', 'channels'])
-
-
-class Fiber(dict):
- """
- Describes the structure of some set of features.
- Features are split into types (0, 1, 2, 3, ...). A feature of type k has a dimension of 2k+1.
- Type-0 features: invariant scalars
- Type-1 features: equivariant 3D vectors
- Type-2 features: equivariant symmetric traceless matrices
- ...
-
-    As inputs to an SE3 layer, there can be many features of the same type, and many features of different types.
- The 'multiplicity' or 'number of channels' is the number of features of a given type.
- This class puts together all the degrees and their multiplicities in order to describe
- the inputs, outputs or hidden features of SE3 layers.
- """
-
- def __init__(self, structure):
- if isinstance(structure, dict):
- structure = [FiberEl(int(d), int(m)) for d, m in sorted(structure.items(), key=lambda x: x[1])]
- elif not isinstance(structure[0], FiberEl):
- structure = list(map(lambda t: FiberEl(*t), sorted(structure, key=lambda x: x[1])))
- self.structure = structure
- super().__init__({d: m for d, m in self.structure})
-
- @property
- def degrees(self):
- return sorted([t.degree for t in self.structure])
-
- @property
- def channels(self):
- return [self[d] for d in self.degrees]
-
- @property
- def num_features(self):
- """ Size of the resulting tensor if all features were concatenated together """
- return sum(t.channels * degree_to_dim(t.degree) for t in self.structure)
-
- @staticmethod
- def create(num_degrees: int, num_channels: int):
- """ Create a Fiber with degrees 0..num_degrees-1, all with the same multiplicity """
- return Fiber([(degree, num_channels) for degree in range(num_degrees)])
-
- @staticmethod
- def from_features(feats: Dict[str, Tensor]):
- """ Infer the Fiber structure from a feature dict """
- structure = {}
- for k, v in feats.items():
- degree = int(k)
- assert len(v.shape) == 3, 'Feature shape should be (N, C, 2D+1)'
- assert v.shape[-1] == degree_to_dim(degree)
- structure[degree] = v.shape[-2]
- return Fiber(structure)
-
- def __getitem__(self, degree: int):
- """ fiber[degree] returns the multiplicity for this degree """
- return dict(self.structure).get(degree, 0)
-
- def __iter__(self):
- """ Iterate over namedtuples (degree, channels) """
- return iter(self.structure)
-
- def __mul__(self, other):
- """
-        If other is an int, multiplies all the multiplicities by other.
-        If other is a Fiber, returns the cartesian product of the two structures.
- """
- if isinstance(other, Fiber):
- return product(self.structure, other.structure)
- elif isinstance(other, int):
- return Fiber({t.degree: t.channels * other for t in self.structure})
-
- def __add__(self, other):
- """
-        If other is an int, adds other to all the multiplicities.
-        If other is a Fiber, adds the multiplicities of the two fibers for each degree in this fiber.
- """
- if isinstance(other, Fiber):
- return Fiber({t.degree: t.channels + other[t.degree] for t in self.structure})
- elif isinstance(other, int):
- return Fiber({t.degree: t.channels + other for t in self.structure})
-
- def __repr__(self):
- return str(self.structure)
-
- @staticmethod
- def combine_max(f1, f2):
- """ Combine two fiber by taking the maximum multiplicity for each degree in both fibers """
- new_dict = dict(f1.structure)
- for k, m in f2.structure:
- new_dict[k] = max(new_dict.get(k, 0), m)
-
- return Fiber(list(new_dict.items()))
-
- @staticmethod
- def combine_selectively(f1, f2):
- """ Combine two fiber by taking the sum of multiplicities for each degree in the first fiber """
- # only use orders which occur in fiber f1
- new_dict = dict(f1.structure)
- for k in f1.degrees:
- if k in f2.degrees:
- new_dict[k] += f2[k]
- return Fiber(list(new_dict.items()))
-
- def to_attention_heads(self, tensors: Dict[str, Tensor], num_heads: int):
- # dict(N, num_channels, 2d+1) -> (N, num_heads, -1)
- fibers = [tensors[str(degree)].reshape(*tensors[str(degree)].shape[:-2], num_heads, -1) for degree in
- self.degrees]
- fibers = torch.cat(fibers, -1)
- return fibers
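
The Fiber class above tracks, for each feature degree d, how many channels of that degree are present; since a degree-d feature spans 2d+1 dimensions, the concatenated feature size is the sum of channels * (2d+1). A minimal Python sketch of that bookkeeping (the structure dict is a made-up example, and degree_to_dim is restated locally so the snippet stands alone):

# Hedged sketch of the degree/multiplicity bookkeeping described in the Fiber docstring.
# The structure below is an arbitrary example, not taken from the deleted module.
structure = {0: 32, 1: 16, 2: 8}              # degree -> number of channels

def degree_to_dim(degree: int) -> int:
    return 2 * degree + 1                     # a type-d feature lives in a (2d+1)-dim space

num_features = sum(ch * degree_to_dim(d) for d, ch in structure.items())
print(num_features)                           # 32*1 + 16*3 + 8*5 = 120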
diff --git a/spaces/merve/data-leak/source/anonymization/style-graph-scroll.css b/spaces/merve/data-leak/source/anonymization/style-graph-scroll.css
deleted file mode 100644
index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000
--- a/spaces/merve/data-leak/source/anonymization/style-graph-scroll.css
+++ /dev/null
@@ -1,160 +0,0 @@
-/** { border: 1px solid #f00; }*/
-
-
-#container{
- position: relative;
- width: auto;
- margin-left: -25px;
- /*margin-bottom: 100px;*/
-}
-
-#sections{
- width: 330px;
- pointer-events: none;
-}
-
-#sections > div{
- background: white;
- opacity: .2;
- margin-bottom: 400px;
- line-height: 1.4em;
- transition: opacity .2s;
- pointer-events: all;
-}
-#sections > div:last-child{
- height: 480px;
- margin-bottom: 0px;
-}
-#sections > div.graph-scroll-active{
- opacity: 1;
-}
-
-#graph{
- margin-left: 40px;
- width: 500px;
- position: -webkit-sticky;
- position: sticky;
- top: 0px;
- float: right;
- height: 580px;
-}
-
-.slider-outer {
- display: block;
- max-width: 300px;
-}
-
-@media (max-width: 925px) {
- #container{
- margin-left: 0px;
- }
-
- #graph{
- width: 100%;
- float: none;
- max-width: 500px;
- margin: 0px auto;
- }
-
- #graph > div{
- position: relative;
- left:12px;
- }
-
- #sections{
- width: auto;
- position: relative;
- margin: 0px auto;
- }
-
- #sections > div{
- background: rgba(255,255,255,.8);
- padding: 10px;
- border-top: 1px solid;
- border-bottom: 1px solid;
- margin-bottom: 80vh;
- width: calc(100vw - 20px);
- margin-left: -5px;
- }
-
- #sections > div > *{
- max-width: 750px;
- }
-
- #sections > div:first-child{
- opacity: 1;
- margin-top: -260px;
- }
-
- #sections > div:last-child{
- height: auto;
- }
-
- #sections h3{
- margin-top: .5em;
- }
-
- /* Adjust buttons for mobile. */
-
- .button-container{
- text-align: center;
- left:0px;
- }
-
- /* Adjust sliders for mobile. */
- input[type="range" i] {
- width: 280px;
- }
- .slider-label-container{
- width: 145px;
- /* display: inline-block; */
- }
-
- .slide-container-heads-prob, .slide-container-population {
- text-align: center;
- }
-
- .slider-container {
- margin-bottom: 5px;
- text-align: center;
- width: 300px;
- /* display:inline-block; */
- }
-
- .slider-outer {
- text-align: center;
- display: flex;
- max-width: 300px;
- }
-
- .headsProb, .population {
- margin-left: 15px;
- }
-
- .slide-container-population {
- margin-bottom: -10px;
- }
-
- .pointer div {
- left: 10px;
- top: 37px;
- }
-
- /* Adjust post summary test for mobile. */
- .post-summary{
- margin-left: 8px;
- margin-bottom: 60px;
- margin-top: 40px;
- }
-
-}
-
-#graph > div{
-  margin: 20px 35px;
-}
-
-
-#end{
- height: 15vh;
-}
-
diff --git a/spaces/merve/fill-in-the-blank/source/base-rate/sliders.js b/spaces/merve/fill-in-the-blank/source/base-rate/sliders.js
deleted file mode 100644
index 994c9ba490dc44dfa015553d32ff24e822f16de0..0000000000000000000000000000000000000000
--- a/spaces/merve/fill-in-the-blank/source/base-rate/sliders.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright 2020 Google LLC. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-
-
-
-
-var sliderVals = {}
-
-var sliders = [
- {
- key: 'fNoiseMag',
- text: 'Feature Noise',
- r: [0, 1],
- v: .5
- },
- {
- key: 'fBiasMag',
- text: 'Feature Bias',
- r: [0, 1],
- v: .2
- },
-]
-
-!(function(){
- var width = 145
- var height = 30
-
- sliders.forEach(d => {
- d.s = d3.scaleLinear().domain(d.r).range([0, width])
- sliderVals[d.key] = d
- })
-
- var sliderSel = d3.select('.slider').html('')
- .appendMany('div', sliders)
- .at({class: d => d.key})
- .st({
- display: 'inline-block',
- width: width,
- paddingRight: 60,
- marginTop: 20,
- color: '#000'
- })
-
- sliderSel.append('div')
- .text(d => d.text)
- .st({marginBottom: height/2})
-
- var svgSel = sliderSel.append('svg').at({width, height})
- .on('click', function(d){
- d.v = d.s.invert(d3.mouse(this)[0])
- updatePos()
- })
- .st({
- cursor: 'pointer'
- })
- .append('g').translate(height/2, 1)
- svgSel.append('rect').at({width, height, y: -height/2, fill: '#fff'})
-
- svgSel.append('path').at({
- d: `M 0 0 H ${width}`,
- stroke: '#000',
- strokeWidth: 2
- })
-
- var drag = d3.drag()
- .on('drag', function(d){
- var x = d3.mouse(this)[0]
- d.v = d3.clamp(d3.min(d.r), d.s.invert(x), d3.max(d.r))
-
- updatePos()
- })
-
- var circleSel = svgSel.append('circle')
- .at({
- r: height/2,
- stroke: '#000',
- strokeWidth: 2,
- fill: '#fff',
- })
- .call(drag)
-
-
- function updatePos(){
- circleSel.at({cx: d => d.s(d.v)})
- if (sliderVals.onUpdate) sliderVals.onUpdate()
- }
-
- updatePos()
- sliderVals.updatePos = updatePos
-})()
diff --git a/spaces/merve/measuring-fairness/public/third_party/index.js b/spaces/merve/measuring-fairness/public/third_party/index.js
deleted file mode 100644
index e070ccfa3ac2645f9431b1e4dbee36e81692574d..0000000000000000000000000000000000000000
--- a/spaces/merve/measuring-fairness/public/third_party/index.js
+++ /dev/null
@@ -1,74 +0,0 @@
-// https://github.com/1wheel/roadtolarissa Copyright 2018 Adam Pearce
-
-var fs = require('fs')
-var {exec, execSync} = require('child_process')
-
-var source = `${__dirname}/../../source`
-var public = `${__dirname}/../../public`
-if (!fs.existsSync(public)) fs.mkdirSync(public)
-
-function rsyncSource(){
- exec(`rsync -a --exclude _posts --exclude _templates ${source}/ ${public}/`)
-}
-rsyncSource()
-
-var hljs = require('highlight.js')
-var marked = require('marked')
-marked.setOptions({
- highlight: (code, lang) => hljs.highlight(lang || 'html', code).value,
- smartypants: true
-})
-
-var templates = {}
-readdirAbs(`${source}/_templates`).forEach(path => {
- var str = fs.readFileSync(path, 'utf8')
- var templateName = path.split('_templates/')[1]
- templates[templateName] = d => eval('`' + str + '`')
-})
-
-function readdirAbs(dir){ return fs.readdirSync(dir).map(d => dir + '/' + d) }
-
-var posts = readdirAbs(`${source}/_posts`)
- .filter(d => !d.includes('.DS_Store'))
- .map(parsePost)
-
-fs.writeFileSync(public + '/rss.xml', templates['rss.xml'](posts))
-fs.writeFileSync(public + '/sitemap.xml', templates['sitemap.xml'](posts))
-
-function parsePost(path){
- var str = fs.readFileSync(path, 'utf8')
- if (str[0] == '<') str = str.split('License.\n-->')[1]
- var [top, body] = str
- .replace('---\n', '')
- .split('\n---\n')
-
- console.log(path)
-
- var post = {html: path.includes('.html') ? body : marked(body)}
- top.split('\n').forEach(line => {
- var [key, val] = line.split(/: (.+)/)
- post[key] = val
- })
-
- return post
-}
-
-function writePost(post){
- var dir = public + post.permalink
- if (!fs.existsSync(dir)) execSync(`mkdir -p ${dir}`)
- fs.writeFileSync(`${dir}/index.html`, templates[post.template](post))
-
- var outposts = JSON.parse(JSON.stringify(posts))
- outposts.forEach(d => delete d.html)
- fs.writeFileSync(public + '/posts.json', JSON.stringify(outposts, null, 2))
-
-
-}
-posts.forEach(writePost)
-
-if (process.argv.includes('--watch')){
- require('chokidar').watch(source).on('change', path => {
- rsyncSource()
- if (path.includes('_posts/')) writePost(parsePost(path))
- })
-}
diff --git a/spaces/mikeee/radiobee-dev/radiobee/process_upload.py b/spaces/mikeee/radiobee-dev/radiobee/process_upload.py
deleted file mode 100644
index b61958bbf7cdad4d799443aafce9226c7f39d4ab..0000000000000000000000000000000000000000
--- a/spaces/mikeee/radiobee-dev/radiobee/process_upload.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""Process uploads."""
-# pylint: disable=invalid-name, unused-import
-from typing import Union
-
-from pathlib import Path
-import tempfile
-import cchardet
-from logzero import logger
-
-
-def process_upload(upload: Union[tempfile._TemporaryFileWrapper, bytes]) -> str:
- """Process upload (fileobj or bytes(zip file: io.BytesIO further to zipfile.ZipFile)).
-
- gr.inputs.File("file"): upload normal file
- gr.inputs.File("bytes"): upload zip file
-
- """
- if isinstance(upload, bytes):
- logger.warning("Not implemented, yet, for zip file")
- return "Not implemented, yet, for zip file"
-
- try:
- fpath = Path(upload.name)
- except Exception as e:
- logger.error("Path(upload.name) error: %s", e)
- return str(e)
-
- suffixes = [
- "",
- ".txt",
- ".text",
- ".md",
- "tsv",
- ]
- # check .txt .md ''(no suffix)
- if fpath.suffix.lower() not in suffixes:
- logger.warning('suffix: [%s] not in %s', fpath.suffix, suffixes)
- # return "File type not supported, yet."
-
- try:
- data = Path(upload.name).read_bytes()
- except Exception as e:
- logger.error("Unable to read data from %s, errors: %s", fpath, e)
- data = str(e).encode()
-
- # no data, empty file, return ""
- if not data:
- logger.info("empty file: %s", upload.name)
- return ""
-
- encoding = cchardet.detect(data).get("encoding")
-
- if encoding is not None:
- try:
- text = fpath.read_text(encoding=encoding)
- except Exception as e:
- logger.error("Unable to retrieve text, error: %s", e)
- text = str(e)
-
- # return f"{upload.name} {type(upload)}\n\n{text}"
- # return f"{upload.name}\n{text}"
- return text
-
-    # cchardet could not detect an encoding (encoding is None): likely docx, pdf, epub, zip etc.
- logger.info("Trying docx...to be implemented")
-
-    # TODO .docx .epub .mobi .pdf etc.
-
- _ = Path(upload.name)
- msg = f"binary file: {_.stem[:-8]}{_.suffix}"
- logger.warning("%s", msg)
-
- return msg
-
-
-_ = ''' # colab gradio-file-inputs-upload.ipynb
-# file_to_text/process_file
-def zip_to_text(file_obj):
- """
- # zf = zipfile.ZipFile('german-recipes-dataset.zip')
- zf = file_obj
- namelist = zipfile.ZipFile.namelist(zf);
- # filename = zf.open(namelist[0]);
- file_contents = []
- for filename in namelist:
- with zf.open(filename) as fhandle:
- file_contents.append(fhandle.read().decode())
- """
- # fileobj is
-
- # gr.inputs.File("bytes")
- if isinstance(file_obj, bytes):
- data = file_obj.decode()
- return f"{type(file_obj)}\n{dir(file_obj)}\n{data}"
-
- # "file"/gr.inputs.File("file") file_obj.name: /tmp/READMEzm8hc5ze.md
- data = Path(file_obj.name).read_bytes()
- return f"{file_obj.name} {type(file_obj)}\n{dir(file_obj)} \n{data}"
-# '''
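
The upload path above relies on cchardet to guess the text encoding before decoding: detect() on the raw bytes, then read_text() with the detected encoding, falling back to binary handling when no encoding is found. A minimal sketch of that flow, assuming a hypothetical example.txt path:

# Hedged sketch of the detect-then-decode flow used in process_upload above.
from pathlib import Path
import cchardet

fpath = Path("example.txt")                       # hypothetical path, for illustration only
data = fpath.read_bytes()
encoding = cchardet.detect(data).get("encoding")  # e.g. "UTF-8", or None for binary content
text = fpath.read_text(encoding=encoding) if encoding else ""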
diff --git a/spaces/ml6team/Knowledge-graphs/utils.py b/spaces/ml6team/Knowledge-graphs/utils.py
deleted file mode 100644
index 5b23e942e54cc855989de77ade8370a3a66fc7f2..0000000000000000000000000000000000000000
--- a/spaces/ml6team/Knowledge-graphs/utils.py
+++ /dev/null
@@ -1,8 +0,0 @@
-
-def clip_text(t, length=4):
-    t_sub = t.replace("...", "dotdotdot")  # protect literal ellipses from the '.' split below
-    t_clipped = ".".join(t_sub.split(".")[:length]) + "."
-    t_reverted = t_clipped.replace("dotdotdot", "...")
-    return t_reverted
-
-
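
The "dotdotdot" substitution in clip_text above protects literal ellipses from being counted as sentence boundaries while the text is clipped to its first few '.'-separated sentences. A small usage illustration with a made-up input:

# Keeps the first 2 sentences; the "..." run is not treated as a sentence break.
clip_text("Wait... this counts as one sentence. Second. Third. Fourth.", 2)
# -> 'Wait... this counts as one sentence. Second.'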
diff --git a/spaces/monra/freegpt-webui/client/css/label.css b/spaces/monra/freegpt-webui/client/css/label.css
deleted file mode 100644
index d84873d41e41f2cc22f9d3ace67c30ec07706811..0000000000000000000000000000000000000000
--- a/spaces/monra/freegpt-webui/client/css/label.css
+++ /dev/null
@@ -1,16 +0,0 @@
-label {
- cursor: pointer;
- text-indent: -9999px;
- width: 50px;
- height: 30px;
- backdrop-filter: blur(20px);
- -webkit-backdrop-filter: blur(20px);
- background-color: var(--blur-bg);
- border-radius: var(--border-radius-1);
- border: 1px solid var(--blur-border);
- display: block;
- border-radius: 100px;
- position: relative;
- overflow: hidden;
- transition: 0.33s;
-}
diff --git a/spaces/mrstuffandthings/Bark-Voice-Cloning/setup.py b/spaces/mrstuffandthings/Bark-Voice-Cloning/setup.py
deleted file mode 100644
index 606849326a4002007fd42060b51e69a19c18675c..0000000000000000000000000000000000000000
--- a/spaces/mrstuffandthings/Bark-Voice-Cloning/setup.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from setuptools import setup
-
-setup()
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/ema/__init__.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/ema/__init__.py
deleted file mode 100644
index 503ceaa609b092e48bd32a0031f4e2ffb875483f..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/ema/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import importlib
-import os
-
-from .ema import EMA
-
-
-def build_ema(model, cfg, device):
- return EMA(model, cfg, device)
-
-
-# automatically import any Python files in the models/ema/ directory
-for file in sorted(os.listdir(os.path.dirname(__file__))):
- if file.endswith(".py") and not file.startswith("_"):
- file_name = file[: file.find(".py")]
- importlib.import_module("fairseq.models.ema." + file_name)
diff --git a/spaces/mthsk/sovits-models-misc/hubert/__init__.py b/spaces/mthsk/sovits-models-misc/hubert/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/vae.py b/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/vae.py
deleted file mode 100644
index f83b71b82e40451571f5fbdbb3ca66a3cb26c65b..0000000000000000000000000000000000000000
--- a/spaces/mueller-franzes/medfusion-app/medical_diffusion/external/diffusers/vae.py
+++ /dev/null
@@ -1,857 +0,0 @@
-
-
-from typing import Optional, Tuple, Union
-from pathlib import Path
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from itertools import chain
-
-from .unet_blocks import UNetMidBlock2D, get_down_block, get_up_block
-from .taming_discriminator import NLayerDiscriminator
-from medical_diffusion.models import BasicModel
-from torchvision.utils import save_image
-
-from torch.distributions.normal import Normal
-from torch.distributions import kl_divergence
-
-class Encoder(nn.Module):
- def __init__(
- self,
- in_channels=3,
- out_channels=3,
- down_block_types=("DownEncoderBlock2D",),
- block_out_channels=(64),
- layers_per_block=2,
- norm_num_groups=32,
- act_fn="silu",
- double_z=True,
- ):
- super().__init__()
- self.layers_per_block = layers_per_block
-
- self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)
-
- self.mid_block = None
- self.down_blocks = nn.ModuleList([])
-
- # down
- output_channel = block_out_channels[0]
- for i, down_block_type in enumerate(down_block_types):
- input_channel = output_channel
- output_channel = block_out_channels[i+1]
- is_final_block = False #i == len(block_out_channels) - 1
-
- down_block = get_down_block(
- down_block_type,
- num_layers=self.layers_per_block,
- in_channels=input_channel,
- out_channels=output_channel,
- add_downsample=not is_final_block,
- resnet_eps=1e-6,
- downsample_padding=0,
- resnet_act_fn=act_fn,
- resnet_groups=norm_num_groups,
- attn_num_head_channels=None,
- temb_channels=None,
- )
- self.down_blocks.append(down_block)
-
- # mid
- self.mid_block = UNetMidBlock2D(
- in_channels=block_out_channels[-1],
- resnet_eps=1e-6,
- resnet_act_fn=act_fn,
- output_scale_factor=1,
- resnet_time_scale_shift="default",
- attn_num_head_channels=None,
- resnet_groups=norm_num_groups,
- temb_channels=None,
- )
-
- # out
- self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
- self.conv_act = nn.SiLU()
-
- conv_out_channels = 2 * out_channels if double_z else out_channels
- self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
-
- def forward(self, x):
- sample = x
- sample = self.conv_in(sample)
-
- # down
- for down_block in self.down_blocks:
- sample = down_block(sample)
-
- # middle
- sample = self.mid_block(sample)
-
- # post-process
- sample = self.conv_norm_out(sample)
- sample = self.conv_act(sample)
- sample = self.conv_out(sample)
-
- return sample
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- in_channels=3,
- out_channels=3,
- up_block_types=("UpDecoderBlock2D",),
- block_out_channels=(64,),
- layers_per_block=2,
- norm_num_groups=32,
- act_fn="silu",
- ):
- super().__init__()
- self.layers_per_block = layers_per_block
-
- self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
-
- self.mid_block = None
- self.up_blocks = nn.ModuleList([])
-
- # mid
- self.mid_block = UNetMidBlock2D(
- in_channels=block_out_channels[-1],
- resnet_eps=1e-6,
- resnet_act_fn=act_fn,
- output_scale_factor=1,
- resnet_time_scale_shift="default",
- attn_num_head_channels=None,
- resnet_groups=norm_num_groups,
- temb_channels=None,
- )
-
- # up
- reversed_block_out_channels = list(reversed(block_out_channels))
- output_channel = reversed_block_out_channels[0]
- for i, up_block_type in enumerate(up_block_types):
- prev_output_channel = output_channel
- output_channel = reversed_block_out_channels[i+1]
-
- is_final_block = False # i == len(block_out_channels) - 1
-
- up_block = get_up_block(
- up_block_type,
- num_layers=self.layers_per_block + 1,
- in_channels=prev_output_channel,
- out_channels=output_channel,
- prev_output_channel=None,
- add_upsample=not is_final_block,
- resnet_eps=1e-6,
- resnet_act_fn=act_fn,
- resnet_groups=norm_num_groups,
- attn_num_head_channels=None,
- temb_channels=None,
- )
- self.up_blocks.append(up_block)
- prev_output_channel = output_channel
-
- # out
- self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
- self.conv_act = nn.SiLU()
- self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
-
- def forward(self, z):
- sample = z
- sample = self.conv_in(sample)
-
- # middle
- sample = self.mid_block(sample)
-
- # up
- for up_block in self.up_blocks:
- sample = up_block(sample)
-
- # post-process
- sample = self.conv_norm_out(sample)
- sample = self.conv_act(sample)
- sample = self.conv_out(sample)
-
- return sample
-
-
-class VectorQuantizer(nn.Module):
- """
- Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix
- multiplications and allows for post-hoc remapping of indices.
- """
-
-    # NOTE: in the original implementation the beta term was applied to the wrong
-    # term due to a bug. Here legacy=False (the corrected weighting) is the default;
-    # pass legacy=True to reproduce the original buggy behaviour.
- def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=False):
- super().__init__()
- self.n_e = n_e
- self.e_dim = e_dim
- self.beta = beta
- self.legacy = legacy
-
- self.embedding = nn.Embedding(self.n_e, self.e_dim)
- self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
-
- self.remap = remap
- if self.remap is not None:
- self.register_buffer("used", torch.tensor(np.load(self.remap)))
- self.re_embed = self.used.shape[0]
- self.unknown_index = unknown_index # "random" or "extra" or integer
- if self.unknown_index == "extra":
- self.unknown_index = self.re_embed
- self.re_embed = self.re_embed + 1
- print(
- f"Remapping {self.n_e} indices to {self.re_embed} indices. "
- f"Using {self.unknown_index} for unknown indices."
- )
- else:
- self.re_embed = n_e
-
- self.sane_index_shape = sane_index_shape
-
- def remap_to_used(self, inds):
- ishape = inds.shape
- assert len(ishape) > 1
- inds = inds.reshape(ishape[0], -1)
- used = self.used.to(inds)
- match = (inds[:, :, None] == used[None, None, ...]).long()
- new = match.argmax(-1)
- unknown = match.sum(2) < 1
- if self.unknown_index == "random":
- new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
- else:
- new[unknown] = self.unknown_index
- return new.reshape(ishape)
-
- def unmap_to_all(self, inds):
- ishape = inds.shape
- assert len(ishape) > 1
- inds = inds.reshape(ishape[0], -1)
- used = self.used.to(inds)
- if self.re_embed > self.used.shape[0]: # extra token
- inds[inds >= self.used.shape[0]] = 0 # simply set to zero
- back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
- return back.reshape(ishape)
-
- def forward(self, z):
- # reshape z -> (batch, height, width, channel) and flatten
- z = z.permute(0, 2, 3, 1).contiguous()
- z_flattened = z.view(-1, self.e_dim)
- # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
-
- d = (
- torch.sum(z_flattened**2, dim=1, keepdim=True)
- + torch.sum(self.embedding.weight**2, dim=1)
- - 2 * torch.einsum("bd,dn->bn", z_flattened, self.embedding.weight.t())
- )
-
- min_encoding_indices = torch.argmin(d, dim=1)
- z_q = self.embedding(min_encoding_indices).view(z.shape)
- perplexity = None
- min_encodings = None
-
- # compute loss for embedding
- if not self.legacy:
- loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
- else:
- loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
-
- # preserve gradients
- z_q = z + (z_q - z).detach()
-
- # reshape back to match original input shape
- z_q = z_q.permute(0, 3, 1, 2).contiguous()
-
- if self.remap is not None:
- min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
- min_encoding_indices = self.remap_to_used(min_encoding_indices)
- min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
-
- if self.sane_index_shape:
- min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
-
- return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
-
- def get_codebook_entry(self, indices, shape):
- # shape specifying (batch, height, width, channel)
- if self.remap is not None:
- indices = indices.reshape(shape[0], -1) # add batch axis
- indices = self.unmap_to_all(indices)
- indices = indices.reshape(-1) # flatten again
-
- # get quantized latent vectors
- z_q = self.embedding(indices)
-
- if shape is not None:
- z_q = z_q.view(shape)
- # reshape back to match original input shape
- z_q = z_q.permute(0, 3, 1, 2).contiguous()
-
- return z_q
-
-
-class DiagonalGaussianDistribution(object):
- def __init__(self, parameters, deterministic=False):
- self.batch_size = parameters.shape[0]
- self.parameters = parameters
- self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
- # self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
- self.deterministic = deterministic
- self.std = torch.exp(0.5 * self.logvar)
- self.var = torch.exp(self.logvar)
- if self.deterministic:
- self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
-
- def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
- device = self.parameters.device
- sample_device = "cpu" if device.type == "mps" else device
- sample = torch.randn(self.mean.shape, generator=generator, device=sample_device).to(device)
- x = self.mean + self.std * sample
- return x
-
- def kl(self, other=None):
- if self.deterministic:
- return torch.Tensor([0.0])
- else:
- if other is None:
- return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar)/self.batch_size
- else:
- return 0.5 * torch.sum(
- torch.pow(self.mean - other.mean, 2) / other.var
- + self.var / other.var
- - 1.0
- - self.logvar
- + other.logvar,
- )/self.batch_size
-
- # q_z_x = Normal(self.mean, self.logvar.mul(.5).exp())
- # p_z = Normal(torch.zeros_like(self.mean), torch.ones_like(self.logvar))
- # kl_div = kl_divergence(q_z_x, p_z).sum(1).mean()
- # return kl_div
-
- def nll(self, sample, dims=[1, 2, 3]):
- if self.deterministic:
- return torch.Tensor([0.0])
- logtwopi = np.log(2.0 * np.pi)
- return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)
-
- def mode(self):
- return self.mean
-
-
-class VQModel(nn.Module):
- r"""VQ-VAE model from the paper Neural Discrete Representation Learning by Aaron van den Oord, Oriol Vinyals and Koray
- Kavukcuoglu.
-
- This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
-    implements for all models (such as downloading or saving, etc.)
-
- Parameters:
- in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
- out_channels (int, *optional*, defaults to 3): Number of channels in the output.
- down_block_types (`Tuple[str]`, *optional*, defaults to :
- obj:`("DownEncoderBlock2D",)`): Tuple of downsample block types.
- up_block_types (`Tuple[str]`, *optional*, defaults to :
- obj:`("UpDecoderBlock2D",)`): Tuple of upsample block types.
- block_out_channels (`Tuple[int]`, *optional*, defaults to :
- obj:`(64,)`): Tuple of block output channels.
- act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
- latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space.
- sample_size (`int`, *optional*, defaults to `32`): TODO
- num_vq_embeddings (`int`, *optional*, defaults to `256`): Number of codebook vectors in the VQ-VAE.
- """
-
-
- def __init__(
- self,
- in_channels: int = 3,
- out_channels: int = 3,
- down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"),
- up_block_types: Tuple[str] = ("UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"),
- block_out_channels: Tuple[int] = (32, 64, 128, 256),
- layers_per_block: int = 1,
- act_fn: str = "silu",
- latent_channels: int = 3,
- sample_size: int = 32,
- num_vq_embeddings: int = 256,
- norm_num_groups: int = 32,
- ):
- super().__init__()
-
- # pass init params to Encoder
- self.encoder = Encoder(
- in_channels=in_channels,
- out_channels=latent_channels,
- down_block_types=down_block_types,
- block_out_channels=block_out_channels,
- layers_per_block=layers_per_block,
- act_fn=act_fn,
- norm_num_groups=norm_num_groups,
- double_z=False,
- )
-
- self.quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1)
- self.quantize = VectorQuantizer(
- num_vq_embeddings, latent_channels, beta=0.25, remap=None, sane_index_shape=False
- )
- self.post_quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1)
-
- # pass init params to Decoder
- self.decoder = Decoder(
- in_channels=latent_channels,
- out_channels=out_channels,
- up_block_types=up_block_types,
- block_out_channels=block_out_channels,
- layers_per_block=layers_per_block,
- act_fn=act_fn,
- norm_num_groups=norm_num_groups,
- )
-
- # def encode(self, x: torch.FloatTensor):
- # z = self.encoder(x)
- # z = self.quant_conv(z)
- # return z
-
- def encode(self, x, return_loss=True, force_quantize= True):
- z = self.encoder(x)
- z = self.quant_conv(z)
-
- if force_quantize:
- z_q, emb_loss, _ = self.quantize(z)
- else:
- z_q, emb_loss = z, None
-
- if return_loss:
- return z_q, emb_loss
- else:
- return z_q
-
- def decode(self, z_q) -> torch.FloatTensor:
- z_q = self.post_quant_conv(z_q)
- x = self.decoder(z_q)
- return x
-
- # def decode(self, z: torch.FloatTensor, return_loss=True, force_quantize: bool = True) -> torch.FloatTensor:
- # if force_quantize:
- # z_q, emb_loss, _ = self.quantize(z)
- # else:
- # z_q, emb_loss = z, None
-
- # z_q = self.post_quant_conv(z_q)
- # x = self.decoder(z_q)
-
- # if return_loss:
- # return x, emb_loss
- # else:
- # return x
-
- def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor:
- r"""
- Args:
- sample (`torch.FloatTensor`): Input sample.
- """
- # h = self.encode(sample)
- h, emb_loss = self.encode(sample)
- dec = self.decode(h)
- # dec, emb_loss = self.decode(h)
-
- return dec, emb_loss
-
-
-class AutoencoderKL(nn.Module):
- r"""Variational Autoencoder (VAE) model with KL loss from the paper Auto-Encoding Variational Bayes by Diederik P. Kingma
- and Max Welling.
-
- This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
-    implements for all models (such as downloading or saving, etc.)
-
- Parameters:
- in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
- out_channels (int, *optional*, defaults to 3): Number of channels in the output.
- down_block_types (`Tuple[str]`, *optional*, defaults to :
- obj:`("DownEncoderBlock2D",)`): Tuple of downsample block types.
- up_block_types (`Tuple[str]`, *optional*, defaults to :
- obj:`("UpDecoderBlock2D",)`): Tuple of upsample block types.
- block_out_channels (`Tuple[int]`, *optional*, defaults to :
- obj:`(64,)`): Tuple of block output channels.
- act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
- latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space.
- sample_size (`int`, *optional*, defaults to `32`): TODO
- """
-
-
- def __init__(
- self,
- in_channels: int = 3,
- out_channels: int = 3,
- down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D","DownEncoderBlock2D",),
- up_block_types: Tuple[str] = ("UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D",),
- block_out_channels: Tuple[int] = (32, 64, 128, 128),
- layers_per_block: int = 1,
- act_fn: str = "silu",
- latent_channels: int = 3,
- norm_num_groups: int = 32,
- sample_size: int = 32,
- ):
- super().__init__()
-
- # pass init params to Encoder
- self.encoder = Encoder(
- in_channels=in_channels,
- out_channels=latent_channels,
- down_block_types=down_block_types,
- block_out_channels=block_out_channels,
- layers_per_block=layers_per_block,
- act_fn=act_fn,
- norm_num_groups=norm_num_groups,
- double_z=True,
- )
-
- # pass init params to Decoder
- self.decoder = Decoder(
- in_channels=latent_channels,
- out_channels=out_channels,
- up_block_types=up_block_types,
- block_out_channels=block_out_channels,
- layers_per_block=layers_per_block,
- norm_num_groups=norm_num_groups,
- act_fn=act_fn,
- )
-
- self.quant_conv = torch.nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
- self.post_quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1)
-
- def encode(self, x: torch.FloatTensor):
- h = self.encoder(x)
- moments = self.quant_conv(h)
- posterior = DiagonalGaussianDistribution(moments)
- return posterior
-
- def decode(self, z: torch.FloatTensor) -> torch.FloatTensor:
- z = self.post_quant_conv(z)
- dec = self.decoder(z)
- return dec
-
- def forward(
- self,
- sample: torch.FloatTensor,
- sample_posterior: bool = True,
- generator: Optional[torch.Generator] = None,
- ) -> torch.FloatTensor:
- r"""
- Args:
- sample (`torch.FloatTensor`): Input sample.
- sample_posterior (`bool`, *optional*, defaults to `False`):
- Whether to sample from the posterior.
- """
- x = sample
- posterior = self.encode(x)
- if sample_posterior:
- z = posterior.sample(generator=generator)
- else:
- z = posterior.mode()
- kl_loss = posterior.kl()
- dec = self.decode(z)
- return dec, kl_loss
-
-
-
-class VQVAEWrapper(BasicModel):
- def __init__(
- self,
- in_ch: int = 3,
- out_ch: int = 3,
- down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D",),
- up_block_types: Tuple[str] = ("UpDecoderBlock2D","UpDecoderBlock2D","UpDecoderBlock2D",),
- block_out_channels: Tuple[int] = (32, 64, 128, 256, ),
- layers_per_block: int = 1,
- act_fn: str = "silu",
- latent_channels: int = 3,
- sample_size: int = 32,
- num_vq_embeddings: int = 64,
- norm_num_groups: int = 32,
-
- optimizer=torch.optim.AdamW,
- optimizer_kwargs={},
- lr_scheduler=None,
- lr_scheduler_kwargs={},
- loss=torch.nn.MSELoss,
- loss_kwargs={}
- ):
- super().__init__(optimizer, optimizer_kwargs, lr_scheduler, lr_scheduler_kwargs, loss, loss_kwargs)
- self.model = VQModel(in_ch, out_ch, down_block_types, up_block_types, block_out_channels,
- layers_per_block, act_fn, latent_channels, sample_size, num_vq_embeddings, norm_num_groups)
-
- def forward(self, sample):
- return self.model(sample)
-
- def encode(self, x):
- z = self.model.encode(x, return_loss=False)
- return z
-
- def decode(self, z):
- x = self.model.decode(z)
- return x
-
- def _step(self, batch: dict, batch_idx: int, state: str, step: int, optimizer_idx:int):
- # ------------------------- Get Source/Target ---------------------------
- x = batch['source']
- target = x
-
- # ------------------------- Run Model ---------------------------
- pred, vq_loss = self(x)
-
- # ------------------------- Compute Loss ---------------------------
- loss = self.loss_fct(pred, target)
- loss += vq_loss
-
- # --------------------- Compute Metrics -------------------------------
- results = {'loss':loss}
- with torch.no_grad():
- results['L2'] = torch.nn.functional.mse_loss(pred, target)
- results['L1'] = torch.nn.functional.l1_loss(pred, target)
-
- # ----------------- Log Scalars ----------------------
- for metric_name, metric_val in results.items():
- self.log(f"{state}/{metric_name}", metric_val, batch_size=x.shape[0], on_step=True, on_epoch=True)
-
- # ----------------- Save Image ------------------------------
- if self.global_step != 0 and self.global_step % self.trainer.log_every_n_steps == 0:
- def norm(x):
- return (x-x.min())/(x.max()-x.min())
-
- images = [x, pred]
- log_step = self.global_step // self.trainer.log_every_n_steps
- path_out = Path(self.logger.log_dir)/'images'
- path_out.mkdir(parents=True, exist_ok=True)
- images = torch.cat([norm(img) for img in images])
- save_image(images, path_out/f'sample_{log_step}.png')
-
- return loss
-
-def hinge_d_loss(logits_real, logits_fake):
- loss_real = torch.mean(F.relu(1. - logits_real))
- loss_fake = torch.mean(F.relu(1. + logits_fake))
- d_loss = 0.5 * (loss_real + loss_fake)
- return d_loss
-
-def vanilla_d_loss(logits_real, logits_fake):
- d_loss = 0.5 * (
- torch.mean(F.softplus(-logits_real)) +
- torch.mean(F.softplus(logits_fake)))
- return d_loss
-
-class VQGAN(BasicModel):
- def __init__(
- self,
- in_ch: int = 3,
- out_ch: int = 3,
- down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D",),
- up_block_types: Tuple[str] = ("UpDecoderBlock2D","UpDecoderBlock2D","UpDecoderBlock2D",),
- block_out_channels: Tuple[int] = (32, 64, 128, 256, ),
- layers_per_block: int = 1,
- act_fn: str = "silu",
- latent_channels: int = 3,
- sample_size: int = 32,
- num_vq_embeddings: int = 64,
- norm_num_groups: int = 32,
-
-        start_gan_train_step = 50000, # NOTE: the step counter increases with each optimizer step
- gan_loss_weight: float = 1.0, # alias discriminator
- perceptual_loss_weight: float = 1.0,
- embedding_loss_weight: float = 1.0,
-
- optimizer=torch.optim.AdamW,
- optimizer_kwargs={},
- lr_scheduler=None,
- lr_scheduler_kwargs={},
- loss=torch.nn.MSELoss,
- loss_kwargs={}
- ):
- super().__init__(optimizer, optimizer_kwargs, lr_scheduler, lr_scheduler_kwargs, loss, loss_kwargs)
- self.vqvae = VQModel(in_ch, out_ch, down_block_types, up_block_types, block_out_channels, layers_per_block, act_fn,
- latent_channels, sample_size, num_vq_embeddings, norm_num_groups)
- self.discriminator = NLayerDiscriminator(in_ch)
- # self.perceiver = ... # Currently not supported, would require another trained NN
-
- self.start_gan_train_step = start_gan_train_step
- self.perceptual_loss_weight = perceptual_loss_weight
- self.gan_loss_weight = gan_loss_weight
- self.embedding_loss_weight = embedding_loss_weight
-
- def forward(self, x, condition=None):
- return self.vqvae(x)
-
- def encode(self, x):
- z = self.vqvae.encode(x, return_loss=False)
- return z
-
- def decode(self, z):
- x = self.vqvae.decode(z)
- return x
-
-
- def compute_lambda(self, rec_loss, gan_loss, eps=1e-4):
- """Computes adaptive weight as proposed in eq. 7 of https://arxiv.org/abs/2012.09841"""
- last_layer = self.vqvae.decoder.conv_out.weight
- rec_grads = torch.autograd.grad(rec_loss, last_layer, retain_graph=True)[0]
- gan_grads = torch.autograd.grad(gan_loss, last_layer, retain_graph=True)[0]
- d_weight = torch.norm(rec_grads) / (torch.norm(gan_grads) + eps)
- d_weight = torch.clamp(d_weight, 0.0, 1e4)
- return d_weight.detach()
-
-
-
- def _step(self, batch: dict, batch_idx: int, state: str, step: int, optimizer_idx:int):
- x = batch['source']
- # condition = batch.get('target', None)
-
- pred, vq_emb_loss = self.vqvae(x)
-
- if optimizer_idx == 0:
- # ------ VAE -------
- vq_img_loss = F.mse_loss(pred, x)
- vq_per_loss = 0.0 #self.perceiver(pred, x)
- rec_loss = vq_img_loss+self.perceptual_loss_weight*vq_per_loss
-
- # ------- GAN -----
- if step > self.start_gan_train_step:
- gan_loss = -torch.mean(self.discriminator(pred))
- lambda_weight = self.compute_lambda(rec_loss, gan_loss)
- gan_loss = gan_loss*lambda_weight
- else:
- gan_loss = torch.tensor([0.0], requires_grad=True, device=x.device)
-
- loss = self.gan_loss_weight*gan_loss+rec_loss+self.embedding_loss_weight*vq_emb_loss
-
- elif optimizer_idx == 1:
- if step > self.start_gan_train_step//2:
- logits_real = self.discriminator(x.detach())
- logits_fake = self.discriminator(pred.detach())
- loss = hinge_d_loss(logits_real, logits_fake)
- else:
- loss = torch.tensor([0.0], requires_grad=True, device=x.device)
-
- # --------------------- Compute Metrics -------------------------------
- results = {'loss':loss.detach(), f'loss_{optimizer_idx}':loss.detach()}
- with torch.no_grad():
- results[f'L2'] = torch.nn.functional.mse_loss(pred, x)
- results[f'L1'] = torch.nn.functional.l1_loss(pred, x)
-
- # ----------------- Log Scalars ----------------------
- for metric_name, metric_val in results.items():
- self.log(f"{state}/{metric_name}", metric_val, batch_size=x.shape[0], on_step=True, on_epoch=True)
-
- # ----------------- Save Image ------------------------------
- if self.global_step != 0 and self.global_step % self.trainer.log_every_n_steps == 0: # NOTE: step 1 (opt1) , step=2 (opt2), step=3 (opt1), ...
- def norm(x):
- return (x-x.min())/(x.max()-x.min())
-
- images = torch.cat([x, pred])
- log_step = self.global_step // self.trainer.log_every_n_steps
- path_out = Path(self.logger.log_dir)/'images'
- path_out.mkdir(parents=True, exist_ok=True)
- images = torch.stack([norm(img) for img in images])
- save_image(images, path_out/f'sample_{log_step}.png')
-
- return loss
-
- def configure_optimizers(self):
- opt_vae = self.optimizer(self.vqvae.parameters(), **self.optimizer_kwargs)
- opt_disc = self.optimizer(self.discriminator.parameters(), **self.optimizer_kwargs)
- if self.lr_scheduler is not None:
- scheduler = [
- {
- 'scheduler': self.lr_scheduler(opt_vae, **self.lr_scheduler_kwargs),
- 'interval': 'step',
- 'frequency': 1
- },
- {
- 'scheduler': self.lr_scheduler(opt_disc, **self.lr_scheduler_kwargs),
- 'interval': 'step',
- 'frequency': 1
- },
- ]
- else:
- scheduler = []
-
- return [opt_vae, opt_disc], scheduler
-
-class VAEWrapper(BasicModel):
- def __init__(
- self,
- in_ch: int = 3,
- out_ch: int = 3,
- down_block_types: Tuple[str] = ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"), # "DownEncoderBlock2D", "DownEncoderBlock2D",
- up_block_types: Tuple[str] = ("UpDecoderBlock2D", "UpDecoderBlock2D","UpDecoderBlock2D" ), # "UpDecoderBlock2D", "UpDecoderBlock2D",
- block_out_channels: Tuple[int] = (32, 64, 128, 256), # 128, 256
- layers_per_block: int = 1,
- act_fn: str = "silu",
- latent_channels: int = 3,
- norm_num_groups: int = 32,
- sample_size: int = 32,
-
- optimizer=torch.optim.AdamW,
- optimizer_kwargs={'lr':1e-4, 'weight_decay':1e-3, 'amsgrad':True},
- lr_scheduler=None,
- lr_scheduler_kwargs={},
- # loss=torch.nn.MSELoss, # WARNING: No Effect
- # loss_kwargs={'reduction': 'mean'}
- ):
- super().__init__(optimizer, optimizer_kwargs, lr_scheduler, lr_scheduler_kwargs ) # loss, loss_kwargs
- self.model = AutoencoderKL(in_ch, out_ch, down_block_types, up_block_types, block_out_channels,
- layers_per_block, act_fn, latent_channels, norm_num_groups, sample_size)
-
-        self.logvar = nn.Parameter(torch.zeros(size=())) # Better weighting between KL and MSE, see (https://arxiv.org/abs/1903.05789), also used by Taming-Transformer/Stable Diffusion
-
- def forward(self, sample):
- return self.model(sample)
-
- def encode(self, x):
- z = self.model.encode(x) # Latent space but not yet mapped to discrete embedding vectors
- return z.sample(generator=None)
-
- def decode(self, z):
- x = self.model.decode(z)
- return x
-
- def _step(self, batch: dict, batch_idx: int, state: str, step: int, optimizer_idx:int):
- # ------------------------- Get Source/Target ---------------------------
- x = batch['source']
- target = x
- HALF_LOG_TWO_PI = 0.91893 # log(2pi)/2
-
- # ------------------------- Run Model ---------------------------
- pred, kl_loss = self(x)
-
- # ------------------------- Compute Loss ---------------------------
- loss = torch.sum( torch.square(pred-target))/x.shape[0] #torch.sum( torch.square((pred-target)/torch.exp(self.logvar))/2 + self.logvar + HALF_LOG_TWO_PI )/x.shape[0]
- loss += kl_loss
-
- # --------------------- Compute Metrics -------------------------------
- results = {'loss':loss.detach()}
- with torch.no_grad():
- results['L2'] = torch.nn.functional.mse_loss(pred, target)
- results['L1'] = torch.nn.functional.l1_loss(pred, target)
-
- # ----------------- Log Scalars ----------------------
- for metric_name, metric_val in results.items():
- self.log(f"{state}/{metric_name}", metric_val, batch_size=x.shape[0], on_step=True, on_epoch=True)
-
- # ----------------- Save Image ------------------------------
- if self.global_step != 0 and self.global_step % self.trainer.log_every_n_steps == 0:
- def norm(x):
- return (x-x.min())/(x.max()-x.min())
-
- images = torch.cat([x, pred])
- log_step = self.global_step // self.trainer.log_every_n_steps
- path_out = Path(self.logger.log_dir)/'images'
- path_out.mkdir(parents=True, exist_ok=True)
- images = torch.stack([norm(img) for img in images])
- save_image(images, path_out/f'sample_{log_step}.png')
-
- return loss
\ No newline at end of file
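
DiagonalGaussianDistribution.kl above implements the closed-form KL divergence between the encoder posterior N(mean, var) and a standard normal prior, summed over latent dimensions and averaged over the batch (the commented-out lines in the class show the equivalent torch.distributions form). A small hedged sanity check of that equivalence, using arbitrary random statistics:

# Hedged sanity check: closed-form KL vs torch.distributions, mirroring the
# diagonal-Gaussian prior term used in the AutoencoderKL training loss above.
import torch
from torch.distributions import Normal, kl_divergence

mean, logvar = torch.randn(4, 8), torch.randn(4, 8)
var = logvar.exp()

closed_form = 0.5 * torch.sum(mean.pow(2) + var - 1.0 - logvar) / mean.shape[0]
reference = kl_divergence(Normal(mean, (0.5 * logvar).exp()),
                          Normal(torch.zeros_like(mean), torch.ones_like(mean))).sum(1).mean()
assert torch.allclose(closed_form, reference, atol=1e-5)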
diff --git a/spaces/multimodalart/latentdiffusion/latent-diffusion/ldm/models/autoencoder.py b/spaces/multimodalart/latentdiffusion/latent-diffusion/ldm/models/autoencoder.py
deleted file mode 100644
index 6a9c4f45498561953b8085981609b2a3298a5473..0000000000000000000000000000000000000000
--- a/spaces/multimodalart/latentdiffusion/latent-diffusion/ldm/models/autoencoder.py
+++ /dev/null
@@ -1,443 +0,0 @@
-import torch
-import numpy as np
-import pytorch_lightning as pl
-import torch.nn.functional as F
-from contextlib import contextmanager
-from packaging import version
-from torch.optim.lr_scheduler import LambdaLR
-
-from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
-
-from ldm.modules.diffusionmodules.model import Encoder, Decoder
-from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
-from ldm.modules.ema import LitEma
-
-from ldm.util import instantiate_from_config
-
-
-class VQModel(pl.LightningModule):
- def __init__(self,
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- batch_resize_range=None,
- scheduler_config=None,
- lr_g_factor=1.0,
- remap=None,
- sane_index_shape=False, # tell vector quantizer to return indices as bhw
- use_ema=False
- ):
- super().__init__()
- self.embed_dim = embed_dim
- self.n_embed = n_embed
- self.image_key = image_key
- self.encoder = Encoder(**ddconfig)
- self.decoder = Decoder(**ddconfig)
- self.loss = instantiate_from_config(lossconfig)
- self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
- remap=remap,
- sane_index_shape=sane_index_shape)
- self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
- if colorize_nlabels is not None:
- assert type(colorize_nlabels)==int
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
- if monitor is not None:
- self.monitor = monitor
- self.batch_resize_range = batch_resize_range
- if self.batch_resize_range is not None:
- print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
-
- self.use_ema = use_ema
- if self.use_ema:
- self.model_ema = LitEma(self)
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
- self.scheduler_config = scheduler_config
- self.lr_g_factor = lr_g_factor
-
- @contextmanager
- def ema_scope(self, context=None):
- if self.use_ema:
- self.model_ema.store(self.parameters())
- self.model_ema.copy_to(self)
- if context is not None:
- print(f"{context}: Switched to EMA weights")
- try:
- yield None
- finally:
- if self.use_ema:
- self.model_ema.restore(self.parameters())
- if context is not None:
- print(f"{context}: Restored training weights")
-
- def init_from_ckpt(self, path, ignore_keys=list()):
- sd = torch.load(path, map_location="cpu")["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- missing, unexpected = self.load_state_dict(sd, strict=False)
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
- print(f"Missing Keys: {missing}")
- print(f"Unexpected Keys: {unexpected}")
-
- def on_train_batch_end(self, *args, **kwargs):
- if self.use_ema:
- self.model_ema(self)
-
- def encode(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- quant, emb_loss, info = self.quantize(h)
- return quant, emb_loss, info
-
- def encode_to_prequant(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- return h
-
- def decode(self, quant):
- quant = self.post_quant_conv(quant)
- dec = self.decoder(quant)
- return dec
-
- def decode_code(self, code_b):
- quant_b = self.quantize.embed_code(code_b)
- dec = self.decode(quant_b)
- return dec
-
- def forward(self, input, return_pred_indices=False):
- quant, diff, (_,_,ind) = self.encode(input)
- dec = self.decode(quant)
- if return_pred_indices:
- return dec, diff, ind
- return dec, diff
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
- if self.batch_resize_range is not None:
- lower_size = self.batch_resize_range[0]
- upper_size = self.batch_resize_range[1]
- if self.global_step <= 4:
- # do the first few batches with max size to avoid later oom
- new_resize = upper_size
- else:
- new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
- if new_resize != x.shape[2]:
- x = F.interpolate(x, size=new_resize, mode="bicubic")
- x = x.detach()
- return x
-
- def training_step(self, batch, batch_idx, optimizer_idx):
- # https://github.com/pytorch/pytorch/issues/37142
- # try not to fool the heuristics
- x = self.get_input(batch, self.image_key)
- xrec, qloss, ind = self(x, return_pred_indices=True)
-
- if optimizer_idx == 0:
- # autoencode
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train",
- predicted_indices=ind)
-
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
- return aeloss
-
- if optimizer_idx == 1:
- # discriminator
- discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
- return discloss
-
- def validation_step(self, batch, batch_idx):
- log_dict = self._validation_step(batch, batch_idx)
- with self.ema_scope():
- log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
- return log_dict
-
- def _validation_step(self, batch, batch_idx, suffix=""):
- x = self.get_input(batch, self.image_key)
- xrec, qloss, ind = self(x, return_pred_indices=True)
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val"+suffix,
- predicted_indices=ind
- )
-
- discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val"+suffix,
- predicted_indices=ind
- )
- rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
- self.log(f"val{suffix}/rec_loss", rec_loss,
- prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
- self.log(f"val{suffix}/aeloss", aeloss,
- prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
- if version.parse(pl.__version__) >= version.parse('1.4.0'):
- del log_dict_ae[f"val{suffix}/rec_loss"]
- self.log_dict(log_dict_ae)
- self.log_dict(log_dict_disc)
- return self.log_dict
-
- def configure_optimizers(self):
- lr_d = self.learning_rate
- lr_g = self.lr_g_factor*self.learning_rate
- print("lr_d", lr_d)
- print("lr_g", lr_g)
- opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
- list(self.decoder.parameters())+
- list(self.quantize.parameters())+
- list(self.quant_conv.parameters())+
- list(self.post_quant_conv.parameters()),
- lr=lr_g, betas=(0.5, 0.9))
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
- lr=lr_d, betas=(0.5, 0.9))
-
- if self.scheduler_config is not None:
- scheduler = instantiate_from_config(self.scheduler_config)
-
- print("Setting up LambdaLR scheduler...")
- scheduler = [
- {
- 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- },
- {
- 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- },
- ]
- return [opt_ae, opt_disc], scheduler
- return [opt_ae, opt_disc], []
-
- def get_last_layer(self):
- return self.decoder.conv_out.weight
-
- def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(self.device)
- if only_inputs:
- log["inputs"] = x
- return log
- xrec, _ = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["inputs"] = x
- log["reconstructions"] = xrec
- if plot_ema:
- with self.ema_scope():
- xrec_ema, _ = self(x)
- if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
- log["reconstructions_ema"] = xrec_ema
- return log
-
- def to_rgb(self, x):
- assert self.image_key == "segmentation"
- if not hasattr(self, "colorize"):
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
- x = F.conv2d(x, weight=self.colorize)
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
- return x
-
-
-class VQModelInterface(VQModel):
- def __init__(self, embed_dim, *args, **kwargs):
- super().__init__(embed_dim=embed_dim, *args, **kwargs)
- self.embed_dim = embed_dim
-
- def encode(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- return h
-
- def decode(self, h, force_not_quantize=False):
- # also go through quantization layer
- if not force_not_quantize:
- quant, emb_loss, info = self.quantize(h)
- else:
- quant = h
- quant = self.post_quant_conv(quant)
- dec = self.decoder(quant)
- return dec
-
-
-class AutoencoderKL(pl.LightningModule):
- def __init__(self,
- ddconfig,
- lossconfig,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- ):
- super().__init__()
- self.image_key = image_key
- self.encoder = Encoder(**ddconfig)
- self.decoder = Decoder(**ddconfig)
- self.loss = instantiate_from_config(lossconfig)
- assert ddconfig["double_z"]
- self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
- self.embed_dim = embed_dim
- if colorize_nlabels is not None:
- assert type(colorize_nlabels)==int
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
- if monitor is not None:
- self.monitor = monitor
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
-
- def init_from_ckpt(self, path, ignore_keys=list()):
- sd = torch.load(path, map_location="cpu")["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- self.load_state_dict(sd, strict=False)
- print(f"Restored from {path}")
-
- def encode(self, x):
- h = self.encoder(x)
- moments = self.quant_conv(h)
- posterior = DiagonalGaussianDistribution(moments)
- return posterior
-
- def decode(self, z):
- z = self.post_quant_conv(z)
- dec = self.decoder(z)
- return dec
-
- def forward(self, input, sample_posterior=True):
- posterior = self.encode(input)
- if sample_posterior:
- z = posterior.sample()
- else:
- z = posterior.mode()
- dec = self.decode(z)
- return dec, posterior
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
- return x
-
- def training_step(self, batch, batch_idx, optimizer_idx):
- inputs = self.get_input(batch, self.image_key)
- reconstructions, posterior = self(inputs)
-
- if optimizer_idx == 0:
- # train encoder+decoder+logvar
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
- self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
- return aeloss
-
- if optimizer_idx == 1:
- # train the discriminator
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
-
- self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
- return discloss
-
- def validation_step(self, batch, batch_idx):
- inputs = self.get_input(batch, self.image_key)
- reconstructions, posterior = self(inputs)
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
- last_layer=self.get_last_layer(), split="val")
-
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
- last_layer=self.get_last_layer(), split="val")
-
- self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
- self.log_dict(log_dict_ae)
- self.log_dict(log_dict_disc)
- return self.log_dict
-
- def configure_optimizers(self):
- lr = self.learning_rate
- opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
- list(self.decoder.parameters())+
- list(self.quant_conv.parameters())+
- list(self.post_quant_conv.parameters()),
- lr=lr, betas=(0.5, 0.9))
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
- lr=lr, betas=(0.5, 0.9))
- return [opt_ae, opt_disc], []
-
- def get_last_layer(self):
- return self.decoder.conv_out.weight
-
- @torch.no_grad()
- def log_images(self, batch, only_inputs=False, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(self.device)
- if not only_inputs:
- xrec, posterior = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["samples"] = self.decode(torch.randn_like(posterior.sample()))
- log["reconstructions"] = xrec
- log["inputs"] = x
- return log
-
- def to_rgb(self, x):
- assert self.image_key == "segmentation"
- if not hasattr(self, "colorize"):
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
- x = F.conv2d(x, weight=self.colorize)
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
- return x
-
-
-class IdentityFirstStage(torch.nn.Module):
- def __init__(self, *args, vq_interface=False, **kwargs):
- self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff
- super().__init__()
-
- def encode(self, x, *args, **kwargs):
- return x
-
- def decode(self, x, *args, **kwargs):
- return x
-
- def quantize(self, x, *args, **kwargs):
- if self.vq_interface:
- return x, None, [None, None, None]
- return x
-
- def forward(self, x, *args, **kwargs):
- return x
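The autoencoder above returns a `DiagonalGaussianDistribution` from `encode` and maps a latent back to image space in `decode`. A minimal round-trip sketch, assuming an already-constructed instance `ae` of that class (constructor arguments and checkpoint loading omitted) and an input resolution compatible with its `ddconfig`:

```python
import torch

x = torch.randn(1, 3, 256, 256)   # dummy image batch
posterior = ae.encode(x)          # DiagonalGaussianDistribution over the latents
z = posterior.sample()            # stochastic latent; posterior.mode() gives the mean instead
x_rec = ae.decode(z)              # decoded reconstruction
```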
diff --git a/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/layers_WS.py b/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/layers_WS.py
deleted file mode 100644
index 51085989c4f090d4dc5f599be3c550d16ec0b2e7..0000000000000000000000000000000000000000
--- a/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/layers_WS.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Modified by Nikita Selin (OPHoperHPO)[https://github.com/OPHoperHPO].
-Source url: https://github.com/MarcoForte/FBA_Matting
-License: MIT License
-"""
-import torch
-import torch.nn as nn
-from torch.nn import functional as F
-
-
-class Conv2d(nn.Conv2d):
- def __init__(
- self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True,
- ):
- super(Conv2d, self).__init__(
- in_channels,
- out_channels,
- kernel_size,
- stride,
- padding,
- dilation,
- groups,
- bias,
- )
-
- def forward(self, x):
- # return super(Conv2d, self).forward(x)
- weight = self.weight
- weight_mean = (
- weight.mean(dim=1, keepdim=True)
- .mean(dim=2, keepdim=True)
- .mean(dim=3, keepdim=True)
- )
- weight = weight - weight_mean
- # std = (weight).view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
- std = (
- torch.sqrt(torch.var(weight.view(weight.size(0), -1), dim=1) + 1e-12).view(
- -1, 1, 1, 1
- )
- + 1e-5
- )
- weight = weight / std.expand_as(weight)
- return F.conv2d(
- x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups
- )
-
-
-def BatchNorm2d(num_features):
- return nn.GroupNorm(num_channels=num_features, num_groups=32)
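The `Conv2d` above applies weight standardization: each output filter is mean-centred and rescaled by its standard deviation before the convolution, and `BatchNorm2d` is replaced by 32-group GroupNorm. A minimal sketch of the same normalization on a raw weight tensor (shapes are illustrative):

```python
import torch

w = torch.randn(64, 32, 3, 3)                                     # (out_ch, in_ch, kH, kW)
w_centred = w - w.mean(dim=(1, 2, 3), keepdim=True)               # zero mean per output filter
std = w_centred.view(w.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
w_std = w_centred / std                                           # weights actually passed to F.conv2d
```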
diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/ffc.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/ffc.py
deleted file mode 100644
index 2f8aeb1411fc1537916275fd3243706cc74b8d3c..0000000000000000000000000000000000000000
--- a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/modules/ffc.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# Fast Fourier Convolution NeurIPS 2020
-# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py
-# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from saicinpainting.training.modules.base import get_activation, BaseDiscriminator
-from saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper
-from saicinpainting.training.modules.squeeze_excitation import SELayer
-from saicinpainting.utils import get_shape
-
-
-class FFCSE_block(nn.Module):
-
- def __init__(self, channels, ratio_g):
- super(FFCSE_block, self).__init__()
- in_cg = int(channels * ratio_g)
- in_cl = channels - in_cg
- r = 16
-
- self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
- self.conv1 = nn.Conv2d(channels, channels // r,
- kernel_size=1, bias=True)
- self.relu1 = nn.ReLU(inplace=True)
- self.conv_a2l = None if in_cl == 0 else nn.Conv2d(
- channels // r, in_cl, kernel_size=1, bias=True)
- self.conv_a2g = None if in_cg == 0 else nn.Conv2d(
- channels // r, in_cg, kernel_size=1, bias=True)
- self.sigmoid = nn.Sigmoid()
-
- def forward(self, x):
- x = x if type(x) is tuple else (x, 0)
- id_l, id_g = x
-
- x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1)
- x = self.avgpool(x)
- x = self.relu1(self.conv1(x))
-
- x_l = 0 if self.conv_a2l is None else id_l * \
- self.sigmoid(self.conv_a2l(x))
- x_g = 0 if self.conv_a2g is None else id_g * \
- self.sigmoid(self.conv_a2g(x))
- return x_l, x_g
-
-
-class FourierUnit(nn.Module):
-
- def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear',
- spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'):
- # bn_layer not used
- super(FourierUnit, self).__init__()
- self.groups = groups
-
- self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0),
- out_channels=out_channels * 2,
- kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False)
- self.bn = torch.nn.BatchNorm2d(out_channels * 2)
- self.relu = torch.nn.ReLU(inplace=True)
-
- # squeeze and excitation block
- self.use_se = use_se
- if use_se:
- if se_kwargs is None:
- se_kwargs = {}
- self.se = SELayer(self.conv_layer.in_channels, **se_kwargs)
-
- self.spatial_scale_factor = spatial_scale_factor
- self.spatial_scale_mode = spatial_scale_mode
- self.spectral_pos_encoding = spectral_pos_encoding
- self.ffc3d = ffc3d
- self.fft_norm = fft_norm
-
- def forward(self, x):
- batch = x.shape[0]
-
- if self.spatial_scale_factor is not None:
- orig_size = x.shape[-2:]
- x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False)
-
- r_size = x.size()
- # (batch, c, h, w/2+1, 2)
- fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1)
- ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm)
- ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
- ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1)
- ffted = ffted.view((batch, -1,) + ffted.size()[3:])
-
- if self.spectral_pos_encoding:
- height, width = ffted.shape[-2:]
- coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted)
- coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted)
- ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1)
-
- if self.use_se:
- ffted = self.se(ffted)
-
- ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1)
- ffted = self.relu(self.bn(ffted))
-
- ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute(
- 0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2)
- ffted = torch.complex(ffted[..., 0], ffted[..., 1])
-
- ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:]
- output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm)
-
- if self.spatial_scale_factor is not None:
- output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False)
-
- return output
-
-
-class SpectralTransform(nn.Module):
-
- def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, **fu_kwargs):
- # bn_layer not used
- super(SpectralTransform, self).__init__()
- self.enable_lfu = enable_lfu
- if stride == 2:
- self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
- else:
- self.downsample = nn.Identity()
-
- self.stride = stride
- self.conv1 = nn.Sequential(
- nn.Conv2d(in_channels, out_channels //
- 2, kernel_size=1, groups=groups, bias=False),
- nn.BatchNorm2d(out_channels // 2),
- nn.ReLU(inplace=True)
- )
- self.fu = FourierUnit(
- out_channels // 2, out_channels // 2, groups, **fu_kwargs)
- if self.enable_lfu:
- self.lfu = FourierUnit(
- out_channels // 2, out_channels // 2, groups)
- self.conv2 = torch.nn.Conv2d(
- out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False)
-
- def forward(self, x):
-
- x = self.downsample(x)
- x = self.conv1(x)
- output = self.fu(x)
-
- if self.enable_lfu:
- n, c, h, w = x.shape
- split_no = 2
- split_s = h // split_no
- xs = torch.cat(torch.split(
- x[:, :c // 4], split_s, dim=-2), dim=1).contiguous()
- xs = torch.cat(torch.split(xs, split_s, dim=-1),
- dim=1).contiguous()
- xs = self.lfu(xs)
- xs = xs.repeat(1, 1, split_no, split_no).contiguous()
- else:
- xs = 0
-
- output = self.conv2(x + output + xs)
-
- return output
-
-
-class FFC(nn.Module):
-
- def __init__(self, in_channels, out_channels, kernel_size,
- ratio_gin, ratio_gout, stride=1, padding=0,
- dilation=1, groups=1, bias=False, enable_lfu=True,
- padding_type='reflect', gated=False, **spectral_kwargs):
- super(FFC, self).__init__()
-
- assert stride == 1 or stride == 2, "Stride should be 1 or 2."
- self.stride = stride
-
- in_cg = int(in_channels * ratio_gin)
- in_cl = in_channels - in_cg
- out_cg = int(out_channels * ratio_gout)
- out_cl = out_channels - out_cg
- #groups_g = 1 if groups == 1 else int(groups * ratio_gout)
- #groups_l = 1 if groups == 1 else groups - groups_g
-
- self.ratio_gin = ratio_gin
- self.ratio_gout = ratio_gout
- self.global_in_num = in_cg
-
- module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d
- self.convl2l = module(in_cl, out_cl, kernel_size,
- stride, padding, dilation, groups, bias, padding_mode=padding_type)
- module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d
- self.convl2g = module(in_cl, out_cg, kernel_size,
- stride, padding, dilation, groups, bias, padding_mode=padding_type)
- module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d
- self.convg2l = module(in_cg, out_cl, kernel_size,
- stride, padding, dilation, groups, bias, padding_mode=padding_type)
- module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform
- self.convg2g = module(
- in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs)
-
- self.gated = gated
- module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d
- self.gate = module(in_channels, 2, 1)
-
- def forward(self, x):
- x_l, x_g = x if type(x) is tuple else (x, 0)
- out_xl, out_xg = 0, 0
-
- if self.gated:
- total_input_parts = [x_l]
- if torch.is_tensor(x_g):
- total_input_parts.append(x_g)
- total_input = torch.cat(total_input_parts, dim=1)
-
- gates = torch.sigmoid(self.gate(total_input))
- g2l_gate, l2g_gate = gates.chunk(2, dim=1)
- else:
- g2l_gate, l2g_gate = 1, 1
-
- if self.ratio_gout != 1:
- out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate
- if self.ratio_gout != 0:
- out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g)
-
- return out_xl, out_xg
-
-
-class FFC_BN_ACT(nn.Module):
-
- def __init__(self, in_channels, out_channels,
- kernel_size, ratio_gin, ratio_gout,
- stride=1, padding=0, dilation=1, groups=1, bias=False,
- norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity,
- padding_type='reflect',
- enable_lfu=True, **kwargs):
- super(FFC_BN_ACT, self).__init__()
- self.ffc = FFC(in_channels, out_channels, kernel_size,
- ratio_gin, ratio_gout, stride, padding, dilation,
- groups, bias, enable_lfu, padding_type=padding_type, **kwargs)
- lnorm = nn.Identity if ratio_gout == 1 else norm_layer
- gnorm = nn.Identity if ratio_gout == 0 else norm_layer
- global_channels = int(out_channels * ratio_gout)
- self.bn_l = lnorm(out_channels - global_channels)
- self.bn_g = gnorm(global_channels)
-
- lact = nn.Identity if ratio_gout == 1 else activation_layer
- gact = nn.Identity if ratio_gout == 0 else activation_layer
- self.act_l = lact(inplace=True)
- self.act_g = gact(inplace=True)
-
- def forward(self, x):
- x_l, x_g = self.ffc(x)
- x_l = self.act_l(self.bn_l(x_l))
- x_g = self.act_g(self.bn_g(x_g))
- return x_l, x_g
-
-
-class FFCResnetBlock(nn.Module):
- def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1,
- spatial_transform_kwargs=None, inline=False, **conv_kwargs):
- super().__init__()
- self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation,
- norm_layer=norm_layer,
- activation_layer=activation_layer,
- padding_type=padding_type,
- **conv_kwargs)
- self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation,
- norm_layer=norm_layer,
- activation_layer=activation_layer,
- padding_type=padding_type,
- **conv_kwargs)
- if spatial_transform_kwargs is not None:
- self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs)
- self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs)
- self.inline = inline
-
- def forward(self, x):
- if self.inline:
- x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:]
- else:
- x_l, x_g = x if type(x) is tuple else (x, 0)
-
- id_l, id_g = x_l, x_g
-
- x_l, x_g = self.conv1((x_l, x_g))
- x_l, x_g = self.conv2((x_l, x_g))
-
- x_l, x_g = id_l + x_l, id_g + x_g
- out = x_l, x_g
- if self.inline:
- out = torch.cat(out, dim=1)
- return out
-
-
-class ConcatTupleLayer(nn.Module):
- def forward(self, x):
- assert isinstance(x, tuple)
- x_l, x_g = x
- assert torch.is_tensor(x_l) or torch.is_tensor(x_g)
- if not torch.is_tensor(x_g):
- return x_l
- return torch.cat(x, dim=1)
-
-
-class FFCResNetGenerator(nn.Module):
- def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
- padding_type='reflect', activation_layer=nn.ReLU,
- up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True),
- init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={},
- spatial_transform_layers=None, spatial_transform_kwargs={},
- add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}):
- assert (n_blocks >= 0)
- super().__init__()
-
- model = [nn.ReflectionPad2d(3),
- FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer,
- activation_layer=activation_layer, **init_conv_kwargs)]
-
- ### downsample
- for i in range(n_downsampling):
- mult = 2 ** i
- if i == n_downsampling - 1:
- cur_conv_kwargs = dict(downsample_conv_kwargs)
- cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0)
- else:
- cur_conv_kwargs = downsample_conv_kwargs
- model += [FFC_BN_ACT(min(max_features, ngf * mult),
- min(max_features, ngf * mult * 2),
- kernel_size=3, stride=2, padding=1,
- norm_layer=norm_layer,
- activation_layer=activation_layer,
- **cur_conv_kwargs)]
-
- mult = 2 ** n_downsampling
- feats_num_bottleneck = min(max_features, ngf * mult)
-
- ### resnet blocks
- for i in range(n_blocks):
- cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer,
- norm_layer=norm_layer, **resnet_conv_kwargs)
- if spatial_transform_layers is not None and i in spatial_transform_layers:
- cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs)
- model += [cur_resblock]
-
- model += [ConcatTupleLayer()]
-
- ### upsample
- for i in range(n_downsampling):
- mult = 2 ** (n_downsampling - i)
- model += [nn.ConvTranspose2d(min(max_features, ngf * mult),
- min(max_features, int(ngf * mult / 2)),
- kernel_size=3, stride=2, padding=1, output_padding=1),
- up_norm_layer(min(max_features, int(ngf * mult / 2))),
- up_activation]
-
- if out_ffc:
- model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer,
- norm_layer=norm_layer, inline=True, **out_ffc_kwargs)]
-
- model += [nn.ReflectionPad2d(3),
- nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
- if add_out_act:
- model.append(get_activation('tanh' if add_out_act is True else add_out_act))
- self.model = nn.Sequential(*model)
-
- def forward(self, input):
- return self.model(input)
-
-
-class FFCNLayerDiscriminator(BaseDiscriminator):
- def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512,
- init_conv_kwargs={}, conv_kwargs={}):
- super().__init__()
- self.n_layers = n_layers
-
- def _act_ctor(inplace=True):
- return nn.LeakyReLU(negative_slope=0.2, inplace=inplace)
-
- kw = 3
- padw = int(np.ceil((kw-1.0)/2))
- sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer,
- activation_layer=_act_ctor, **init_conv_kwargs)]]
-
- nf = ndf
- for n in range(1, n_layers):
- nf_prev = nf
- nf = min(nf * 2, max_features)
-
- cur_model = [
- FFC_BN_ACT(nf_prev, nf,
- kernel_size=kw, stride=2, padding=padw,
- norm_layer=norm_layer,
- activation_layer=_act_ctor,
- **conv_kwargs)
- ]
- sequence.append(cur_model)
-
- nf_prev = nf
- nf = min(nf * 2, 512)
-
- cur_model = [
- FFC_BN_ACT(nf_prev, nf,
- kernel_size=kw, stride=1, padding=padw,
- norm_layer=norm_layer,
- activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs),
- **conv_kwargs),
- ConcatTupleLayer()
- ]
- sequence.append(cur_model)
-
- sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
-
- for n in range(len(sequence)):
- setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
-
- def get_all_activations(self, x):
- res = [x]
- for n in range(self.n_layers + 2):
- model = getattr(self, 'model' + str(n))
- res.append(model(res[-1]))
- return res[1:]
-
- def forward(self, x):
- act = self.get_all_activations(x)
- feats = []
- for out in act[:-1]:
- if isinstance(out, tuple):
- if torch.is_tensor(out[1]):
- out = torch.cat(out, dim=1)
- else:
- out = out[0]
- feats.append(out)
- return act[-1], feats
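The FFC layers above split the channels into a local branch (ordinary convolution) and a global branch (the FFT-based `SpectralTransform`), with the split controlled by `ratio_gin`/`ratio_gout`. A minimal usage sketch, assuming the classes above are importable; sizes are illustrative:

```python
import torch

layer = FFC_BN_ACT(64, 64, kernel_size=3, ratio_gin=0.5, ratio_gout=0.5,
                   padding=1, activation_layer=torch.nn.ReLU)
x = torch.randn(2, 64, 64, 64)
x_l, x_g = layer((x[:, :32], x[:, 32:]))   # local/global halves in, local/global halves out
print(x_l.shape, x_g.shape)                # both torch.Size([2, 32, 64, 64])
```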
diff --git a/spaces/nateraw/dockerplayground/start_server.sh b/spaces/nateraw/dockerplayground/start_server.sh
deleted file mode 100644
index 01ae830fd64c84e2f82d64836d3ef8e42e712361..0000000000000000000000000000000000000000
--- a/spaces/nateraw/dockerplayground/start_server.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-echo "Starting Jupyter Lab with token $JUPYTER_TOKEN"
-
-jupyter-lab \
- --ip 0.0.0.0 \
- --port 7860 \
- --no-browser \
- --allow-root \
- --ServerApp.token="$JUPYTER_TOKEN" \
- --ServerApp.tornado_settings="{'headers': {'Content-Security-Policy': 'frame-ancestors *'}}" \
- --ServerApp.disable_check_xsrf=True \
- --ServerApp.cookie_options="{'SameSite': 'None', 'Secure': True}"
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Office 2013 Ita Preattivato Torrent.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Office 2013 Ita Preattivato Torrent.md
deleted file mode 100644
index 26d48a17e4cb8cc24d5c2333082e2d7ff0a5370e..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Office 2013 Ita Preattivato Torrent.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-How to Download and Install Office 2013 ITA Preattivato Torrent
-Office 2013 is a popular productivity suite that includes applications such as Word, Excel, PowerPoint, Outlook, and more. If you want to use Office 2013 in Italian, you can download and install a pre-activated version from a torrent site. However, you should be aware of the risks and legal issues involved in using pirated software.
-In this article, we will show you how to download and install Office 2013 ITA preattivato torrent safely and easily. Follow these steps:
-office 2013 ita preattivato torrent Download ✅ https://urlcod.com/2uI9O3
-
-Find a reliable torrent site that offers Office 2013 ITA preattivato torrent. You can use a search engine or a torrent aggregator to find one. Some examples are The Pirate Bay, Kickass Torrents, and RARBG.
-Download a torrent client that can handle magnet links. A torrent client is a software that allows you to download files from other users who are sharing them. Some examples are uTorrent, BitTorrent, and qBittorrent.
-Open the torrent site and search for Office 2013 ITA preattivato torrent. You should see a list of results with different file sizes and seeders. Seeders are users who have the complete file and are uploading it to others. Choose the one with the most seeders and the smallest file size.
-Click on the magnet link or the download button to start downloading the torrent file. A magnet link is a URL that contains information about the file, such as its name, size, and hash. A download button will download a small file that contains the same information.
-Open the torrent file with your torrent client. It will automatically connect to other users who have the file and start downloading it. You can monitor the progress and speed of the download in your torrent client.
-Once the download is complete, you will have a folder with the Office 2013 ITA preattivato files. You can open it and run the setup.exe file to install Office 2013 on your computer. You do not need to enter any product key or activation code as it is already pre-activated.
-
-Congratulations! You have successfully downloaded and installed Office 2013 ITA preattivato torrent on your computer. You can now enjoy using Office 2013 in Italian for free.
-However, you should also be aware of the potential risks and legal issues involved in using pirated software. Pirated software may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Pirated software may also not receive updates or support from Microsoft, which can affect its performance and security. Pirated software may also violate the intellectual property rights of Microsoft and other software developers, which can result in legal consequences or fines.
-
-Therefore, we recommend that you use only genuine and licensed software from official sources. You can purchase Office 2013 from Microsoft's website or authorized resellers. You can also use alternative productivity suites that are free and legal, such as LibreOffice, OpenOffice, or Google Docs.
-We hope this article was helpful and informative. Thank you for reading!
-81aa517590
-
-
\ No newline at end of file
diff --git a/spaces/nikhedward/TL-DR_summarize_it/app.py b/spaces/nikhedward/TL-DR_summarize_it/app.py
deleted file mode 100644
index 408752af3cfe79321083f7fd4f7bea8f23ff2ff8..0000000000000000000000000000000000000000
--- a/spaces/nikhedward/TL-DR_summarize_it/app.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import gradio as gr
-import transformers
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-
-title = " Text Summarizer 📝"
-
-
-text_1 = """
-Miss Brill is the story of an old woman told brilliantly and realistically, balancing thoughts and emotions that sustain her late solitary life amidst all the bustle of modern life. Miss Brill is a regular visitor on Sundays to the Jardins Publiques (the Public Gardens) of a small French suburb where she sits and watches all sorts of people come and go. She listens to the band playing, loves to watch people and guess what keeps them going, and enjoys contemplating the world as a great stage upon which actors perform. She finds herself to be another actor among the so many she sees, or at least herself as 'part of the performance after all.' One Sunday Miss Brill puts on her fur and goes to the Public Gardens as usual. The evening ends with her sudden realization that she is old and lonely, a realization brought to her by a conversation she overhears between a boy and a girl, presumably lovers, who comment on her unwelcome presence in their vicinity. Miss Brill is sad and depressed as she returns home, not stopping by as usual to buy her Sunday delicacy, a slice of honey-cake. She retires to her dark room, puts the fur back into the box and imagines that she has heard something cry.
-"""
-
-text_2 = """
-Senior British royals, including Prince William and his wife, Duchess Kate, went to church on Easter Sunday without the queen. Queen Elizabeth II, who has been experiencing mobility problems, did not attend the service at St. George's Chapel on the grounds of Windsor Castle, a fixture in the royals' calendar. William and Kate, known as the Duke and Duchess of Cambridge, were accompanied by two of their three children: Prince George, 8, and Princess Charlotte, 6. Also in attendance were the queen's youngest son, Prince Edward, with his wife Sophie and their children, and Princess Eugenie, the daughter of Prince Andrew. Last week, she had a visit from her grandson Prince Harry and his wife Meghan, a spokesperson for the couple confirmed to USA TODAY – the first time the couple has visited the U.K. together since they stepped down as working royals in 2020 and moved to California.
-
-"""
-text_3 = """
-In the article “Bats,” by Debbie Dean, we learn that in contrast to some mistaken beliefs, bats have sight, are mammals, and are not especially likely to carry rabies. Bats are relatively misunderstood and unappreciated. Bats have some interesting physical features. They have similar bone structure and skeletons to that of humans, so they are not winged rodents. They are color blind, so they use echolocation if there is not sufficient light. Otherwise, their sight is enough. Species of bats total about a thousand. The species come in a variety of sizes and have unique diets. Most eat insects, but some eat plant products and small animals. However, vampire bats drink blood, which can be harmful to livestock. Farmers have accidentally killed many helpful bats while trying to rid themselves of vampire bats. Bats can actually be helpful to humans. They destroy unwanted bugs, spread fruit seeds, and pollinate plants. However, the survival of bats is not known because many are killed by human disruptions and predators. The bat population has dropped steadily and may continue to drop. Hopefully, we will realize that although bats look different than our favorite animals, we
-can learn to accept and admire their uniqueness
-"""
-
-
-
-
-sample_texts = [[text_1], [text_2], [text_3]]
-
-desc = """
-This is an abstractive text summarizer app using a fine-tuned bart-large-cnn model. The abstractive approach rephrases the whole document while capturing its complete meaning, which yields a more human-like summary.
-
- Note: For faster summaries, input smaller texts. The current model supports a context size of 1024 words; anything longer is truncated.
-
- Sample text inputs are provided at the bottom!
-"""
-
-
-
-model_name = "nikhedward/bart-large-cnn-finetuned-multi-news"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
-
-def auto_summarize(inp):
- inp = inp.replace('\n','')
- inp = tokenizer.encode(inp, return_tensors='pt', max_length=1024, truncation=True)
- summary_ids = model.generate(inp, num_beams=4, max_length=150, early_stopping=True, do_sample=True, top_k=50, top_p=0.95)
- summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
- return summary
-
-interface = gr.Interface(fn=auto_summarize, inputs=gr.inputs.Textbox(lines=10, label="Input Text"), description = desc, theme = "dark-peach",
-examples = sample_texts, title = title, outputs="text", css=".footer{display:none !important}")
-
-interface.launch()
-
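The same checkpoint can also be exercised without the Gradio UI; a minimal sketch using the standard `transformers` summarization pipeline (the generation settings here are illustrative, not the app's exact ones):

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="nikhedward/bart-large-cnn-finetuned-multi-news")
text = "Long article text goes here ..."
print(summarizer(text, max_length=150, num_beams=4, truncation=True)[0]["summary_text"])
```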
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/README.md b/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/README.md
deleted file mode 100644
index 912cc29927542bfe4258d3208cf52d73cb0ea477..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-This directory provides definitions for a few common models, dataloaders, schedulers,
-and optimizers that are often used in training.
-The definitions of these objects are provided in the form of lazy instantiation:
-their arguments can be edited by users before the objects are constructed.
-
-They can be imported, or loaded by `model_zoo.get_config` API in users' own configs.
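A minimal sketch of the lazy-config workflow this README describes: load one of the repository configs via `model_zoo.get_config`, edit a lazily-bound argument, and only then build the object with `instantiate` (the config name and the edited field are just examples):

```python
from detectron2 import model_zoo
from detectron2.config import instantiate

cfg = model_zoo.get_config("new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py")
cfg.model.backbone.bottom_up.freeze_at = 0   # edit arguments before anything is constructed
model = instantiate(cfg.model)               # the object is only built here
```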
diff --git a/spaces/nupurkmr9/concept-ablation/__init__.py b/spaces/nupurkmr9/concept-ablation/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/oliver2023/chatgpt-on-wechat/channel/wechat/wechaty_channel.py b/spaces/oliver2023/chatgpt-on-wechat/channel/wechat/wechaty_channel.py
deleted file mode 100644
index 65348bacc96c46f0e9b9eee38dc062de3805bf56..0000000000000000000000000000000000000000
--- a/spaces/oliver2023/chatgpt-on-wechat/channel/wechat/wechaty_channel.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# encoding:utf-8
-
-"""
-wechaty channel
-Python Wechaty - https://github.com/wechaty/python-wechaty
-"""
-import base64
-import os
-import time
-import asyncio
-from bridge.context import Context
-from wechaty_puppet import FileBox
-from wechaty import Wechaty, Contact
-from wechaty.user import Message
-from bridge.reply import *
-from bridge.context import *
-from channel.chat_channel import ChatChannel
-from channel.wechat.wechaty_message import WechatyMessage
-from common.log import logger
-from common.singleton import singleton
-from config import conf
-try:
- from voice.audio_convert import any_to_sil
-except Exception as e:
- pass
-
-@singleton
-class WechatyChannel(ChatChannel):
- NOT_SUPPORT_REPLYTYPE = []
- def __init__(self):
- super().__init__()
-
- def startup(self):
- config = conf()
- token = config.get('wechaty_puppet_service_token')
- os.environ['WECHATY_PUPPET_SERVICE_TOKEN'] = token
- asyncio.run(self.main())
-
- async def main(self):
-
- loop = asyncio.get_event_loop()
-        # hand the asyncio event loop to the handler thread pool
- self.handler_pool._initializer= lambda: asyncio.set_event_loop(loop)
- self.bot = Wechaty()
- self.bot.on('login', self.on_login)
- self.bot.on('message', self.on_message)
- await self.bot.start()
-
- async def on_login(self, contact: Contact):
- self.user_id = contact.contact_id
- self.name = contact.name
- logger.info('[WX] login user={}'.format(contact))
-
-    # Unified send function. Each Channel implements its own, dispatching on reply.type to send the right kind of message.
- def send(self, reply: Reply, context: Context):
- receiver_id = context['receiver']
- loop = asyncio.get_event_loop()
- if context['isgroup']:
- receiver = asyncio.run_coroutine_threadsafe(self.bot.Room.find(receiver_id),loop).result()
- else:
- receiver = asyncio.run_coroutine_threadsafe(self.bot.Contact.find(receiver_id),loop).result()
- msg = None
- if reply.type == ReplyType.TEXT:
- msg = reply.content
- asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result()
- logger.info('[WX] sendMsg={}, receiver={}'.format(reply, receiver))
- elif reply.type == ReplyType.ERROR or reply.type == ReplyType.INFO:
- msg = reply.content
- asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result()
- logger.info('[WX] sendMsg={}, receiver={}'.format(reply, receiver))
- elif reply.type == ReplyType.VOICE:
- voiceLength = None
- file_path = reply.content
- sil_file = os.path.splitext(file_path)[0] + '.sil'
- voiceLength = int(any_to_sil(file_path, sil_file))
- if voiceLength >= 60000:
- voiceLength = 60000
- logger.info('[WX] voice too long, length={}, set to 60s'.format(voiceLength))
-            # send the voice message
- t = int(time.time())
- msg = FileBox.from_file(sil_file, name=str(t) + '.sil')
- if voiceLength is not None:
- msg.metadata['voiceLength'] = voiceLength
- asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result()
- try:
- os.remove(file_path)
- if sil_file != file_path:
- os.remove(sil_file)
- except Exception as e:
- pass
- logger.info('[WX] sendVoice={}, receiver={}'.format(reply.content, receiver))
-        elif reply.type == ReplyType.IMAGE_URL: # download the image from a URL
- img_url = reply.content
- t = int(time.time())
- msg = FileBox.from_url(url=img_url, name=str(t) + '.png')
- asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result()
- logger.info('[WX] sendImage url={}, receiver={}'.format(img_url,receiver))
-        elif reply.type == ReplyType.IMAGE: # read the image from a file object
- image_storage = reply.content
- image_storage.seek(0)
- t = int(time.time())
- msg = FileBox.from_base64(base64.b64encode(image_storage.read()), str(t) + '.png')
- asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result()
- logger.info('[WX] sendImage, receiver={}'.format(receiver))
-
- async def on_message(self, msg: Message):
- """
- listen for message event
- """
- try:
- cmsg = await WechatyMessage(msg)
- except NotImplementedError as e:
- logger.debug('[WX] {}'.format(e))
- return
- except Exception as e:
- logger.exception('[WX] {}'.format(e))
- return
- logger.debug('[WX] message:{}'.format(cmsg))
-        room = msg.room() # the group chat this message came from; None if it is not a group message
- isgroup = room is not None
- ctype = cmsg.ctype
- context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg)
- if context:
- logger.info('[WX] receiveMsg={}, context={}'.format(cmsg, context))
- self.produce(context)
\ No newline at end of file
diff --git a/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/README.md b/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/README.md
deleted file mode 100644
index 0bf50653704091fd4cb182b46801624d8fd2b810..0000000000000000000000000000000000000000
--- a/spaces/openaccess-ai-collective/manticore-13b-chat-pyg/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-title: Manticore 13B Chat
-emoji: 🏃
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.29.0
-app_file: tabbed.py
-pinned: false
-duplicated_from: openaccess-ai-collective/ggml-ui
----
-
-# GGML UI Inference w/ HuggingFace Spaces
-
-- Fork this space to use your own GGML models. Simply update the [./config.yml](./config.yml)
-- Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
-
-Brought to you by [OpenAccess AI Collective](https://github.com/OpenAccess-AI-Collective)
\ No newline at end of file
diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/installation.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/installation.md
deleted file mode 100644
index 4a9146a22620699a7faabb45844809be581a4d7a..0000000000000000000000000000000000000000
--- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/ko/installation.md
+++ /dev/null
@@ -1,142 +0,0 @@
-
-
-# Installation
-
-Install 🤗 Diffusers for whichever library you are working with.
-
-🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:
-
-- [PyTorch installation instructions](https://pytorch.org/get-started/locally/)
-- [Flax installation instructions](https://flax.readthedocs.io/en/latest/)
-
-## Install with pip
-
-You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html).
-If you are unfamiliar with Python virtual environments, take a look at this [guide to installing with pip in virtual environments](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
-A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.
-
-Start by creating a virtual environment in your project directory:
-
-```bash
-python -m venv .env
-```
-
-Then activate the virtual environment:
-
-```bash
-source .env/bin/activate
-```
-
-Now you are ready to install 🤗 Diffusers with the following command:
-
-**For PyTorch**
-
-```bash
-pip install diffusers["torch"]
-```
-
-**For Flax**
-
-```bash
-pip install diffusers["flax"]
-```
-
-## Install from source
-
-Before installing `diffusers` from source, make sure `torch` and `accelerate` are installed.
-
-For `torch` installation, refer to the [torch docs](https://pytorch.org/get-started/locally/#start-locally).
-
-Install `accelerate` as follows:
-
-```bash
-pip install accelerate
-```
-
-Install 🤗 Diffusers from source with the following command:
-
-```bash
-pip install git+https://github.com/huggingface/diffusers
-```
-
-This command installs the bleeding-edge `main` version rather than the latest `stable` release.
-The `main` version is useful for staying up to date with the latest development work,
-for example when a bug has been fixed since the last official release but a new release has not been shipped yet.
-However, this also means the `main` version may not always be stable.
-We strive to keep the `main` version working, and most issues are usually resolved within a few hours or a day.
-If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!
-
-
-## Editable install
-
-You will need an editable install if you want to:
-
-* Use the `main` version of the source code
-* Contribute to 🤗 Diffusers (needed to test changes to the code)
-
-Clone the repository and install 🤗 Diffusers with the following commands:
-
-```bash
-git clone https://github.com/huggingface/diffusers.git
-cd diffusers
-```
-
-**For PyTorch**
-
-```
-pip install -e ".[torch]"
-```
-
-**For Flax**
-
-```
-pip install -e ".[flax]"
-```
-
-These commands link the folder you cloned the repository into with your Python library paths.
-Python will now look inside the cloned folder in addition to the normal library paths.
-For example, if your Python packages are installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the cloned folder `~/diffusers/`.
-
-
-
-You must keep the `diffusers` folder if you want to keep using the library.
-
-
-
-Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command:
-
-```bash
-cd ~/diffusers/
-git pull
-```
-
-Your Python environment will find the `main` version of 🤗 Diffusers on the next run.
-
-## Notice on telemetry logging
-
-Our library gathers telemetry information remotely during `from_pretrained()` requests.
-This data includes the version of Diffusers and PyTorch/Flax, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hub.
-This usage data helps us debug issues and prioritize new features.
-Telemetry is only sent when loading models and pipelines from the HuggingFace Hub, and is not collected during local usage.
-
-We understand that not everyone wants to share additional information, and we respect your privacy, so you can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal:
-
-On Linux/MacOS:
-```bash
-export DISABLE_TELEMETRY=YES
-```
-
-On Windows:
-```bash
-set DISABLE_TELEMETRY=YES
-```
\ No newline at end of file
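A quick, generic way to confirm which copy of the library an environment picks up after an editable install (not part of the guide above):

```python
import diffusers

# After `pip install -e .`, __file__ should point inside the cloned ~/diffusers/ folder.
print(diffusers.__version__, diffusers.__file__)
```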
diff --git a/spaces/pakooo/Text2Image/README.md b/spaces/pakooo/Text2Image/README.md
deleted file mode 100644
index f326898bc24067a7c44b0e55bfa2ace562581751..0000000000000000000000000000000000000000
--- a/spaces/pakooo/Text2Image/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Text To Image
-emoji: 👀
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
-duplicated_from: yizhangliu/Text-to-Image
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/parkyzh/bingo/src/lib/utils.ts b/spaces/parkyzh/bingo/src/lib/utils.ts
deleted file mode 100644
index 07feedb34e356b1b3cf867872f32d47a96ae12fb..0000000000000000000000000000000000000000
--- a/spaces/parkyzh/bingo/src/lib/utils.ts
+++ /dev/null
@@ -1,138 +0,0 @@
-import { clsx, type ClassValue } from 'clsx'
-import { customAlphabet } from 'nanoid'
-import { twMerge } from 'tailwind-merge'
-
-export function cn(...inputs: ClassValue[]) {
- return twMerge(clsx(inputs))
-}
-
-export const nanoid = customAlphabet(
- '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
- 7
-) // 7-character random string
-
-export function createChunkDecoder() {
- const decoder = new TextDecoder()
- return function (chunk: Uint8Array | undefined): string {
- if (!chunk) return ''
- return decoder.decode(chunk, { stream: true })
- }
-}
-
-export function random (start: number, end: number) {
- return start + Math.ceil(Math.random() * (end - start))
-}
-
-export function randomIP() {
- return `11.${random(104, 107)}.${random(1, 255)}.${random(1, 255)}`
-}
-
-export function parseHeadersFromCurl(content: string) {
- const re = /-H '([^:]+):\s*([^']+)/mg
- const headers: HeadersInit = {}
-  content = content.replaceAll('-H "', '-H \'').replaceAll('" ^', '\'\\').replaceAll('^\\^"', '"') // convert a cmd-style curl command into bash-style curl
- content.replace(re, (_: string, key: string, value: string) => {
- headers[key] = value
- return ''
- })
-
- return headers
-}
-
-export const ChunkKeys = ['BING_HEADER', 'BING_HEADER1', 'BING_HEADER2']
-export function encodeHeadersToCookie(content: string) {
- const base64Content = btoa(content)
- const contentChunks = base64Content.match(/.{1,4000}/g) || []
- return ChunkKeys.map((key, index) => `${key}=${contentChunks[index] ?? ''}`)
-}
-
-export function extraCurlFromCookie(cookies: Partial<{ [key: string]: string }>) {
- let base64Content = ''
- ChunkKeys.forEach((key) => {
- base64Content += (cookies[key] || '')
- })
- try {
- return atob(base64Content)
- } catch(e) {
- return ''
- }
-}
-
-export function extraHeadersFromCookie(cookies: Partial<{ [key: string]: string }>) {
- return parseHeadersFromCurl(extraCurlFromCookie(cookies))
-}
-
-export function formatDate(input: string | number | Date): string {
- const date = new Date(input)
- return date.toLocaleDateString('en-US', {
- month: 'long',
- day: 'numeric',
- year: 'numeric'
- })
-}
-
-export function parseCookie(cookie: string, cookieName: string) {
- const targetCookie = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`).test(cookie) ? RegExp.$1 : cookie
- return targetCookie ? decodeURIComponent(targetCookie).trim() : cookie.indexOf('=') === -1 ? cookie.trim() : ''
-}
-
-export function parseCookies(cookie: string, cookieNames: string[]) {
- const cookies: { [key: string]: string } = {}
- cookieNames.forEach(cookieName => {
- cookies[cookieName] = parseCookie(cookie, cookieName)
- })
- return cookies
-}
-
-export const DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0'
-export const DEFAULT_IP = process.env.BING_IP || randomIP()
-
-export function parseUA(ua?: string, default_ua = DEFAULT_UA) {
- return / EDGE?/i.test(decodeURIComponent(ua || '')) ? decodeURIComponent(ua!.trim()) : default_ua
-}
-
-export function createHeaders(cookies: Partial<{ [key: string]: string }>, defaultHeaders?: Partial<{ [key: string]: string }>) {
- let {
- BING_COOKIE = process.env.BING_COOKIE,
- BING_UA = process.env.BING_UA,
- BING_IP = process.env.BING_IP,
- BING_HEADER = process.env.BING_HEADER,
- } = cookies
-
- if (BING_HEADER) {
- return extraHeadersFromCookie({
- BING_HEADER,
- ...cookies,
- })
- }
-
- const ua = parseUA(BING_UA)
-
- if (!BING_COOKIE) {
-    BING_COOKIE = defaultHeaders?.IMAGE_BING_COOKIE || 'xxx' // on HF this currently works even without a real cookie
- }
-
- const parsedCookie = parseCookie(BING_COOKIE, '_U')
- if (!parsedCookie) {
- throw new Error('Invalid Cookie')
- }
- return {
- 'x-forwarded-for': BING_IP || DEFAULT_IP,
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
- 'User-Agent': ua!,
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- cookie: `_U=${parsedCookie}` || '',
- }
-}
-
-export class WatchDog {
- private tid = 0
- watch(fn: Function, timeout = 2000) {
- clearTimeout(this.tid)
- this.tid = setTimeout(fn, timeout + Math.random() * 1000)
- }
- reset() {
- clearTimeout(this.tid)
- }
-}
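The cookie helpers above round-trip a large curl header blob through several cookies of at most 4000 characters each. The same chunk/reassemble idea, sketched in Python purely for illustration (names mirror the TypeScript above; this is not part of the module):

```python
import base64
import re

CHUNK_KEYS = ["BING_HEADER", "BING_HEADER1", "BING_HEADER2"]

def encode_headers_to_cookie(content: str) -> list:
    # base64-encode the blob, then split it into <=4000-character cookie values
    b64 = base64.b64encode(content.encode()).decode()
    chunks = re.findall(r".{1,4000}", b64, flags=re.S)
    return ["%s=%s" % (key, chunks[i] if i < len(chunks) else "") for i, key in enumerate(CHUNK_KEYS)]

def extract_curl_from_cookies(cookies: dict) -> str:
    # reassemble the chunks in key order and decode back to the original blob
    b64 = "".join(cookies.get(key, "") for key in CHUNK_KEYS)
    return base64.b64decode(b64).decode() if b64 else ""
```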
diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/oldresnet152.py b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/oldresnet152.py
deleted file mode 100644
index f783c2c0c0b11a9bde635dac3090c38af448ab88..0000000000000000000000000000000000000000
--- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/oldresnet152.py
+++ /dev/null
@@ -1,922 +0,0 @@
-
-import torch
-import torch.nn as nn
-
-from functools import reduce
-from torch.autograd import Variable
-
-def load_places_resnet152(weight_file):
- model = OldResNet152()
- state_dict = torch.load(weight_file)
- model.load_state_dict(state_dict)
- return model
-
-class LambdaBase(nn.Sequential):
- def __init__(self, fn, *args):
- super(LambdaBase, self).__init__(*args)
- self.lambda_func = fn
-
- def forward_prepare(self, input):
- output = []
- for module in self._modules.values():
- output.append(module(input))
- return output if output else input
-
-class Lambda(LambdaBase):
- def forward(self, input):
- return self.lambda_func(self.forward_prepare(input))
-
-class LambdaMap(LambdaBase):
- def forward(self, input):
- return list(map(self.lambda_func,self.forward_prepare(input)))
-
-class LambdaReduce(LambdaBase):
- def forward(self, input):
- return reduce(self.lambda_func,self.forward_prepare(input))
-
-
-class OldResNet152(nn.Sequential):
- def __init__(self):
- children = [
-# resnet152_places365 = nn.Sequential( # Sequential,
- nn.Conv2d(3,64,(7, 7),(2, 2),(3, 3),1,1,bias=False),
- nn.BatchNorm2d(64),
- nn.ReLU(),
- nn.MaxPool2d((3, 3),(2, 2),(1, 1)),
- nn.Sequential( # Sequential,
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(64,64,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(64),
- nn.ReLU(),
- nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(64),
- nn.ReLU(),
- nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- ),
- nn.Sequential( # Sequential,
- nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- ),
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(256,64,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(64),
- nn.ReLU(),
- nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(64),
- nn.ReLU(),
- nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(256,64,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(64),
- nn.ReLU(),
- nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(64),
- nn.ReLU(),
- nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- ),
- nn.Sequential( # Sequential,
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(256,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,128,(3, 3),(2, 2),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- nn.Sequential( # Sequential,
- nn.Conv2d(256,512,(1, 1),(2, 2),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(512,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(128),
- nn.ReLU(),
- nn.Conv2d(128,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- ),
- nn.Sequential( # Sequential,
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(512,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(2, 2),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- nn.Sequential( # Sequential,
- nn.Conv2d(512,1024,(1, 1),(2, 2),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(256),
- nn.ReLU(),
- nn.Conv2d(256,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(1024),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- ),
- nn.Sequential( # Sequential,
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- nn.ReLU(),
- nn.Conv2d(512,512,(3, 3),(2, 2),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(512),
- nn.ReLU(),
- nn.Conv2d(512,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(2048),
- ),
- nn.Sequential( # Sequential,
- nn.Conv2d(1024,2048,(1, 1),(2, 2),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(2048),
- ),
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(2048,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- nn.ReLU(),
- nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(512),
- nn.ReLU(),
- nn.Conv2d(512,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(2048),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- nn.Sequential( # Sequential,
- LambdaMap(lambda x: x, # ConcatTable,
- nn.Sequential( # Sequential,
- nn.Conv2d(2048,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(512),
- nn.ReLU(),
- nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,1,bias=False),
- nn.BatchNorm2d(512),
- nn.ReLU(),
- nn.Conv2d(512,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
- nn.BatchNorm2d(2048),
- ),
- Lambda(lambda x: x), # Identity,
- ),
- LambdaReduce(lambda x,y: x+y), # CAddTable,
- nn.ReLU(),
- ),
- ),
- nn.AvgPool2d((7, 7),(1, 1)),
- Lambda(lambda x: x.view(x.size(0),-1)), # View,
- nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x )
- ,nn.Linear(2048,365)), # Linear,
- ]
-
- super(OldResNet152, self).__init__(*children)
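Each of the repeated `LambdaMap`/`LambdaReduce` groups in the deleted model above is the Torch-to-PyTorch converter's rendering of a standard ResNet bottleneck block: a 1x1 -> 3x3 -> 1x1 convolution branch whose output is summed with an identity shortcut (`CAddTable`) and passed through a ReLU. A minimal sketch of one such block, assuming the same 1024/256 channel sizes used above:

```python
import torch
from torch import nn


class Bottleneck(nn.Module):
    """Compact rewrite of one LambdaMap/LambdaReduce group from the model above."""

    def __init__(self, channels: int = 1024, mid: int = 256):
        super().__init__()
        # Convolution branch: 1x1 reduce -> 3x3 -> 1x1 expand, BatchNorm after each conv.
        self.branch = nn.Sequential(
            nn.Conv2d(channels, mid, kernel_size=1, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, mid, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(channels),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # CAddTable + ReLU: add the branch output to the untouched input.
        return torch.relu(self.branch(x) + x)
```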
diff --git a/spaces/penguin2023/vncs/start.sh b/spaces/penguin2023/vncs/start.sh
deleted file mode 100644
index d9b29a76d1e08e744c659939c871e8f69fad978d..0000000000000000000000000000000000000000
--- a/spaces/penguin2023/vncs/start.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-# ls
-# # umask 0077 # use safe default permissions
-# mkdir -p "$HO/.vnc" # create config directory
-# chmod go-rwx "$HO/.vnc" # enforce safe permissions
-# ls -l
-tigervnc_path="/tigervnc-${tigervnc_version}.x86_64/usr/bin"
-
-# Start TigerVNC
-if [ -n "$VNC_PASSWD" ]; then
- ${tigervnc_path}/vncpasswd -f <<< "$VNC_PASSWD" > ".vnc/passwd"
- ${tigervnc_path}/vncserver -rfbport 5900 -geometry ${GEOMETRY} -depth ${DEPTH}
-else
- ${tigervnc_path}/vncpasswd -f <<< "" > ".vnc/passwd"
- ${tigervnc_path}/vncserver -rfbport 5900 -geometry ${GEOMETRY} -depth ${DEPTH} -SecurityTypes None
-fi
-
-cat /noVNC-${noVNC_version}/utils/launch.sh
-
-# Start noVNC
-/noVNC-${noVNC_version}/utils/launch.sh
\ No newline at end of file
diff --git a/spaces/philsark/clip-guided-diffusion-identity/README.md b/spaces/philsark/clip-guided-diffusion-identity/README.md
deleted file mode 100644
index c87d045351d4dd8f256b19497ce1ba28b9e36f8b..0000000000000000000000000000000000000000
--- a/spaces/philsark/clip-guided-diffusion-identity/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: Clip-Guided-Diffusion-Identity
-emoji: 📈
-colorFrom: purple
-colorTo: blue
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/Liaobots.py b/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/Liaobots.py
deleted file mode 100644
index 985bf53ddfd3877db3c60aedee86db11ec0e7243..0000000000000000000000000000000000000000
--- a/spaces/pikto/Elite-freegpt-webui/g4f/Provider/Providers/Liaobots.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os, uuid, requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://liaobots.com'
-model = ['gpt-4-0613']
-supports_stream = True
-needs_auth = True
-
-models = {
- 'gpt-4-0613': {
- "id":"gpt-4-0613",
- "name":"GPT-4",
- "maxLength":24000,
- "tokenLimit":8000
- }
-}
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
- print(kwargs)
-
- headers = {
- 'authority': 'liaobots.com',
- 'content-type': 'application/json',
- 'origin': 'https://liaobots.com',
- 'referer': 'https://liaobots.com/',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
- 'x-auth-code': 'P6cPPK6Z8JDG3'
- }
-
- json_data = {
- 'conversationId': str(uuid.uuid4()),
- 'model': models[model],
- 'authcode':"jrzVZMJiwN0NU",
- 'messages': messages,
- 'key': '',
- 'prompt': "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
- }
-
- response = requests.post('https://liaobots.com/api/chat',
- headers=headers, json=json_data, stream=True)
-
- for token in response.iter_content(chunk_size=2046):
- yield (token.decode('cp1251'))
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
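`_create_completion` above is a plain generator, so outside of the g4f plumbing it can be exercised directly. A minimal sketch (hypothetical message content; the hard-coded auth codes in the deleted file would still have to be accepted by the Liaobots endpoint for the request to succeed):

```python
messages = [{"role": "user", "content": "Hello"}]

# Stream response chunks from the Liaobots endpoint as they arrive.
for chunk in _create_completion(model="gpt-4-0613", messages=messages, stream=True):
    print(chunk, end="", flush=True)
```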
diff --git a/spaces/pixiou/bingo/README.md b/spaces/pixiou/bingo/README.md
deleted file mode 100644
index d65eafbc8431818f738e8e086455fa6159f101bb..0000000000000000000000000000000000000000
--- a/spaces/pixiou/bingo/README.md
+++ /dev/null
@@ -1,196 +0,0 @@
----
-title: bingo
-emoji: 📉
-colorFrom: red
-colorTo: red
-sdk: docker
-license: mit
-duplicated_from: hf4all/bingo
----
-
-
-
-# Bingo
-
-Bingo, a New Bing that lets you breathe easy.
-
-A faithful recreation of the main features of the New Bing web UI, usable from mainland China, compatible with the vast majority of Microsoft Bing AI features, and deployable on your own.
-
-
-
-[](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[](https://github.com/weaigc/bingo/blob/main/license)
-
-
-
-## Demo site
-
-https://bing.github1s.tk
-
-
-
-[](https://bing.github1s.tk)
-
-## Features
-
-- Completely rewritten on Next.js, closely reproducing the New Bing web UI; the experience is essentially the same as Bing AI.
-- Supports Docker builds for quick, convenient deployment and access.
-- The cookie can be configured globally and shared by everyone.
-- Supports continuous voice conversations.
-
-## RoadMap
-
- - [x] wss forwarding
- - [x] One-click deployment
- - [x] Improved mobile layout
- - [x] Image generation
- - [x] Voice input (voice commands supported; currently only desktop Edge and Chrome)
- - [x] Voice output (must be enabled manually)
- - [x] Image input
- - [x] Custom domains
- - [ ] Chat history
- - [ ] Dark mode
- - [ ] Built-in prompts
- - [ ] Offline access
- - [ ] Internationalization
-
-## One-click deployment
-You can also deploy your own New Bing AI to 🤗 HuggingFace with one click.
-
-### Deploy to Huggingface
-1. Click this icon
-[](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic); the default configuration can be left unchanged.
-
-2. After the deployment finishes, open "Settings" > "Site domain", copy the HF domain, and share it with others.
-
-> Huggingface does not let you bind your own domain, but there are workarounds:
-> 1. Via Cloudflare Workers: [custom domains with Cloudflare Workers](#custom-domains-with-cloudflare-workers)
-> 2. Via Github Pages and an iframe: [how to bind a domain](https://github.com/weaigc/bingo/issues/4)
-
-### Custom domains with Cloudflare Workers
-
-> Core code: [worker.js](./cloudflare/worker.js)
-
-- [Sign up for a Cloudflare account](https://dash.cloudflare.com/sign-up)
-
-- Add a new site. You need your own domain, and its `Name Server` must be delegated to Cloudflare (Google for details).
-
-- Open "Workers" from the left-hand menu and click "Create a Worker".
-
-- Create the Worker service, copy the full code of [worker.js](./cloudflare/worker.js) into it, adjust it according to the comments, then save and deploy.
-
-- Under Triggers, set your custom access domain.
-
-### Deploying to other platforms
-
-
-Other platforms are currently being blocked by New Bing and run into many problems, so they are no longer recommended; check them yourself if you need to.
-
-
-#### Deploy to Netlify
-[](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo)
-
-#### Deploy to Vercel
-If you are a paying Vercel user, you can use the link below to deploy to Vercel with one click. The free tier has an [API timeout limit](https://vercel.com/docs/concepts/limits/overview) and is not recommended.
-
-[](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example)
-
-#### Deploy to Render
-
-[](https://render.com/deploy?repo=https://github.com/weaigc/bingo)
-
-
-## Environment and dependencies
-
-- Node.js >= 18
-- Bing AI [credentials](#how-to-obtain-bing_header)
-
-## Installation and usage
-
-> Since Microsoft is currently blocking quite aggressively, [deploying to Huggingface](#deploy-to-huggingface) is the recommended option.
-
-* Start with Node
-
-```bash
-git clone https://github.com/weaigc/bingo.git
-npm i # pnpm i is recommended
-npm run build
-npm run start
-```
-
-* Start with Docker
-```bash
-docker pull weaigc/bingo
-docker run --rm -it -p 7860:7860 weaigc/bingo
-# or
-docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo
-```
-
-## How to obtain BING_HEADER
-> Setting BING_HEADER means sharing your own account with everyone who uses this service. If you do not need login-free image generation, setting this variable is not recommended.
-
-Open https://www.bing.com and sign in, then visit https://www.bing.com/turing/captcha/challenge , pass the human verification, and then
-
-
-
-> The copied content should look like the example below. After confirming the format is correct, open https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 , paste it in, click "Convert to BING_HEADER and copy", and then paste from the clipboard to get the value. (You can also verify it on that page first.)
-
-The format reference is below. Note that the format saved on the web page starts with `curl`, while the `BING_HEADER` configured on the server is in `base64`; the two are not interchangeable.
-
-Normal format / format saved on the web page (for reference only)
-
-```
-curl 'https://www.bing.com/turing/captcha/challenge' \
- -H 'authority: www.bing.com' \
- -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \
- -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \
- -H 'cache-control: max-age=0' \
- -H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; _RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0uhgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; 
GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \
- -H 'dnt: 1' \
- -H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \
- -H 'sec-ch-ua-arch: "x86"' \
- -H 'sec-ch-ua-bitness: "64"' \
- -H 'sec-ch-ua-full-version: "116.0.1938.29"' \
- -H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \
- -H 'sec-ch-ua-mobile: ?0' \
- -H 'sec-ch-ua-model: ""' \
- -H 'sec-ch-ua-platform: "Windows"' \
- -H 'sec-ch-ua-platform-version: "15.0.0"' \
- -H 'sec-fetch-dest: document' \
- -H 'sec-fetch-mode: navigate' \
- -H 'sec-fetch-site: none' \
- -H 'sec-fetch-user: ?1' \
- -H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \
- -H 'sec-ms-gec-version: 1-116.0.1938.29' \
- -H 'upgrade-insecure-requests: 1' \
- -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \
- -H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \
- -H 'x-edge-shopping-flag: 1' \
- --compressed
-```
-
-
-
-Format after base64 encoding (BING_HEADER must use the base64-encoded format)
-
-```
-Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptSTBmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3ltRFJ5ODdrZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NT
d1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1VSURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRvY3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgICAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA==
-```
-
-
-
-## Acknowledgements
- - Thanks to [EdgeGPT](https://github.com/acheong08/EdgeGPT) for the proxy API approach.
- - Thanks to [Vercel AI](https://github.com/vercel-labs/ai-chatbot) for the base scaffolding, and to [ChatHub](https://github.com/chathub-dev/chathub) and [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) for parts of the code.
-
-
-## Q&A and discussion
-
-
-
-## License
-
-MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE).
-
-
diff --git a/spaces/pknez/face-swap-docker/Dockerfile b/spaces/pknez/face-swap-docker/Dockerfile
deleted file mode 100644
index c3ee9b1e84a98313a8ddea96c99c573c9c795b8f..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM python:3.11
-WORKDIR /usr/src/app
-RUN apt-get update && apt-get install -y libgl1-mesa-glx
-COPY requirements.txt ./
-RUN pip install --no-cache-dir -r requirements.txt
-COPY . .
-CMD ["python", "run.py"]
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py
deleted file mode 100644
index edc19627dba6835339768ccbaf726db21d8ac212..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
- pygments.style
- ~~~~~~~~~~~~~~
-
- Basic style object.
-
- :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from pip._vendor.pygments.token import Token, STANDARD_TYPES
-
-# Default mapping of ansixxx to RGB colors.
-_ansimap = {
- # dark
- 'ansiblack': '000000',
- 'ansired': '7f0000',
- 'ansigreen': '007f00',
- 'ansiyellow': '7f7fe0',
- 'ansiblue': '00007f',
- 'ansimagenta': '7f007f',
- 'ansicyan': '007f7f',
- 'ansigray': 'e5e5e5',
- # normal
- 'ansibrightblack': '555555',
- 'ansibrightred': 'ff0000',
- 'ansibrightgreen': '00ff00',
- 'ansibrightyellow': 'ffff00',
- 'ansibrightblue': '0000ff',
- 'ansibrightmagenta': 'ff00ff',
- 'ansibrightcyan': '00ffff',
- 'ansiwhite': 'ffffff',
-}
-# mapping of deprecated #ansixxx colors to new color names
-_deprecated_ansicolors = {
- # dark
- '#ansiblack': 'ansiblack',
- '#ansidarkred': 'ansired',
- '#ansidarkgreen': 'ansigreen',
- '#ansibrown': 'ansiyellow',
- '#ansidarkblue': 'ansiblue',
- '#ansipurple': 'ansimagenta',
- '#ansiteal': 'ansicyan',
- '#ansilightgray': 'ansigray',
- # normal
- '#ansidarkgray': 'ansibrightblack',
- '#ansired': 'ansibrightred',
- '#ansigreen': 'ansibrightgreen',
- '#ansiyellow': 'ansibrightyellow',
- '#ansiblue': 'ansibrightblue',
- '#ansifuchsia': 'ansibrightmagenta',
- '#ansiturquoise': 'ansibrightcyan',
- '#ansiwhite': 'ansiwhite',
-}
-ansicolors = set(_ansimap)
-
-
-class StyleMeta(type):
-
- def __new__(mcs, name, bases, dct):
- obj = type.__new__(mcs, name, bases, dct)
- for token in STANDARD_TYPES:
- if token not in obj.styles:
- obj.styles[token] = ''
-
- def colorformat(text):
- if text in ansicolors:
- return text
- if text[0:1] == '#':
- col = text[1:]
- if len(col) == 6:
- return col
- elif len(col) == 3:
- return col[0] * 2 + col[1] * 2 + col[2] * 2
- elif text == '':
- return ''
- elif text.startswith('var') or text.startswith('calc'):
- return text
- assert False, "wrong color format %r" % text
-
- _styles = obj._styles = {}
-
- for ttype in obj.styles:
- for token in ttype.split():
- if token in _styles:
- continue
- ndef = _styles.get(token.parent, None)
- styledefs = obj.styles.get(token, '').split()
- if not ndef or token is None:
- ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
- elif 'noinherit' in styledefs and token is not Token:
- ndef = _styles[Token][:]
- else:
- ndef = ndef[:]
- _styles[token] = ndef
- for styledef in obj.styles.get(token, '').split():
- if styledef == 'noinherit':
- pass
- elif styledef == 'bold':
- ndef[1] = 1
- elif styledef == 'nobold':
- ndef[1] = 0
- elif styledef == 'italic':
- ndef[2] = 1
- elif styledef == 'noitalic':
- ndef[2] = 0
- elif styledef == 'underline':
- ndef[3] = 1
- elif styledef == 'nounderline':
- ndef[3] = 0
- elif styledef[:3] == 'bg:':
- ndef[4] = colorformat(styledef[3:])
- elif styledef[:7] == 'border:':
- ndef[5] = colorformat(styledef[7:])
- elif styledef == 'roman':
- ndef[6] = 1
- elif styledef == 'sans':
- ndef[7] = 1
- elif styledef == 'mono':
- ndef[8] = 1
- else:
- ndef[0] = colorformat(styledef)
-
- return obj
-
- def style_for_token(cls, token):
- t = cls._styles[token]
- ansicolor = bgansicolor = None
- color = t[0]
- if color in _deprecated_ansicolors:
- color = _deprecated_ansicolors[color]
- if color in ansicolors:
- ansicolor = color
- color = _ansimap[color]
- bgcolor = t[4]
- if bgcolor in _deprecated_ansicolors:
- bgcolor = _deprecated_ansicolors[bgcolor]
- if bgcolor in ansicolors:
- bgansicolor = bgcolor
- bgcolor = _ansimap[bgcolor]
-
- return {
- 'color': color or None,
- 'bold': bool(t[1]),
- 'italic': bool(t[2]),
- 'underline': bool(t[3]),
- 'bgcolor': bgcolor or None,
- 'border': t[5] or None,
- 'roman': bool(t[6]) or None,
- 'sans': bool(t[7]) or None,
- 'mono': bool(t[8]) or None,
- 'ansicolor': ansicolor,
- 'bgansicolor': bgansicolor,
- }
-
- def list_styles(cls):
- return list(cls)
-
- def styles_token(cls, ttype):
- return ttype in cls._styles
-
- def __iter__(cls):
- for token in cls._styles:
- yield token, cls.style_for_token(token)
-
- def __len__(cls):
- return len(cls._styles)
-
-
-class Style(metaclass=StyleMeta):
-
- #: overall background color (``None`` means transparent)
- background_color = '#ffffff'
-
- #: highlight background color
- highlight_color = '#ffffcc'
-
- #: line number font color
- line_number_color = 'inherit'
-
- #: line number background color
- line_number_background_color = 'transparent'
-
- #: special line number font color
- line_number_special_color = '#000000'
-
- #: special line number background color
- line_number_special_background_color = '#ffffc0'
-
- #: Style definitions for individual token types.
- styles = {}
-
- # Attribute for lexers defined within Pygments. If set
- # to True, the style is not shown in the style gallery
- # on the website. This is intended for language-specific
- # styles.
- web_style_gallery_exclude = False
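The `StyleMeta` metaclass above expands each entry of a style's `styles` dict at class-creation time, which is what lets `style_for_token` return a fully resolved attribute dictionary. A small sketch of defining and querying a style, using the public `pygments` package (the file above is pip's vendored copy of the same module):

```python
from pygments.style import Style
from pygments.token import Keyword, String, Token


class MyStyle(Style):
    background_color = "#f8f8f8"
    styles = {
        Token: "",                # base token: no special formatting
        Keyword: "bold #005f87",  # a color plus the 'bold' flag
        String: "italic #8f5902",
    }


# StyleMeta has already resolved the definitions, so lookups are direct;
# subtokens inherit their parent's settings unless overridden.
resolved = MyStyle.style_for_token(Keyword)
print(resolved["color"], resolved["bold"])  # roughly: 005f87 True
```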
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/cli/convert.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/cli/convert.py
deleted file mode 100644
index 1ce9b5f3c16adcd07672d5dbddcff9f44f4b82a7..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/cli/convert.py
+++ /dev/null
@@ -1,273 +0,0 @@
-from __future__ import annotations
-
-import os.path
-import re
-import shutil
-import tempfile
-import zipfile
-from glob import iglob
-
-from ..bdist_wheel import bdist_wheel
-from ..wheelfile import WheelFile
-from . import WheelError
-
-try:
- from setuptools import Distribution
-except ImportError:
- from distutils.dist import Distribution
-
-egg_info_re = re.compile(
- r"""
- (?P<name>.+?)-(?P<ver>.+?)
- (-(?P<pyver>py\d\.\d+)
- (-(?P<arch>.+?))?
- )?.egg$""",
- re.VERBOSE,
-)
-
-
-class _bdist_wheel_tag(bdist_wheel):
- # allow the client to override the default generated wheel tag
- # The default bdist_wheel implementation uses python and abi tags
- # of the running python process. This is not suitable for
- # generating/repackaging prebuilt binaries.
-
- full_tag_supplied = False
- full_tag = None # None or a (pytag, soabitag, plattag) triple
-
- def get_tag(self):
- if self.full_tag_supplied and self.full_tag is not None:
- return self.full_tag
- else:
- return bdist_wheel.get_tag(self)
-
-
-def egg2wheel(egg_path: str, dest_dir: str):
- filename = os.path.basename(egg_path)
- match = egg_info_re.match(filename)
- if not match:
- raise WheelError(f"Invalid egg file name: {filename}")
-
- egg_info = match.groupdict()
- dir = tempfile.mkdtemp(suffix="_e2w")
- if os.path.isfile(egg_path):
- # assume we have a bdist_egg otherwise
- with zipfile.ZipFile(egg_path) as egg:
- egg.extractall(dir)
- else:
- # support buildout-style installed eggs directories
- for pth in os.listdir(egg_path):
- src = os.path.join(egg_path, pth)
- if os.path.isfile(src):
- shutil.copy2(src, dir)
- else:
- shutil.copytree(src, os.path.join(dir, pth))
-
- pyver = egg_info["pyver"]
- if pyver:
- pyver = egg_info["pyver"] = pyver.replace(".", "")
-
- arch = (egg_info["arch"] or "any").replace(".", "_").replace("-", "_")
-
- # assume all binary eggs are for CPython
- abi = "cp" + pyver[2:] if arch != "any" else "none"
-
- root_is_purelib = egg_info["arch"] is None
- if root_is_purelib:
- bw = bdist_wheel(Distribution())
- else:
- bw = _bdist_wheel_tag(Distribution())
-
- bw.root_is_pure = root_is_purelib
- bw.python_tag = pyver
- bw.plat_name_supplied = True
- bw.plat_name = egg_info["arch"] or "any"
- if not root_is_purelib:
- bw.full_tag_supplied = True
- bw.full_tag = (pyver, abi, arch)
-
- dist_info_dir = os.path.join(dir, "{name}-{ver}.dist-info".format(**egg_info))
- bw.egg2dist(os.path.join(dir, "EGG-INFO"), dist_info_dir)
- bw.write_wheelfile(dist_info_dir, generator="egg2wheel")
- wheel_name = "{name}-{ver}-{pyver}-{}-{}.whl".format(abi, arch, **egg_info)
- with WheelFile(os.path.join(dest_dir, wheel_name), "w") as wf:
- wf.write_files(dir)
-
- shutil.rmtree(dir)
-
-
-def parse_wininst_info(wininfo_name, egginfo_name):
- """Extract metadata from filenames.
-
- Extracts the 4 metadata items needed (name, version, pyversion, arch) from
- the installer filename and the name of the egg-info directory embedded in
- the zipfile (if any).
-
- The egginfo filename has the format::
-
- name-ver(-pyver)(-arch).egg-info
-
- The installer filename has the format::
-
- name-ver.arch(-pyver).exe
-
- Some things to note:
-
- 1. The installer filename is not definitive. An installer can be renamed
- and work perfectly well as an installer. So more reliable data should
- be used whenever possible.
- 2. The egg-info data should be preferred for the name and version, because
- these come straight from the distutils metadata, and are mandatory.
- 3. The pyver from the egg-info data should be ignored, as it is
- constructed from the version of Python used to build the installer,
- which is irrelevant - the installer filename is correct here (even to
- the point that when it's not there, any version is implied).
- 4. The architecture must be taken from the installer filename, as it is
- not included in the egg-info data.
- 5. Architecture-neutral installers still have an architecture because the
- installer format itself (being executable) is architecture-specific. We
- should therefore ignore the architecture if the content is pure-python.
- """
-
- egginfo = None
- if egginfo_name:
- egginfo = egg_info_re.search(egginfo_name)
- if not egginfo:
- raise ValueError(f"Egg info filename {egginfo_name} is not valid")
-
- # Parse the wininst filename
- # 1. Distribution name (up to the first '-')
- w_name, sep, rest = wininfo_name.partition("-")
- if not sep:
- raise ValueError(f"Installer filename {wininfo_name} is not valid")
-
- # Strip '.exe'
- rest = rest[:-4]
- # 2. Python version (from the last '-', must start with 'py')
- rest2, sep, w_pyver = rest.rpartition("-")
- if sep and w_pyver.startswith("py"):
- rest = rest2
- w_pyver = w_pyver.replace(".", "")
- else:
- # Not version specific - use py2.py3. While it is possible that
- # pure-Python code is not compatible with both Python 2 and 3, there
- # is no way of knowing from the wininst format, so we assume the best
- # here (the user can always manually rename the wheel to be more
- # restrictive if needed).
- w_pyver = "py2.py3"
- # 3. Version and architecture
- w_ver, sep, w_arch = rest.rpartition(".")
- if not sep:
- raise ValueError(f"Installer filename {wininfo_name} is not valid")
-
- if egginfo:
- w_name = egginfo.group("name")
- w_ver = egginfo.group("ver")
-
- return {"name": w_name, "ver": w_ver, "arch": w_arch, "pyver": w_pyver}
-
-
-def wininst2wheel(path, dest_dir):
- with zipfile.ZipFile(path) as bdw:
- # Search for egg-info in the archive
- egginfo_name = None
- for filename in bdw.namelist():
- if ".egg-info" in filename:
- egginfo_name = filename
- break
-
- info = parse_wininst_info(os.path.basename(path), egginfo_name)
-
- root_is_purelib = True
- for zipinfo in bdw.infolist():
- if zipinfo.filename.startswith("PLATLIB"):
- root_is_purelib = False
- break
- if root_is_purelib:
- paths = {"purelib": ""}
- else:
- paths = {"platlib": ""}
-
- dist_info = "{name}-{ver}".format(**info)
- datadir = "%s.data/" % dist_info
-
- # rewrite paths to trick ZipFile into extracting an egg
- # XXX grab wininst .ini - between .exe, padding, and first zip file.
- members = []
- egginfo_name = ""
- for zipinfo in bdw.infolist():
- key, basename = zipinfo.filename.split("/", 1)
- key = key.lower()
- basepath = paths.get(key, None)
- if basepath is None:
- basepath = datadir + key.lower() + "/"
- oldname = zipinfo.filename
- newname = basepath + basename
- zipinfo.filename = newname
- del bdw.NameToInfo[oldname]
- bdw.NameToInfo[newname] = zipinfo
- # Collect member names, but omit '' (from an entry like "PLATLIB/")
- if newname:
- members.append(newname)
- # Remember egg-info name for the egg2dist call below
- if not egginfo_name:
- if newname.endswith(".egg-info"):
- egginfo_name = newname
- elif ".egg-info/" in newname:
- egginfo_name, sep, _ = newname.rpartition("/")
- dir = tempfile.mkdtemp(suffix="_b2w")
- bdw.extractall(dir, members)
-
- # egg2wheel
- abi = "none"
- pyver = info["pyver"]
- arch = (info["arch"] or "any").replace(".", "_").replace("-", "_")
- # Wininst installers always have arch even if they are not
- # architecture-specific (because the format itself is).
- # So, assume the content is architecture-neutral if root is purelib.
- if root_is_purelib:
- arch = "any"
- # If the installer is architecture-specific, it's almost certainly also
- # CPython-specific.
- if arch != "any":
- pyver = pyver.replace("py", "cp")
- wheel_name = "-".join((dist_info, pyver, abi, arch))
- if root_is_purelib:
- bw = bdist_wheel(Distribution())
- else:
- bw = _bdist_wheel_tag(Distribution())
-
- bw.root_is_pure = root_is_purelib
- bw.python_tag = pyver
- bw.plat_name_supplied = True
- bw.plat_name = info["arch"] or "any"
-
- if not root_is_purelib:
- bw.full_tag_supplied = True
- bw.full_tag = (pyver, abi, arch)
-
- dist_info_dir = os.path.join(dir, "%s.dist-info" % dist_info)
- bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir)
- bw.write_wheelfile(dist_info_dir, generator="wininst2wheel")
-
- wheel_path = os.path.join(dest_dir, wheel_name)
- with WheelFile(wheel_path, "w") as wf:
- wf.write_files(dir)
-
- shutil.rmtree(dir)
-
-
-def convert(files, dest_dir, verbose):
- for pat in files:
- for installer in iglob(pat):
- if os.path.splitext(installer)[1] == ".egg":
- conv = egg2wheel
- else:
- conv = wininst2wheel
-
- if verbose:
- print(f"{installer}... ", flush=True)
-
- conv(installer, dest_dir)
- if verbose:
- print("OK")
diff --git a/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/structuredquery.h b/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/structuredquery.h
deleted file mode 100644
index bca20a9adac790f1f46ca915c121beb01b07c0f6..0000000000000000000000000000000000000000
--- a/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/structuredquery.h
+++ /dev/null
@@ -1,2478 +0,0 @@
-
-
-/* this ALWAYS GENERATED file contains the definitions for the interfaces */
-
-
- /* File created by MIDL compiler version 7.00.0499 */
-/* Compiler settings for structuredquery.idl:
- Oicf, W1, Zp8, env=Win32 (32b run)
- protocol : dce , ms_ext, c_ext, robust
- error checks: allocation ref bounds_check enum stub_data
- VC __declspec() decoration level:
- __declspec(uuid()), __declspec(selectany), __declspec(novtable)
- DECLSPEC_UUID(), MIDL_INTERFACE()
-*/
-//@@MIDL_FILE_HEADING( )
-
-#pragma warning( disable: 4049 ) /* more than 64k source lines */
-
-
-/* verify that the version is high enough to compile this file*/
-#ifndef __REQUIRED_RPCNDR_H_VERSION__
-#define __REQUIRED_RPCNDR_H_VERSION__ 475
-#endif
-
-/* verify that the version is high enough to compile this file*/
-#ifndef __REQUIRED_RPCSAL_H_VERSION__
-#define __REQUIRED_RPCSAL_H_VERSION__ 100
-#endif
-
-#include "rpc.h"
-#include "rpcndr.h"
-
-#ifndef __RPCNDR_H_VERSION__
-#error this stub requires an updated version of <rpcndr.h>
-#endif // __RPCNDR_H_VERSION__
-
-#ifndef COM_NO_WINDOWS_H
-#include "windows.h"
-#include "ole2.h"
-#endif /*COM_NO_WINDOWS_H*/
-
-#ifndef __structuredquery_h__
-#define __structuredquery_h__
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1020)
-#pragma once
-#endif
-
-/* Forward Declarations */
-
-#ifndef __IQueryParser_FWD_DEFINED__
-#define __IQueryParser_FWD_DEFINED__
-typedef interface IQueryParser IQueryParser;
-#endif /* __IQueryParser_FWD_DEFINED__ */
-
-
-#ifndef __IConditionFactory_FWD_DEFINED__
-#define __IConditionFactory_FWD_DEFINED__
-typedef interface IConditionFactory IConditionFactory;
-#endif /* __IConditionFactory_FWD_DEFINED__ */
-
-
-#ifndef __IQuerySolution_FWD_DEFINED__
-#define __IQuerySolution_FWD_DEFINED__
-typedef interface IQuerySolution IQuerySolution;
-#endif /* __IQuerySolution_FWD_DEFINED__ */
-
-
-#ifndef __ICondition_FWD_DEFINED__
-#define __ICondition_FWD_DEFINED__
-typedef interface ICondition ICondition;
-#endif /* __ICondition_FWD_DEFINED__ */
-
-
-#ifndef __IConditionGenerator_FWD_DEFINED__
-#define __IConditionGenerator_FWD_DEFINED__
-typedef interface IConditionGenerator IConditionGenerator;
-#endif /* __IConditionGenerator_FWD_DEFINED__ */
-
-
-#ifndef __IRichChunk_FWD_DEFINED__
-#define __IRichChunk_FWD_DEFINED__
-typedef interface IRichChunk IRichChunk;
-#endif /* __IRichChunk_FWD_DEFINED__ */
-
-
-#ifndef __IInterval_FWD_DEFINED__
-#define __IInterval_FWD_DEFINED__
-typedef interface IInterval IInterval;
-#endif /* __IInterval_FWD_DEFINED__ */
-
-
-#ifndef __IMetaData_FWD_DEFINED__
-#define __IMetaData_FWD_DEFINED__
-typedef interface IMetaData IMetaData;
-#endif /* __IMetaData_FWD_DEFINED__ */
-
-
-#ifndef __IEntity_FWD_DEFINED__
-#define __IEntity_FWD_DEFINED__
-typedef interface IEntity IEntity;
-#endif /* __IEntity_FWD_DEFINED__ */
-
-
-#ifndef __IRelationship_FWD_DEFINED__
-#define __IRelationship_FWD_DEFINED__
-typedef interface IRelationship IRelationship;
-#endif /* __IRelationship_FWD_DEFINED__ */
-
-
-#ifndef __INamedEntity_FWD_DEFINED__
-#define __INamedEntity_FWD_DEFINED__
-typedef interface INamedEntity INamedEntity;
-#endif /* __INamedEntity_FWD_DEFINED__ */
-
-
-#ifndef __ISchemaProvider_FWD_DEFINED__
-#define __ISchemaProvider_FWD_DEFINED__
-typedef interface ISchemaProvider ISchemaProvider;
-#endif /* __ISchemaProvider_FWD_DEFINED__ */
-
-
-#ifndef __ITokenCollection_FWD_DEFINED__
-#define __ITokenCollection_FWD_DEFINED__
-typedef interface ITokenCollection ITokenCollection;
-#endif /* __ITokenCollection_FWD_DEFINED__ */
-
-
-#ifndef __INamedEntityCollector_FWD_DEFINED__
-#define __INamedEntityCollector_FWD_DEFINED__
-typedef interface INamedEntityCollector INamedEntityCollector;
-#endif /* __INamedEntityCollector_FWD_DEFINED__ */
-
-
-#ifndef __ISchemaLocalizerSupport_FWD_DEFINED__
-#define __ISchemaLocalizerSupport_FWD_DEFINED__
-typedef interface ISchemaLocalizerSupport ISchemaLocalizerSupport;
-#endif /* __ISchemaLocalizerSupport_FWD_DEFINED__ */
-
-
-#ifndef __IQueryParserManager_FWD_DEFINED__
-#define __IQueryParserManager_FWD_DEFINED__
-typedef interface IQueryParserManager IQueryParserManager;
-#endif /* __IQueryParserManager_FWD_DEFINED__ */
-
-
-#ifndef __QueryParser_FWD_DEFINED__
-#define __QueryParser_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class QueryParser QueryParser;
-#else
-typedef struct QueryParser QueryParser;
-#endif /* __cplusplus */
-
-#endif /* __QueryParser_FWD_DEFINED__ */
-
-
-#ifndef __NegationCondition_FWD_DEFINED__
-#define __NegationCondition_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class NegationCondition NegationCondition;
-#else
-typedef struct NegationCondition NegationCondition;
-#endif /* __cplusplus */
-
-#endif /* __NegationCondition_FWD_DEFINED__ */
-
-
-#ifndef __CompoundCondition_FWD_DEFINED__
-#define __CompoundCondition_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class CompoundCondition CompoundCondition;
-#else
-typedef struct CompoundCondition CompoundCondition;
-#endif /* __cplusplus */
-
-#endif /* __CompoundCondition_FWD_DEFINED__ */
-
-
-#ifndef __LeafCondition_FWD_DEFINED__
-#define __LeafCondition_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class LeafCondition LeafCondition;
-#else
-typedef struct LeafCondition LeafCondition;
-#endif /* __cplusplus */
-
-#endif /* __LeafCondition_FWD_DEFINED__ */
-
-
-#ifndef __ConditionFactory_FWD_DEFINED__
-#define __ConditionFactory_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class ConditionFactory ConditionFactory;
-#else
-typedef struct ConditionFactory ConditionFactory;
-#endif /* __cplusplus */
-
-#endif /* __ConditionFactory_FWD_DEFINED__ */
-
-
-#ifndef __Interval_FWD_DEFINED__
-#define __Interval_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class Interval Interval;
-#else
-typedef struct Interval Interval;
-#endif /* __cplusplus */
-
-#endif /* __Interval_FWD_DEFINED__ */
-
-
-#ifndef __QueryParserManager_FWD_DEFINED__
-#define __QueryParserManager_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class QueryParserManager QueryParserManager;
-#else
-typedef struct QueryParserManager QueryParserManager;
-#endif /* __cplusplus */
-
-#endif /* __QueryParserManager_FWD_DEFINED__ */
-
-
-/* header files for imported files */
-#include "oaidl.h"
-#include "ocidl.h"
-#include "propidl.h"
-
-#ifdef __cplusplus
-extern "C"{
-#endif
-
-
-/* interface __MIDL_itf_structuredquery_0000_0000 */
-/* [local] */
-
-
-
-
-
-
-
-
-
-
-
-typedef /* [v1_enum] */
-enum tagCONDITION_TYPE
- { CT_AND_CONDITION = 0,
- CT_OR_CONDITION = ( CT_AND_CONDITION + 1 ) ,
- CT_NOT_CONDITION = ( CT_OR_CONDITION + 1 ) ,
- CT_LEAF_CONDITION = ( CT_NOT_CONDITION + 1 )
- } CONDITION_TYPE;
-
-typedef /* [v1_enum] */
-enum tagCONDITION_OPERATION
- { COP_IMPLICIT = 0,
- COP_EQUAL = ( COP_IMPLICIT + 1 ) ,
- COP_NOTEQUAL = ( COP_EQUAL + 1 ) ,
- COP_LESSTHAN = ( COP_NOTEQUAL + 1 ) ,
- COP_GREATERTHAN = ( COP_LESSTHAN + 1 ) ,
- COP_LESSTHANOREQUAL = ( COP_GREATERTHAN + 1 ) ,
- COP_GREATERTHANOREQUAL = ( COP_LESSTHANOREQUAL + 1 ) ,
- COP_VALUE_STARTSWITH = ( COP_GREATERTHANOREQUAL + 1 ) ,
- COP_VALUE_ENDSWITH = ( COP_VALUE_STARTSWITH + 1 ) ,
- COP_VALUE_CONTAINS = ( COP_VALUE_ENDSWITH + 1 ) ,
- COP_VALUE_NOTCONTAINS = ( COP_VALUE_CONTAINS + 1 ) ,
- COP_DOSWILDCARDS = ( COP_VALUE_NOTCONTAINS + 1 ) ,
- COP_WORD_EQUAL = ( COP_DOSWILDCARDS + 1 ) ,
- COP_WORD_STARTSWITH = ( COP_WORD_EQUAL + 1 ) ,
- COP_APPLICATION_SPECIFIC = ( COP_WORD_STARTSWITH + 1 )
- } CONDITION_OPERATION;
-
-typedef /* [v1_enum] */
-enum tagSTRUCTURED_QUERY_SINGLE_OPTION
- { SQSO_SCHEMA = 0,
- SQSO_LOCALE_WORD_BREAKING = ( SQSO_SCHEMA + 1 ) ,
- SQSO_WORD_BREAKER = ( SQSO_LOCALE_WORD_BREAKING + 1 ) ,
- SQSO_NATURAL_SYNTAX = ( SQSO_WORD_BREAKER + 1 ) ,
- SQSO_AUTOMATIC_WILDCARD = ( SQSO_NATURAL_SYNTAX + 1 ) ,
- SQSO_TRACE_LEVEL = ( SQSO_AUTOMATIC_WILDCARD + 1 ) ,
- SQSO_LANGUAGE_KEYWORDS = ( SQSO_TRACE_LEVEL + 1 )
- } STRUCTURED_QUERY_SINGLE_OPTION;
-
-typedef /* [v1_enum] */
-enum tagSTRUCTURED_QUERY_MULTIOPTION
- { SQMO_VIRTUAL_PROPERTY = 0,
- SQMO_DEFAULT_PROPERTY = ( SQMO_VIRTUAL_PROPERTY + 1 ) ,
- SQMO_GENERATOR_FOR_TYPE = ( SQMO_DEFAULT_PROPERTY + 1 )
- } STRUCTURED_QUERY_MULTIOPTION;
-
-typedef /* [v1_enum] */
-enum tagSTRUCTURED_QUERY_PARSE_ERROR
- { SQPE_NONE = 0,
- SQPE_EXTRA_OPENING_PARENTHESIS = ( SQPE_NONE + 1 ) ,
- SQPE_EXTRA_CLOSING_PARENTHESIS = ( SQPE_EXTRA_OPENING_PARENTHESIS + 1 ) ,
- SQPE_IGNORED_MODIFIER = ( SQPE_EXTRA_CLOSING_PARENTHESIS + 1 ) ,
- SQPE_IGNORED_CONNECTOR = ( SQPE_IGNORED_MODIFIER + 1 ) ,
- SQPE_IGNORED_KEYWORD = ( SQPE_IGNORED_CONNECTOR + 1 ) ,
- SQPE_UNHANDLED = ( SQPE_IGNORED_KEYWORD + 1 )
- } STRUCTURED_QUERY_PARSE_ERROR;
-
-/* [v1_enum] */
-enum tagSTRUCTURED_QUERY_RESOLVE_OPTION
- { SQRO_DONT_RESOLVE_DATETIME = 0x1,
- SQRO_ALWAYS_ONE_INTERVAL = 0x2,
- SQRO_DONT_SIMPLIFY_CONDITION_TREES = 0x4,
- SQRO_DONT_MAP_RELATIONS = 0x8,
- SQRO_DONT_RESOLVE_RANGES = 0x10,
- SQRO_DONT_REMOVE_UNRESTRICTED_KEYWORDS = 0x20,
- SQRO_DONT_SPLIT_WORDS = 0x40,
- SQRO_IGNORE_PHRASE_ORDER = 0x80
- } ;
-typedef int STRUCTURED_QUERY_RESOLVE_OPTION;
-
-typedef /* [v1_enum] */
-enum tagINTERVAL_LIMIT_KIND
- { ILK_EXPLICIT_INCLUDED = 0,
- ILK_EXPLICIT_EXCLUDED = ( ILK_EXPLICIT_INCLUDED + 1 ) ,
- ILK_NEGATIVE_INFINITY = ( ILK_EXPLICIT_EXCLUDED + 1 ) ,
- ILK_POSITIVE_INFINITY = ( ILK_NEGATIVE_INFINITY + 1 )
- } INTERVAL_LIMIT_KIND;
-
-typedef /* [v1_enum] */
-enum tagQUERY_PARSER_MANAGER_OPTION
- { QPMO_SCHEMA_BINARY_NAME = 0,
- QPMO_PRELOCALIZED_SCHEMA_BINARY_PATH = ( QPMO_SCHEMA_BINARY_NAME + 1 ) ,
- QPMO_UNLOCALIZED_SCHEMA_BINARY_PATH = ( QPMO_PRELOCALIZED_SCHEMA_BINARY_PATH + 1 ) ,
- QPMO_LOCALIZED_SCHEMA_BINARY_PATH = ( QPMO_UNLOCALIZED_SCHEMA_BINARY_PATH + 1 ) ,
- QPMO_APPEND_LCID_TO_LOCALIZED_PATH = ( QPMO_LOCALIZED_SCHEMA_BINARY_PATH + 1 ) ,
- QPMO_LOCALIZER_SUPPORT = ( QPMO_APPEND_LCID_TO_LOCALIZED_PATH + 1 )
- } QUERY_PARSER_MANAGER_OPTION;
-
-
-
-extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0000_v0_0_c_ifspec;
-extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0000_v0_0_s_ifspec;
-
-#ifndef __IQueryParser_INTERFACE_DEFINED__
-#define __IQueryParser_INTERFACE_DEFINED__
-
-/* interface IQueryParser */
-/* [unique][uuid][object] */
-
-
-EXTERN_C const IID IID_IQueryParser;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("2EBDEE67-3505-43f8-9946-EA44ABC8E5B0")
- IQueryParser : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE Parse(
- /* [in] */ __RPC__in LPCWSTR pszInputString,
- /* [in] */ __RPC__in_opt IEnumUnknown *pCustomProperties,
- /* [retval][out] */ __RPC__deref_out_opt IQuerySolution **ppSolution) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE SetOption(
- /* [in] */ STRUCTURED_QUERY_SINGLE_OPTION option,
- /* [in] */ __RPC__in const PROPVARIANT *pOptionValue) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetOption(
- /* [in] */ STRUCTURED_QUERY_SINGLE_OPTION option,
- /* [retval][out] */ __RPC__out PROPVARIANT *pOptionValue) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE SetMultiOption(
- /* [in] */ STRUCTURED_QUERY_MULTIOPTION option,
- /* [in] */ __RPC__in LPCWSTR pszOptionKey,
- /* [in] */ __RPC__in const PROPVARIANT *pOptionValue) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetSchemaProvider(
- /* [retval][out] */ __RPC__deref_out_opt ISchemaProvider **ppSchemaProvider) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE RestateToString(
- /* [in] */ __RPC__in_opt ICondition *pCondition,
- /* [in] */ BOOL fUseEnglish,
- /* [out] */ __RPC__deref_out_opt LPWSTR *ppszQueryString) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE ParsePropertyValue(
- /* [in] */ __RPC__in LPCWSTR pszPropertyName,
- /* [in] */ __RPC__in LPCWSTR pszInputString,
- /* [retval][out] */ __RPC__deref_out_opt IQuerySolution **ppSolution) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE RestatePropertyValueToString(
- /* [in] */ __RPC__in_opt ICondition *pCondition,
- /* [in] */ BOOL fUseEnglish,
- /* [out] */ __RPC__deref_out_opt LPWSTR *ppszPropertyName,
- /* [out] */ __RPC__deref_out_opt LPWSTR *ppszQueryString) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IQueryParserVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IQueryParser * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IQueryParser * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IQueryParser * This);
-
- HRESULT ( STDMETHODCALLTYPE *Parse )(
- IQueryParser * This,
- /* [in] */ __RPC__in LPCWSTR pszInputString,
- /* [in] */ __RPC__in_opt IEnumUnknown *pCustomProperties,
- /* [retval][out] */ __RPC__deref_out_opt IQuerySolution **ppSolution);
-
- HRESULT ( STDMETHODCALLTYPE *SetOption )(
- IQueryParser * This,
- /* [in] */ STRUCTURED_QUERY_SINGLE_OPTION option,
- /* [in] */ __RPC__in const PROPVARIANT *pOptionValue);
-
- HRESULT ( STDMETHODCALLTYPE *GetOption )(
- IQueryParser * This,
- /* [in] */ STRUCTURED_QUERY_SINGLE_OPTION option,
- /* [retval][out] */ __RPC__out PROPVARIANT *pOptionValue);
-
- HRESULT ( STDMETHODCALLTYPE *SetMultiOption )(
- IQueryParser * This,
- /* [in] */ STRUCTURED_QUERY_MULTIOPTION option,
- /* [in] */ __RPC__in LPCWSTR pszOptionKey,
- /* [in] */ __RPC__in const PROPVARIANT *pOptionValue);
-
- HRESULT ( STDMETHODCALLTYPE *GetSchemaProvider )(
- IQueryParser * This,
- /* [retval][out] */ __RPC__deref_out_opt ISchemaProvider **ppSchemaProvider);
-
- HRESULT ( STDMETHODCALLTYPE *RestateToString )(
- IQueryParser * This,
- /* [in] */ __RPC__in_opt ICondition *pCondition,
- /* [in] */ BOOL fUseEnglish,
- /* [out] */ __RPC__deref_out_opt LPWSTR *ppszQueryString);
-
- HRESULT ( STDMETHODCALLTYPE *ParsePropertyValue )(
- IQueryParser * This,
- /* [in] */ __RPC__in LPCWSTR pszPropertyName,
- /* [in] */ __RPC__in LPCWSTR pszInputString,
- /* [retval][out] */ __RPC__deref_out_opt IQuerySolution **ppSolution);
-
- HRESULT ( STDMETHODCALLTYPE *RestatePropertyValueToString )(
- IQueryParser * This,
- /* [in] */ __RPC__in_opt ICondition *pCondition,
- /* [in] */ BOOL fUseEnglish,
- /* [out] */ __RPC__deref_out_opt LPWSTR *ppszPropertyName,
- /* [out] */ __RPC__deref_out_opt LPWSTR *ppszQueryString);
-
- END_INTERFACE
- } IQueryParserVtbl;
-
- interface IQueryParser
- {
- CONST_VTBL struct IQueryParserVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IQueryParser_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IQueryParser_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IQueryParser_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IQueryParser_Parse(This,pszInputString,pCustomProperties,ppSolution) \
- ( (This)->lpVtbl -> Parse(This,pszInputString,pCustomProperties,ppSolution) )
-
-#define IQueryParser_SetOption(This,option,pOptionValue) \
- ( (This)->lpVtbl -> SetOption(This,option,pOptionValue) )
-
-#define IQueryParser_GetOption(This,option,pOptionValue) \
- ( (This)->lpVtbl -> GetOption(This,option,pOptionValue) )
-
-#define IQueryParser_SetMultiOption(This,option,pszOptionKey,pOptionValue) \
- ( (This)->lpVtbl -> SetMultiOption(This,option,pszOptionKey,pOptionValue) )
-
-#define IQueryParser_GetSchemaProvider(This,ppSchemaProvider) \
- ( (This)->lpVtbl -> GetSchemaProvider(This,ppSchemaProvider) )
-
-#define IQueryParser_RestateToString(This,pCondition,fUseEnglish,ppszQueryString) \
- ( (This)->lpVtbl -> RestateToString(This,pCondition,fUseEnglish,ppszQueryString) )
-
-#define IQueryParser_ParsePropertyValue(This,pszPropertyName,pszInputString,ppSolution) \
- ( (This)->lpVtbl -> ParsePropertyValue(This,pszPropertyName,pszInputString,ppSolution) )
-
-#define IQueryParser_RestatePropertyValueToString(This,pCondition,fUseEnglish,ppszPropertyName,ppszQueryString) \
- ( (This)->lpVtbl -> RestatePropertyValueToString(This,pCondition,fUseEnglish,ppszPropertyName,ppszQueryString) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IQueryParser_INTERFACE_DEFINED__ */
-
-
-#ifndef __IConditionFactory_INTERFACE_DEFINED__
-#define __IConditionFactory_INTERFACE_DEFINED__
-
-/* interface IConditionFactory */
-/* [unique][uuid][object] */
-
-
-EXTERN_C const IID IID_IConditionFactory;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("A5EFE073-B16F-474f-9F3E-9F8B497A3E08")
- IConditionFactory : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE MakeNot(
- /* [in] */ __RPC__in_opt ICondition *pSubCondition,
- /* [in] */ BOOL simplify,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE MakeAndOr(
- /* [in] */ CONDITION_TYPE nodeType,
- /* [in] */ __RPC__in_opt IEnumUnknown *pSubConditions,
- /* [in] */ BOOL simplify,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE MakeLeaf(
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName,
- /* [in] */ CONDITION_OPERATION op,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType,
- /* [in] */ __RPC__in const PROPVARIANT *pValue,
- /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pValueTerm,
- /* [in] */ BOOL expand,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE Resolve(
- /* [in] */
- __in ICondition *pConditionTree,
- /* [in] */
- __in STRUCTURED_QUERY_RESOLVE_OPTION sqro,
- /* [ref][in] */
- __in_opt const SYSTEMTIME *pstReferenceTime,
- /* [retval][out] */
- __out ICondition **ppResolvedConditionTree) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IConditionFactoryVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IConditionFactory * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IConditionFactory * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IConditionFactory * This);
-
- HRESULT ( STDMETHODCALLTYPE *MakeNot )(
- IConditionFactory * This,
- /* [in] */ __RPC__in_opt ICondition *pSubCondition,
- /* [in] */ BOOL simplify,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery);
-
- HRESULT ( STDMETHODCALLTYPE *MakeAndOr )(
- IConditionFactory * This,
- /* [in] */ CONDITION_TYPE nodeType,
- /* [in] */ __RPC__in_opt IEnumUnknown *pSubConditions,
- /* [in] */ BOOL simplify,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery);
-
- HRESULT ( STDMETHODCALLTYPE *MakeLeaf )(
- IConditionFactory * This,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName,
- /* [in] */ CONDITION_OPERATION op,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType,
- /* [in] */ __RPC__in const PROPVARIANT *pValue,
- /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pValueTerm,
- /* [in] */ BOOL expand,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *Resolve )(
- IConditionFactory * This,
- /* [in] */
- __in ICondition *pConditionTree,
- /* [in] */
- __in STRUCTURED_QUERY_RESOLVE_OPTION sqro,
- /* [ref][in] */
- __in_opt const SYSTEMTIME *pstReferenceTime,
- /* [retval][out] */
- __out ICondition **ppResolvedConditionTree);
-
- END_INTERFACE
- } IConditionFactoryVtbl;
-
- interface IConditionFactory
- {
- CONST_VTBL struct IConditionFactoryVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IConditionFactory_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IConditionFactory_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IConditionFactory_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IConditionFactory_MakeNot(This,pSubCondition,simplify,ppResultQuery) \
- ( (This)->lpVtbl -> MakeNot(This,pSubCondition,simplify,ppResultQuery) )
-
-#define IConditionFactory_MakeAndOr(This,nodeType,pSubConditions,simplify,ppResultQuery) \
- ( (This)->lpVtbl -> MakeAndOr(This,nodeType,pSubConditions,simplify,ppResultQuery) )
-
-#define IConditionFactory_MakeLeaf(This,pszPropertyName,op,pszValueType,pValue,pPropertyNameTerm,pOperationTerm,pValueTerm,expand,ppResultQuery) \
- ( (This)->lpVtbl -> MakeLeaf(This,pszPropertyName,op,pszValueType,pValue,pPropertyNameTerm,pOperationTerm,pValueTerm,expand,ppResultQuery) )
-
-#define IConditionFactory_Resolve(This,pConditionTree,sqro,pstReferenceTime,ppResolvedConditionTree) \
- ( (This)->lpVtbl -> Resolve(This,pConditionTree,sqro,pstReferenceTime,ppResolvedConditionTree) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IConditionFactory_INTERFACE_DEFINED__ */
-
-
-#ifndef __IQuerySolution_INTERFACE_DEFINED__
-#define __IQuerySolution_INTERFACE_DEFINED__
-
-/* interface IQuerySolution */
-/* [unique][uuid][object] */
-
-
-EXTERN_C const IID IID_IQuerySolution;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("D6EBC66B-8921-4193-AFDD-A1789FB7FF57")
- IQuerySolution : public IConditionFactory
- {
- public:
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetQuery(
- /* [out] */
- __out_opt ICondition **ppQueryNode,
- /* [out] */
- __out_opt IEntity **ppMainType) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetErrors(
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppParseErrors) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetLexicalData(
- /* [out] */
- __deref_opt_out LPWSTR *ppszInputString,
- /* [out] */
- __out_opt ITokenCollection **ppTokens,
- /* [out] */
- __out_opt LCID *pLocale,
- /* [out] */
- __out_opt IUnknown **ppWordBreaker) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IQuerySolutionVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IQuerySolution * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IQuerySolution * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IQuerySolution * This);
-
- HRESULT ( STDMETHODCALLTYPE *MakeNot )(
- IQuerySolution * This,
- /* [in] */ __RPC__in_opt ICondition *pSubCondition,
- /* [in] */ BOOL simplify,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery);
-
- HRESULT ( STDMETHODCALLTYPE *MakeAndOr )(
- IQuerySolution * This,
- /* [in] */ CONDITION_TYPE nodeType,
- /* [in] */ __RPC__in_opt IEnumUnknown *pSubConditions,
- /* [in] */ BOOL simplify,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery);
-
- HRESULT ( STDMETHODCALLTYPE *MakeLeaf )(
- IQuerySolution * This,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName,
- /* [in] */ CONDITION_OPERATION op,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType,
- /* [in] */ __RPC__in const PROPVARIANT *pValue,
- /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pValueTerm,
- /* [in] */ BOOL expand,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppResultQuery);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *Resolve )(
- IQuerySolution * This,
- /* [in] */
- __in ICondition *pConditionTree,
- /* [in] */
- __in STRUCTURED_QUERY_RESOLVE_OPTION sqro,
- /* [ref][in] */
- __in_opt const SYSTEMTIME *pstReferenceTime,
- /* [retval][out] */
- __out ICondition **ppResolvedConditionTree);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetQuery )(
- IQuerySolution * This,
- /* [out] */
- __out_opt ICondition **ppQueryNode,
- /* [out] */
- __out_opt IEntity **ppMainType);
-
- HRESULT ( STDMETHODCALLTYPE *GetErrors )(
- IQuerySolution * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppParseErrors);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetLexicalData )(
- IQuerySolution * This,
- /* [out] */
- __deref_opt_out LPWSTR *ppszInputString,
- /* [out] */
- __out_opt ITokenCollection **ppTokens,
- /* [out] */
- __out_opt LCID *pLocale,
- /* [out] */
- __out_opt IUnknown **ppWordBreaker);
-
- END_INTERFACE
- } IQuerySolutionVtbl;
-
- interface IQuerySolution
- {
- CONST_VTBL struct IQuerySolutionVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IQuerySolution_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IQuerySolution_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IQuerySolution_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IQuerySolution_MakeNot(This,pSubCondition,simplify,ppResultQuery) \
- ( (This)->lpVtbl -> MakeNot(This,pSubCondition,simplify,ppResultQuery) )
-
-#define IQuerySolution_MakeAndOr(This,nodeType,pSubConditions,simplify,ppResultQuery) \
- ( (This)->lpVtbl -> MakeAndOr(This,nodeType,pSubConditions,simplify,ppResultQuery) )
-
-#define IQuerySolution_MakeLeaf(This,pszPropertyName,op,pszValueType,pValue,pPropertyNameTerm,pOperationTerm,pValueTerm,expand,ppResultQuery) \
- ( (This)->lpVtbl -> MakeLeaf(This,pszPropertyName,op,pszValueType,pValue,pPropertyNameTerm,pOperationTerm,pValueTerm,expand,ppResultQuery) )
-
-#define IQuerySolution_Resolve(This,pConditionTree,sqro,pstReferenceTime,ppResolvedConditionTree) \
- ( (This)->lpVtbl -> Resolve(This,pConditionTree,sqro,pstReferenceTime,ppResolvedConditionTree) )
-
-
-#define IQuerySolution_GetQuery(This,ppQueryNode,ppMainType) \
- ( (This)->lpVtbl -> GetQuery(This,ppQueryNode,ppMainType) )
-
-#define IQuerySolution_GetErrors(This,riid,ppParseErrors) \
- ( (This)->lpVtbl -> GetErrors(This,riid,ppParseErrors) )
-
-#define IQuerySolution_GetLexicalData(This,ppszInputString,ppTokens,pLocale,ppWordBreaker) \
- ( (This)->lpVtbl -> GetLexicalData(This,ppszInputString,ppTokens,pLocale,ppWordBreaker) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IQuerySolution_INTERFACE_DEFINED__ */
-
-
-#ifndef __ICondition_INTERFACE_DEFINED__
-#define __ICondition_INTERFACE_DEFINED__
-
-/* interface ICondition */
-/* [unique][uuid][object] */
-
-
-EXTERN_C const IID IID_ICondition;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("0FC988D4-C935-4b97-A973-46282EA175C8")
- ICondition : public IPersistStream
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE GetConditionType(
- /* [retval][out] */ __RPC__out CONDITION_TYPE *pNodeType) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetSubConditions(
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppv) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetComparisonInfo(
- /* [out] */
- __deref_opt_out LPWSTR *ppszPropertyName,
- /* [out] */
- __out_opt CONDITION_OPERATION *pOperation,
- /* [out] */
- __out_opt PROPVARIANT *pValue) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetValueType(
- /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszValueTypeName) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetValueNormalization(
- /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszNormalization) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetInputTerms(
- /* [out] */
- __out_opt IRichChunk **ppPropertyTerm,
- /* [out] */
- __out_opt IRichChunk **ppOperationTerm,
- /* [out] */
- __out_opt IRichChunk **ppValueTerm) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE Clone(
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppc) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IConditionVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- ICondition * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- ICondition * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- ICondition * This);
-
- HRESULT ( STDMETHODCALLTYPE *GetClassID )(
- ICondition * This,
- /* [out] */ __RPC__out CLSID *pClassID);
-
- HRESULT ( STDMETHODCALLTYPE *IsDirty )(
- ICondition * This);
-
- HRESULT ( STDMETHODCALLTYPE *Load )(
- ICondition * This,
- /* [unique][in] */ __RPC__in_opt IStream *pStm);
-
- HRESULT ( STDMETHODCALLTYPE *Save )(
- ICondition * This,
- /* [unique][in] */ __RPC__in_opt IStream *pStm,
- /* [in] */ BOOL fClearDirty);
-
- HRESULT ( STDMETHODCALLTYPE *GetSizeMax )(
- ICondition * This,
- /* [out] */ __RPC__out ULARGE_INTEGER *pcbSize);
-
- HRESULT ( STDMETHODCALLTYPE *GetConditionType )(
- ICondition * This,
- /* [retval][out] */ __RPC__out CONDITION_TYPE *pNodeType);
-
- HRESULT ( STDMETHODCALLTYPE *GetSubConditions )(
- ICondition * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppv);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetComparisonInfo )(
- ICondition * This,
- /* [out] */
- __deref_opt_out LPWSTR *ppszPropertyName,
- /* [out] */
- __out_opt CONDITION_OPERATION *pOperation,
- /* [out] */
- __out_opt PROPVARIANT *pValue);
-
- HRESULT ( STDMETHODCALLTYPE *GetValueType )(
- ICondition * This,
- /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszValueTypeName);
-
- HRESULT ( STDMETHODCALLTYPE *GetValueNormalization )(
- ICondition * This,
- /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszNormalization);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetInputTerms )(
- ICondition * This,
- /* [out] */
- __out_opt IRichChunk **ppPropertyTerm,
- /* [out] */
- __out_opt IRichChunk **ppOperationTerm,
- /* [out] */
- __out_opt IRichChunk **ppValueTerm);
-
- HRESULT ( STDMETHODCALLTYPE *Clone )(
- ICondition * This,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppc);
-
- END_INTERFACE
- } IConditionVtbl;
-
- interface ICondition
- {
- CONST_VTBL struct IConditionVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define ICondition_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define ICondition_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define ICondition_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define ICondition_GetClassID(This,pClassID) \
- ( (This)->lpVtbl -> GetClassID(This,pClassID) )
-
-
-#define ICondition_IsDirty(This) \
- ( (This)->lpVtbl -> IsDirty(This) )
-
-#define ICondition_Load(This,pStm) \
- ( (This)->lpVtbl -> Load(This,pStm) )
-
-#define ICondition_Save(This,pStm,fClearDirty) \
- ( (This)->lpVtbl -> Save(This,pStm,fClearDirty) )
-
-#define ICondition_GetSizeMax(This,pcbSize) \
- ( (This)->lpVtbl -> GetSizeMax(This,pcbSize) )
-
-
-#define ICondition_GetConditionType(This,pNodeType) \
- ( (This)->lpVtbl -> GetConditionType(This,pNodeType) )
-
-#define ICondition_GetSubConditions(This,riid,ppv) \
- ( (This)->lpVtbl -> GetSubConditions(This,riid,ppv) )
-
-#define ICondition_GetComparisonInfo(This,ppszPropertyName,pOperation,pValue) \
- ( (This)->lpVtbl -> GetComparisonInfo(This,ppszPropertyName,pOperation,pValue) )
-
-#define ICondition_GetValueType(This,ppszValueTypeName) \
- ( (This)->lpVtbl -> GetValueType(This,ppszValueTypeName) )
-
-#define ICondition_GetValueNormalization(This,ppszNormalization) \
- ( (This)->lpVtbl -> GetValueNormalization(This,ppszNormalization) )
-
-#define ICondition_GetInputTerms(This,ppPropertyTerm,ppOperationTerm,ppValueTerm) \
- ( (This)->lpVtbl -> GetInputTerms(This,ppPropertyTerm,ppOperationTerm,ppValueTerm) )
-
-#define ICondition_Clone(This,ppc) \
- ( (This)->lpVtbl -> Clone(This,ppc) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __ICondition_INTERFACE_DEFINED__ */
-
-
-#ifndef __IConditionGenerator_INTERFACE_DEFINED__
-#define __IConditionGenerator_INTERFACE_DEFINED__
-
-/* interface IConditionGenerator */
-/* [unique][uuid][object] */
-
-
-EXTERN_C const IID IID_IConditionGenerator;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("92D2CC58-4386-45a3-B98C-7E0CE64A4117")
- IConditionGenerator : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE Initialize(
- /* [in] */ __RPC__in_opt ISchemaProvider *pSchemaProvider) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE RecognizeNamedEntities(
- /* [in] */ __RPC__in LPCWSTR pszInputString,
- /* [in] */ LCID lcid,
- /* [in] */ __RPC__in_opt ITokenCollection *pTokenCollection,
- /* [out][in] */ __RPC__inout_opt INamedEntityCollector *pNamedEntities) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GenerateForLeaf(
- /* [in] */ __RPC__in_opt IConditionFactory *pConditionFactory,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName,
- /* [in] */ CONDITION_OPERATION op,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType,
- /* [in] */ __RPC__in LPCWSTR pszValue,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszValue2,
- /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pValueTerm,
- /* [in] */ BOOL automaticWildcard,
- /* [out] */ __RPC__out BOOL *pNoStringQuery,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppQueryExpression) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE DefaultPhrase(
- /* [unique][in] */ LPCWSTR pszValueType,
- /* [in] */ const PROPVARIANT *ppropvar,
- /* [in] */ BOOL fUseEnglish,
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszPhrase) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IConditionGeneratorVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IConditionGenerator * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IConditionGenerator * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IConditionGenerator * This);
-
- HRESULT ( STDMETHODCALLTYPE *Initialize )(
- IConditionGenerator * This,
- /* [in] */ __RPC__in_opt ISchemaProvider *pSchemaProvider);
-
- HRESULT ( STDMETHODCALLTYPE *RecognizeNamedEntities )(
- IConditionGenerator * This,
- /* [in] */ __RPC__in LPCWSTR pszInputString,
- /* [in] */ LCID lcid,
- /* [in] */ __RPC__in_opt ITokenCollection *pTokenCollection,
- /* [out][in] */ __RPC__inout_opt INamedEntityCollector *pNamedEntities);
-
- HRESULT ( STDMETHODCALLTYPE *GenerateForLeaf )(
- IConditionGenerator * This,
- /* [in] */ __RPC__in_opt IConditionFactory *pConditionFactory,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszPropertyName,
- /* [in] */ CONDITION_OPERATION op,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszValueType,
- /* [in] */ __RPC__in LPCWSTR pszValue,
- /* [unique][in] */ __RPC__in_opt LPCWSTR pszValue2,
- /* [in] */ __RPC__in_opt IRichChunk *pPropertyNameTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pOperationTerm,
- /* [in] */ __RPC__in_opt IRichChunk *pValueTerm,
- /* [in] */ BOOL automaticWildcard,
- /* [out] */ __RPC__out BOOL *pNoStringQuery,
- /* [retval][out] */ __RPC__deref_out_opt ICondition **ppQueryExpression);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *DefaultPhrase )(
- IConditionGenerator * This,
- /* [unique][in] */ LPCWSTR pszValueType,
- /* [in] */ const PROPVARIANT *ppropvar,
- /* [in] */ BOOL fUseEnglish,
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszPhrase);
-
- END_INTERFACE
- } IConditionGeneratorVtbl;
-
- interface IConditionGenerator
- {
- CONST_VTBL struct IConditionGeneratorVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IConditionGenerator_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IConditionGenerator_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IConditionGenerator_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IConditionGenerator_Initialize(This,pSchemaProvider) \
- ( (This)->lpVtbl -> Initialize(This,pSchemaProvider) )
-
-#define IConditionGenerator_RecognizeNamedEntities(This,pszInputString,lcid,pTokenCollection,pNamedEntities) \
- ( (This)->lpVtbl -> RecognizeNamedEntities(This,pszInputString,lcid,pTokenCollection,pNamedEntities) )
-
-#define IConditionGenerator_GenerateForLeaf(This,pConditionFactory,pszPropertyName,op,pszValueType,pszValue,pszValue2,pPropertyNameTerm,pOperationTerm,pValueTerm,automaticWildcard,pNoStringQuery,ppQueryExpression) \
- ( (This)->lpVtbl -> GenerateForLeaf(This,pConditionFactory,pszPropertyName,op,pszValueType,pszValue,pszValue2,pPropertyNameTerm,pOperationTerm,pValueTerm,automaticWildcard,pNoStringQuery,ppQueryExpression) )
-
-#define IConditionGenerator_DefaultPhrase(This,pszValueType,ppropvar,fUseEnglish,ppszPhrase) \
- ( (This)->lpVtbl -> DefaultPhrase(This,pszValueType,ppropvar,fUseEnglish,ppszPhrase) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IConditionGenerator_INTERFACE_DEFINED__ */
-
-
-#ifndef __IRichChunk_INTERFACE_DEFINED__
-#define __IRichChunk_INTERFACE_DEFINED__
-
-/* interface IRichChunk */
-/* [unique][uuid][object] */
-
-
-EXTERN_C const IID IID_IRichChunk;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("4FDEF69C-DBC9-454e-9910-B34F3C64B510")
- IRichChunk : public IUnknown
- {
- public:
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetData(
- /* [out] */
- __out_opt ULONG *pFirstPos,
- /* [out] */
- __out_opt ULONG *pLength,
- /* [out] */
- __deref_opt_out LPWSTR *ppsz,
- /* [out] */
- __out_opt PROPVARIANT *pValue) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IRichChunkVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IRichChunk * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IRichChunk * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IRichChunk * This);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetData )(
- IRichChunk * This,
- /* [out] */
- __out_opt ULONG *pFirstPos,
- /* [out] */
- __out_opt ULONG *pLength,
- /* [out] */
- __deref_opt_out LPWSTR *ppsz,
- /* [out] */
- __out_opt PROPVARIANT *pValue);
-
- END_INTERFACE
- } IRichChunkVtbl;
-
- interface IRichChunk
- {
- CONST_VTBL struct IRichChunkVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IRichChunk_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IRichChunk_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IRichChunk_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IRichChunk_GetData(This,pFirstPos,pLength,ppsz,pValue) \
- ( (This)->lpVtbl -> GetData(This,pFirstPos,pLength,ppsz,pValue) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IRichChunk_INTERFACE_DEFINED__ */
-
-
-#ifndef __IInterval_INTERFACE_DEFINED__
-#define __IInterval_INTERFACE_DEFINED__
-
-/* interface IInterval */
-/* [unique][uuid][object] */
-
-
-EXTERN_C const IID IID_IInterval;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("6BF0A714-3C18-430b-8B5D-83B1C234D3DB")
- IInterval : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE GetLimits(
- /* [out] */ __RPC__out INTERVAL_LIMIT_KIND *pilkLower,
- /* [out] */ __RPC__out PROPVARIANT *ppropvarLower,
- /* [out] */ __RPC__out INTERVAL_LIMIT_KIND *pilkUpper,
- /* [out] */ __RPC__out PROPVARIANT *ppropvarUpper) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IIntervalVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IInterval * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IInterval * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IInterval * This);
-
- HRESULT ( STDMETHODCALLTYPE *GetLimits )(
- IInterval * This,
- /* [out] */ __RPC__out INTERVAL_LIMIT_KIND *pilkLower,
- /* [out] */ __RPC__out PROPVARIANT *ppropvarLower,
- /* [out] */ __RPC__out INTERVAL_LIMIT_KIND *pilkUpper,
- /* [out] */ __RPC__out PROPVARIANT *ppropvarUpper);
-
- END_INTERFACE
- } IIntervalVtbl;
-
- interface IInterval
- {
- CONST_VTBL struct IIntervalVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IInterval_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IInterval_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IInterval_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IInterval_GetLimits(This,pilkLower,ppropvarLower,pilkUpper,ppropvarUpper) \
- ( (This)->lpVtbl -> GetLimits(This,pilkLower,ppropvarLower,pilkUpper,ppropvarUpper) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IInterval_INTERFACE_DEFINED__ */
-
-
-#ifndef __IMetaData_INTERFACE_DEFINED__
-#define __IMetaData_INTERFACE_DEFINED__
-
-/* interface IMetaData */
-/* [unique][uuid][object][helpstring] */
-
-
-EXTERN_C const IID IID_IMetaData;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("780102B0-C43B-4876-BC7B-5E9BA5C88794")
- IMetaData : public IUnknown
- {
- public:
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetData(
- /* [out] */
- __deref_opt_out LPWSTR *ppszKey,
- /* [out] */
- __deref_opt_out LPWSTR *ppszValue) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IMetaDataVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IMetaData * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IMetaData * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IMetaData * This);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetData )(
- IMetaData * This,
- /* [out] */
- __deref_opt_out LPWSTR *ppszKey,
- /* [out] */
- __deref_opt_out LPWSTR *ppszValue);
-
- END_INTERFACE
- } IMetaDataVtbl;
-
- interface IMetaData
- {
- CONST_VTBL struct IMetaDataVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IMetaData_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IMetaData_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IMetaData_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IMetaData_GetData(This,ppszKey,ppszValue) \
- ( (This)->lpVtbl -> GetData(This,ppszKey,ppszValue) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IMetaData_INTERFACE_DEFINED__ */
-
-
-/* interface __MIDL_itf_structuredquery_0000_0008 */
-/* [local] */
-
-
-
-
-extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0008_v0_0_c_ifspec;
-extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0008_v0_0_s_ifspec;
-
-#ifndef __IEntity_INTERFACE_DEFINED__
-#define __IEntity_INTERFACE_DEFINED__
-
-/* interface IEntity */
-/* [unique][object][uuid][helpstring] */
-
-
-EXTERN_C const IID IID_IEntity;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("24264891-E80B-4fd3-B7CE-4FF2FAE8931F")
- IEntity : public IUnknown
- {
- public:
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE Name(
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszName) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE Base(
- /* [retval][out] */ __RPC__deref_out_opt IEntity **pBaseEntity) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE Relationships(
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pRelationships) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetRelationship(
- /* [in] */ __RPC__in LPCWSTR pszRelationName,
- /* [retval][out] */ __RPC__deref_out_opt IRelationship **pRelationship) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE MetaData(
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE NamedEntities(
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pNamedEntities) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetNamedEntity(
- /* [in] */ __RPC__in LPCWSTR pszValue,
- /* [retval][out] */ __RPC__deref_out_opt INamedEntity **ppNamedEntity) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE DefaultPhrase(
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszPhrase) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IEntityVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IEntity * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IEntity * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IEntity * This);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *Name )(
- IEntity * This,
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszName);
-
- HRESULT ( STDMETHODCALLTYPE *Base )(
- IEntity * This,
- /* [retval][out] */ __RPC__deref_out_opt IEntity **pBaseEntity);
-
- HRESULT ( STDMETHODCALLTYPE *Relationships )(
- IEntity * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pRelationships);
-
- HRESULT ( STDMETHODCALLTYPE *GetRelationship )(
- IEntity * This,
- /* [in] */ __RPC__in LPCWSTR pszRelationName,
- /* [retval][out] */ __RPC__deref_out_opt IRelationship **pRelationship);
-
- HRESULT ( STDMETHODCALLTYPE *MetaData )(
- IEntity * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData);
-
- HRESULT ( STDMETHODCALLTYPE *NamedEntities )(
- IEntity * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pNamedEntities);
-
- HRESULT ( STDMETHODCALLTYPE *GetNamedEntity )(
- IEntity * This,
- /* [in] */ __RPC__in LPCWSTR pszValue,
- /* [retval][out] */ __RPC__deref_out_opt INamedEntity **ppNamedEntity);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *DefaultPhrase )(
- IEntity * This,
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszPhrase);
-
- END_INTERFACE
- } IEntityVtbl;
-
- interface IEntity
- {
- CONST_VTBL struct IEntityVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IEntity_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IEntity_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IEntity_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IEntity_Name(This,ppszName) \
- ( (This)->lpVtbl -> Name(This,ppszName) )
-
-#define IEntity_Base(This,pBaseEntity) \
- ( (This)->lpVtbl -> Base(This,pBaseEntity) )
-
-#define IEntity_Relationships(This,riid,pRelationships) \
- ( (This)->lpVtbl -> Relationships(This,riid,pRelationships) )
-
-#define IEntity_GetRelationship(This,pszRelationName,pRelationship) \
- ( (This)->lpVtbl -> GetRelationship(This,pszRelationName,pRelationship) )
-
-#define IEntity_MetaData(This,riid,pMetaData) \
- ( (This)->lpVtbl -> MetaData(This,riid,pMetaData) )
-
-#define IEntity_NamedEntities(This,riid,pNamedEntities) \
- ( (This)->lpVtbl -> NamedEntities(This,riid,pNamedEntities) )
-
-#define IEntity_GetNamedEntity(This,pszValue,ppNamedEntity) \
- ( (This)->lpVtbl -> GetNamedEntity(This,pszValue,ppNamedEntity) )
-
-#define IEntity_DefaultPhrase(This,ppszPhrase) \
- ( (This)->lpVtbl -> DefaultPhrase(This,ppszPhrase) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IEntity_INTERFACE_DEFINED__ */
-
-
-#ifndef __IRelationship_INTERFACE_DEFINED__
-#define __IRelationship_INTERFACE_DEFINED__
-
-/* interface IRelationship */
-/* [unique][object][uuid][helpstring] */
-
-
-EXTERN_C const IID IID_IRelationship;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("2769280B-5108-498c-9C7F-A51239B63147")
- IRelationship : public IUnknown
- {
- public:
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE Name(
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszName) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE IsReal(
- /* [retval][out] */ __RPC__out BOOL *pIsReal) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE Destination(
- /* [retval][out] */ __RPC__deref_out_opt IEntity **pDestinationEntity) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE MetaData(
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE DefaultPhrase(
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszPhrase) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IRelationshipVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IRelationship * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IRelationship * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IRelationship * This);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *Name )(
- IRelationship * This,
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszName);
-
- HRESULT ( STDMETHODCALLTYPE *IsReal )(
- IRelationship * This,
- /* [retval][out] */ __RPC__out BOOL *pIsReal);
-
- HRESULT ( STDMETHODCALLTYPE *Destination )(
- IRelationship * This,
- /* [retval][out] */ __RPC__deref_out_opt IEntity **pDestinationEntity);
-
- HRESULT ( STDMETHODCALLTYPE *MetaData )(
- IRelationship * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *DefaultPhrase )(
- IRelationship * This,
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszPhrase);
-
- END_INTERFACE
- } IRelationshipVtbl;
-
- interface IRelationship
- {
- CONST_VTBL struct IRelationshipVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IRelationship_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IRelationship_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IRelationship_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IRelationship_Name(This,ppszName) \
- ( (This)->lpVtbl -> Name(This,ppszName) )
-
-#define IRelationship_IsReal(This,pIsReal) \
- ( (This)->lpVtbl -> IsReal(This,pIsReal) )
-
-#define IRelationship_Destination(This,pDestinationEntity) \
- ( (This)->lpVtbl -> Destination(This,pDestinationEntity) )
-
-#define IRelationship_MetaData(This,riid,pMetaData) \
- ( (This)->lpVtbl -> MetaData(This,riid,pMetaData) )
-
-#define IRelationship_DefaultPhrase(This,ppszPhrase) \
- ( (This)->lpVtbl -> DefaultPhrase(This,ppszPhrase) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IRelationship_INTERFACE_DEFINED__ */
-
-
-#ifndef __INamedEntity_INTERFACE_DEFINED__
-#define __INamedEntity_INTERFACE_DEFINED__
-
-/* interface INamedEntity */
-/* [unique][uuid][object][helpstring] */
-
-
-EXTERN_C const IID IID_INamedEntity;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("ABDBD0B1-7D54-49fb-AB5C-BFF4130004CD")
- INamedEntity : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE GetValue(
- /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszValue) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE DefaultPhrase(
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszPhrase) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct INamedEntityVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- INamedEntity * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- INamedEntity * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- INamedEntity * This);
-
- HRESULT ( STDMETHODCALLTYPE *GetValue )(
- INamedEntity * This,
- /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszValue);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *DefaultPhrase )(
- INamedEntity * This,
- /* [retval][out] */
- __deref_opt_out LPWSTR *ppszPhrase);
-
- END_INTERFACE
- } INamedEntityVtbl;
-
- interface INamedEntity
- {
- CONST_VTBL struct INamedEntityVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define INamedEntity_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define INamedEntity_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define INamedEntity_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define INamedEntity_GetValue(This,ppszValue) \
- ( (This)->lpVtbl -> GetValue(This,ppszValue) )
-
-#define INamedEntity_DefaultPhrase(This,ppszPhrase) \
- ( (This)->lpVtbl -> DefaultPhrase(This,ppszPhrase) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __INamedEntity_INTERFACE_DEFINED__ */
-
-
-#ifndef __ISchemaProvider_INTERFACE_DEFINED__
-#define __ISchemaProvider_INTERFACE_DEFINED__
-
-/* interface ISchemaProvider */
-/* [unique][object][uuid][helpstring] */
-
-
-EXTERN_C const IID IID_ISchemaProvider;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("8CF89BCB-394C-49b2-AE28-A59DD4ED7F68")
- ISchemaProvider : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE Entities(
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pEntities) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE RootEntity(
- /* [retval][out] */ __RPC__deref_out_opt IEntity **pRootEntity) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE GetEntity(
- /* [in] */ __RPC__in LPCWSTR pszEntityName,
- /* [retval][out] */ __RPC__deref_out_opt IEntity **pEntity) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE MetaData(
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE Localize(
- /* [in] */ LCID lcid,
- /* [in] */ __RPC__in_opt ISchemaLocalizerSupport *pSchemaLocalizerSupport) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE SaveBinary(
- /* [in] */ __RPC__in LPCWSTR pszSchemaBinaryPath) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE LookupAuthoredNamedEntity(
- /* [in] */ __RPC__in_opt IEntity *pEntity,
- /* [in] */ __RPC__in LPCWSTR pszInputString,
- /* [in] */ __RPC__in_opt ITokenCollection *pTokenCollection,
- /* [in] */ ULONG cTokensBegin,
- /* [out] */ __RPC__out ULONG *pcTokensLength,
- /* [out] */ __RPC__deref_out_opt LPWSTR *ppszValue) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct ISchemaProviderVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- ISchemaProvider * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- ISchemaProvider * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- ISchemaProvider * This);
-
- HRESULT ( STDMETHODCALLTYPE *Entities )(
- ISchemaProvider * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pEntities);
-
- HRESULT ( STDMETHODCALLTYPE *RootEntity )(
- ISchemaProvider * This,
- /* [retval][out] */ __RPC__deref_out_opt IEntity **pRootEntity);
-
- HRESULT ( STDMETHODCALLTYPE *GetEntity )(
- ISchemaProvider * This,
- /* [in] */ __RPC__in LPCWSTR pszEntityName,
- /* [retval][out] */ __RPC__deref_out_opt IEntity **pEntity);
-
- HRESULT ( STDMETHODCALLTYPE *MetaData )(
- ISchemaProvider * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **pMetaData);
-
- HRESULT ( STDMETHODCALLTYPE *Localize )(
- ISchemaProvider * This,
- /* [in] */ LCID lcid,
- /* [in] */ __RPC__in_opt ISchemaLocalizerSupport *pSchemaLocalizerSupport);
-
- HRESULT ( STDMETHODCALLTYPE *SaveBinary )(
- ISchemaProvider * This,
- /* [in] */ __RPC__in LPCWSTR pszSchemaBinaryPath);
-
- HRESULT ( STDMETHODCALLTYPE *LookupAuthoredNamedEntity )(
- ISchemaProvider * This,
- /* [in] */ __RPC__in_opt IEntity *pEntity,
- /* [in] */ __RPC__in LPCWSTR pszInputString,
- /* [in] */ __RPC__in_opt ITokenCollection *pTokenCollection,
- /* [in] */ ULONG cTokensBegin,
- /* [out] */ __RPC__out ULONG *pcTokensLength,
- /* [out] */ __RPC__deref_out_opt LPWSTR *ppszValue);
-
- END_INTERFACE
- } ISchemaProviderVtbl;
-
- interface ISchemaProvider
- {
- CONST_VTBL struct ISchemaProviderVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define ISchemaProvider_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define ISchemaProvider_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define ISchemaProvider_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define ISchemaProvider_Entities(This,riid,pEntities) \
- ( (This)->lpVtbl -> Entities(This,riid,pEntities) )
-
-#define ISchemaProvider_RootEntity(This,pRootEntity) \
- ( (This)->lpVtbl -> RootEntity(This,pRootEntity) )
-
-#define ISchemaProvider_GetEntity(This,pszEntityName,pEntity) \
- ( (This)->lpVtbl -> GetEntity(This,pszEntityName,pEntity) )
-
-#define ISchemaProvider_MetaData(This,riid,pMetaData) \
- ( (This)->lpVtbl -> MetaData(This,riid,pMetaData) )
-
-#define ISchemaProvider_Localize(This,lcid,pSchemaLocalizerSupport) \
- ( (This)->lpVtbl -> Localize(This,lcid,pSchemaLocalizerSupport) )
-
-#define ISchemaProvider_SaveBinary(This,pszSchemaBinaryPath) \
- ( (This)->lpVtbl -> SaveBinary(This,pszSchemaBinaryPath) )
-
-#define ISchemaProvider_LookupAuthoredNamedEntity(This,pEntity,pszInputString,pTokenCollection,cTokensBegin,pcTokensLength,ppszValue) \
- ( (This)->lpVtbl -> LookupAuthoredNamedEntity(This,pEntity,pszInputString,pTokenCollection,cTokensBegin,pcTokensLength,ppszValue) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __ISchemaProvider_INTERFACE_DEFINED__ */
-
-
-#ifndef __ITokenCollection_INTERFACE_DEFINED__
-#define __ITokenCollection_INTERFACE_DEFINED__
-
-/* interface ITokenCollection */
-/* [unique][object][uuid][helpstring] */
-
-
-EXTERN_C const IID IID_ITokenCollection;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("22D8B4F2-F577-4adb-A335-C2AE88416FAB")
- ITokenCollection : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE NumberOfTokens(
- __RPC__in ULONG *pCount) = 0;
-
- virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetToken(
- /* [in] */ ULONG i,
- /* [out] */
- __out_opt ULONG *pBegin,
- /* [out] */
- __out_opt ULONG *pLength,
- /* [out] */
- __deref_opt_out LPWSTR *ppsz) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct ITokenCollectionVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- ITokenCollection * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- ITokenCollection * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- ITokenCollection * This);
-
- HRESULT ( STDMETHODCALLTYPE *NumberOfTokens )(
- ITokenCollection * This,
- __RPC__in ULONG *pCount);
-
- /* [local] */ HRESULT ( STDMETHODCALLTYPE *GetToken )(
- ITokenCollection * This,
- /* [in] */ ULONG i,
- /* [out] */
- __out_opt ULONG *pBegin,
- /* [out] */
- __out_opt ULONG *pLength,
- /* [out] */
- __deref_opt_out LPWSTR *ppsz);
-
- END_INTERFACE
- } ITokenCollectionVtbl;
-
- interface ITokenCollection
- {
- CONST_VTBL struct ITokenCollectionVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define ITokenCollection_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define ITokenCollection_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define ITokenCollection_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define ITokenCollection_NumberOfTokens(This,pCount) \
- ( (This)->lpVtbl -> NumberOfTokens(This,pCount) )
-
-#define ITokenCollection_GetToken(This,i,pBegin,pLength,ppsz) \
- ( (This)->lpVtbl -> GetToken(This,i,pBegin,pLength,ppsz) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __ITokenCollection_INTERFACE_DEFINED__ */
-
-
-/* interface __MIDL_itf_structuredquery_0000_0013 */
-/* [local] */
-
-typedef /* [public][public][v1_enum] */
-enum __MIDL___MIDL_itf_structuredquery_0000_0013_0001
- { NEC_LOW = 0,
- NEC_MEDIUM = ( NEC_LOW + 1 ) ,
- NEC_HIGH = ( NEC_MEDIUM + 1 )
- } NAMED_ENTITY_CERTAINTY;
-
-
-
-extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0013_v0_0_c_ifspec;
-extern RPC_IF_HANDLE __MIDL_itf_structuredquery_0000_0013_v0_0_s_ifspec;
-
-#ifndef __INamedEntityCollector_INTERFACE_DEFINED__
-#define __INamedEntityCollector_INTERFACE_DEFINED__
-
-/* interface INamedEntityCollector */
-/* [unique][object][uuid][helpstring] */
-
-
-EXTERN_C const IID IID_INamedEntityCollector;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("AF2440F6-8AFC-47d0-9A7F-396A0ACFB43D")
- INamedEntityCollector : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE Add(
- /* [in] */ ULONG beginSpan,
- /* [in] */ ULONG endSpan,
- /* [in] */ ULONG beginActual,
- /* [in] */ ULONG endActual,
- /* [in] */ __RPC__in_opt IEntity *pType,
- /* [in] */ __RPC__in LPCWSTR pszValue,
- /* [in] */ NAMED_ENTITY_CERTAINTY certainty) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct INamedEntityCollectorVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- INamedEntityCollector * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- INamedEntityCollector * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- INamedEntityCollector * This);
-
- HRESULT ( STDMETHODCALLTYPE *Add )(
- INamedEntityCollector * This,
- /* [in] */ ULONG beginSpan,
- /* [in] */ ULONG endSpan,
- /* [in] */ ULONG beginActual,
- /* [in] */ ULONG endActual,
- /* [in] */ __RPC__in_opt IEntity *pType,
- /* [in] */ __RPC__in LPCWSTR pszValue,
- /* [in] */ NAMED_ENTITY_CERTAINTY certainty);
-
- END_INTERFACE
- } INamedEntityCollectorVtbl;
-
- interface INamedEntityCollector
- {
- CONST_VTBL struct INamedEntityCollectorVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define INamedEntityCollector_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define INamedEntityCollector_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define INamedEntityCollector_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define INamedEntityCollector_Add(This,beginSpan,endSpan,beginActual,endActual,pType,pszValue,certainty) \
- ( (This)->lpVtbl -> Add(This,beginSpan,endSpan,beginActual,endActual,pType,pszValue,certainty) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __INamedEntityCollector_INTERFACE_DEFINED__ */
-
-
-#ifndef __ISchemaLocalizerSupport_INTERFACE_DEFINED__
-#define __ISchemaLocalizerSupport_INTERFACE_DEFINED__
-
-/* interface ISchemaLocalizerSupport */
-/* [unique][object][uuid] */
-
-
-EXTERN_C const IID IID_ISchemaLocalizerSupport;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("CA3FDCA2-BFBE-4eed-90D7-0CAEF0A1BDA1")
- ISchemaLocalizerSupport : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE Localize(
- /* [in] */ __RPC__in LPCWSTR pszGlobalString,
- /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszLocalString) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct ISchemaLocalizerSupportVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- ISchemaLocalizerSupport * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- ISchemaLocalizerSupport * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- ISchemaLocalizerSupport * This);
-
- HRESULT ( STDMETHODCALLTYPE *Localize )(
- ISchemaLocalizerSupport * This,
- /* [in] */ __RPC__in LPCWSTR pszGlobalString,
- /* [retval][out] */ __RPC__deref_out_opt LPWSTR *ppszLocalString);
-
- END_INTERFACE
- } ISchemaLocalizerSupportVtbl;
-
- interface ISchemaLocalizerSupport
- {
- CONST_VTBL struct ISchemaLocalizerSupportVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define ISchemaLocalizerSupport_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define ISchemaLocalizerSupport_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define ISchemaLocalizerSupport_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define ISchemaLocalizerSupport_Localize(This,pszGlobalString,ppszLocalString) \
- ( (This)->lpVtbl -> Localize(This,pszGlobalString,ppszLocalString) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __ISchemaLocalizerSupport_INTERFACE_DEFINED__ */
-
-
-#ifndef __IQueryParserManager_INTERFACE_DEFINED__
-#define __IQueryParserManager_INTERFACE_DEFINED__
-
-/* interface IQueryParserManager */
-/* [unique][object][uuid] */
-
-
-EXTERN_C const IID IID_IQueryParserManager;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("A879E3C4-AF77-44fb-8F37-EBD1487CF920")
- IQueryParserManager : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE CreateLoadedParser(
- /* [in] */ __RPC__in LPCWSTR pszCatalog,
- /* [in] */ LANGID langidForKeywords,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppQueryParser) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE InitializeOptions(
- /* [in] */ BOOL fUnderstandNQS,
- /* [in] */ BOOL fAutoWildCard,
- /* [in] */ __RPC__in_opt IQueryParser *pQueryParser) = 0;
-
- virtual HRESULT STDMETHODCALLTYPE SetOption(
- /* [in] */ QUERY_PARSER_MANAGER_OPTION option,
- /* [in] */ __RPC__in const PROPVARIANT *pOptionValue) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IQueryParserManagerVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IQueryParserManager * This,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IQueryParserManager * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IQueryParserManager * This);
-
- HRESULT ( STDMETHODCALLTYPE *CreateLoadedParser )(
- IQueryParserManager * This,
- /* [in] */ __RPC__in LPCWSTR pszCatalog,
- /* [in] */ LANGID langidForKeywords,
- /* [in] */ __RPC__in REFIID riid,
- /* [iid_is][retval][out] */ __RPC__deref_out_opt void **ppQueryParser);
-
- HRESULT ( STDMETHODCALLTYPE *InitializeOptions )(
- IQueryParserManager * This,
- /* [in] */ BOOL fUnderstandNQS,
- /* [in] */ BOOL fAutoWildCard,
- /* [in] */ __RPC__in_opt IQueryParser *pQueryParser);
-
- HRESULT ( STDMETHODCALLTYPE *SetOption )(
- IQueryParserManager * This,
- /* [in] */ QUERY_PARSER_MANAGER_OPTION option,
- /* [in] */ __RPC__in const PROPVARIANT *pOptionValue);
-
- END_INTERFACE
- } IQueryParserManagerVtbl;
-
- interface IQueryParserManager
- {
- CONST_VTBL struct IQueryParserManagerVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IQueryParserManager_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IQueryParserManager_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IQueryParserManager_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IQueryParserManager_CreateLoadedParser(This,pszCatalog,langidForKeywords,riid,ppQueryParser) \
- ( (This)->lpVtbl -> CreateLoadedParser(This,pszCatalog,langidForKeywords,riid,ppQueryParser) )
-
-#define IQueryParserManager_InitializeOptions(This,fUnderstandNQS,fAutoWildCard,pQueryParser) \
- ( (This)->lpVtbl -> InitializeOptions(This,fUnderstandNQS,fAutoWildCard,pQueryParser) )
-
-#define IQueryParserManager_SetOption(This,option,pOptionValue) \
- ( (This)->lpVtbl -> SetOption(This,option,pOptionValue) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IQueryParserManager_INTERFACE_DEFINED__ */
-
-
-
-#ifndef __StructuredQuery1_LIBRARY_DEFINED__
-#define __StructuredQuery1_LIBRARY_DEFINED__
-
-/* library StructuredQuery1 */
-/* [version][uuid] */
-
-
-EXTERN_C const IID LIBID_StructuredQuery1;
-
-EXTERN_C const CLSID CLSID_QueryParser;
-
-#ifdef __cplusplus
-
-class DECLSPEC_UUID("B72F8FD8-0FAB-4dd9-BDBF-245A6CE1485B")
-QueryParser;
-#endif
-
-EXTERN_C const CLSID CLSID_NegationCondition;
-
-#ifdef __cplusplus
-
-class DECLSPEC_UUID("8DE9C74C-605A-4acd-BEE3-2B222AA2D23D")
-NegationCondition;
-#endif
-
-EXTERN_C const CLSID CLSID_CompoundCondition;
-
-#ifdef __cplusplus
-
-class DECLSPEC_UUID("116F8D13-101E-4fa5-84D4-FF8279381935")
-CompoundCondition;
-#endif
-
-EXTERN_C const CLSID CLSID_LeafCondition;
-
-#ifdef __cplusplus
-
-class DECLSPEC_UUID("52F15C89-5A17-48e1-BBCD-46A3F89C7CC2")
-LeafCondition;
-#endif
-
-EXTERN_C const CLSID CLSID_ConditionFactory;
-
-#ifdef __cplusplus
-
-class DECLSPEC_UUID("E03E85B0-7BE3-4000-BA98-6C13DE9FA486")
-ConditionFactory;
-#endif
-
-EXTERN_C const CLSID CLSID_Interval;
-
-#ifdef __cplusplus
-
-class DECLSPEC_UUID("D957171F-4BF9-4de2-BCD5-C70A7CA55836")
-Interval;
-#endif
-
-EXTERN_C const CLSID CLSID_QueryParserManager;
-
-#ifdef __cplusplus
-
-class DECLSPEC_UUID("5088B39A-29B4-4d9d-8245-4EE289222F66")
-QueryParserManager;
-#endif
-#endif /* __StructuredQuery1_LIBRARY_DEFINED__ */
-
-/* Additional Prototypes for ALL interfaces */
-
-unsigned long __RPC_USER BSTR_UserSize( unsigned long *, unsigned long , BSTR * );
-unsigned char * __RPC_USER BSTR_UserMarshal( unsigned long *, unsigned char *, BSTR * );
-unsigned char * __RPC_USER BSTR_UserUnmarshal(unsigned long *, unsigned char *, BSTR * );
-void __RPC_USER BSTR_UserFree( unsigned long *, BSTR * );
-
-unsigned long __RPC_USER LPSAFEARRAY_UserSize( unsigned long *, unsigned long , LPSAFEARRAY * );
-unsigned char * __RPC_USER LPSAFEARRAY_UserMarshal( unsigned long *, unsigned char *, LPSAFEARRAY * );
-unsigned char * __RPC_USER LPSAFEARRAY_UserUnmarshal(unsigned long *, unsigned char *, LPSAFEARRAY * );
-void __RPC_USER LPSAFEARRAY_UserFree( unsigned long *, LPSAFEARRAY * );
-
-/* end of Additional Prototypes */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-
-
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py
deleted file mode 100644
index 3d28c82dc77b8b8b764bcf76d401265903db1a64..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py
+++ /dev/null
@@ -1,850 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""T2CharString operator specializer and generalizer.
-
-PostScript glyph drawing operations can be expressed in multiple different
-ways. For example, as well as the ``lineto`` operator, there is also a
-``hlineto`` operator which draws a horizontal line, removing the need to
-specify a ``dy`` coordinate, and a ``vlineto`` operator which draws a
-vertical line, removing the need to specify a ``dx`` coordinate. As well
-as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects
-into lists of operations, this module allows for conversion between general
-and specific forms of the operation.
-
-"""
-
-from fontTools.cffLib import maxStackLimit
-
-
-def stringToProgram(string):
- if isinstance(string, str):
- string = string.split()
- program = []
- for token in string:
- try:
- token = int(token)
- except ValueError:
- try:
- token = float(token)
- except ValueError:
- pass
- program.append(token)
- return program
-
-
-def programToString(program):
- return " ".join(str(x) for x in program)
-
-
-def programToCommands(program, getNumRegions=None):
- """Takes a T2CharString program list and returns list of commands.
- Each command is a two-tuple of commandname,arg-list. The commandname might
- be empty string if no commandname shall be emitted (used for glyph width,
- hintmask/cntrmask argument, as well as stray arguments at the end of the
- program (¯\_(ツ)_/¯).
- 'getNumRegions' may be None, or a callable object. It must return the
- number of regions. 'getNumRegions' takes a single argument, vsindex. If
- the vsindex argument is None, getNumRegions returns the default number
- of regions for the charstring, else it returns the numRegions for
- the vsindex.
- The Charstring may or may not start with a width value. If the first
- non-blend operator has an odd number of arguments, then the first argument is
- a width, and is popped off. This is complicated with blend operators, as
- there may be more than one before the first hint or moveto operator, and each
- one reduces several arguments to just one list argument. We have to sum the
- number of arguments that are not part of the blend arguments, and all the
- 'numBlends' values. We could instead have said that by definition, if there
- is a blend operator, there is no width value, since CFF2 Charstrings don't
- have width values. I discussed this with Behdad, and we are allowing for an
- initial width value in this case because developers may assemble a CFF2
- charstring from CFF Charstrings, which could have width values.
- """
-
- seenWidthOp = False
- vsIndex = None
- lenBlendStack = 0
- lastBlendIndex = 0
- commands = []
- stack = []
- it = iter(program)
-
- for token in it:
- if not isinstance(token, str):
- stack.append(token)
- continue
-
- if token == "blend":
- assert getNumRegions is not None
- numSourceFonts = 1 + getNumRegions(vsIndex)
- # replace the blend op args on the stack with a single list
- # containing all the blend op args.
- numBlends = stack[-1]
- numBlendArgs = numBlends * numSourceFonts + 1
- # replace first blend op by a list of the blend ops.
- stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
- lenBlendStack += numBlends + len(stack) - 1
- lastBlendIndex = len(stack)
- # if a blend op exists, this is or will be a CFF2 charstring.
- continue
-
- elif token == "vsindex":
- vsIndex = stack[-1]
- assert type(vsIndex) is int
-
- elif (not seenWidthOp) and token in {
- "hstem",
- "hstemhm",
- "vstem",
- "vstemhm",
- "cntrmask",
- "hintmask",
- "hmoveto",
- "vmoveto",
- "rmoveto",
- "endchar",
- }:
- seenWidthOp = True
- parity = token in {"hmoveto", "vmoveto"}
- if lenBlendStack:
- # lenBlendStack has the number of args represented by the last blend
- # arg and all the preceding args. We need to now add the number of
- # args following the last blend arg.
- numArgs = lenBlendStack + len(stack[lastBlendIndex:])
- else:
- numArgs = len(stack)
- if numArgs and (numArgs % 2) ^ parity:
- width = stack.pop(0)
- commands.append(("", [width]))
-
- if token in {"hintmask", "cntrmask"}:
- if stack:
- commands.append(("", stack))
- commands.append((token, []))
- commands.append(("", [next(it)]))
- else:
- commands.append((token, stack))
- stack = []
- if stack:
- commands.append(("", stack))
- return commands
-
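To make the width-detection rules above concrete, here is a minimal sketch (an editor's illustration, not part of the deleted file) of what programToCommands should return for a tiny Type2 program; the expected output is inferred from the code above rather than quoted from the project.

from fontTools.cffLib.specializer import programToCommands

# 100 is an odd leading argument before the first moveto, so it should be
# treated as the glyph width and emitted as a ('', [100]) pseudo-command.
program = [100, 0, 50, "rmoveto", 40, "hlineto", "endchar"]
print(programToCommands(program))
# expected: [('', [100]), ('rmoveto', [0, 50]), ('hlineto', [40]), ('endchar', [])]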
-
-def _flattenBlendArgs(args):
- token_list = []
- for arg in args:
- if isinstance(arg, list):
- token_list.extend(arg)
- token_list.append("blend")
- else:
- token_list.append(arg)
- return token_list
-
-
-def commandsToProgram(commands):
- """Takes a commands list as returned by programToCommands() and converts
- it back to a T2CharString program list."""
- program = []
- for op, args in commands:
- if any(isinstance(arg, list) for arg in args):
- args = _flattenBlendArgs(args)
- program.extend(args)
- if op:
- program.append(op)
- return program
-
-
-def _everyN(el, n):
- """Group the list el into groups of size n"""
- if len(el) % n != 0:
- raise ValueError(el)
- for i in range(0, len(el), n):
- yield el[i : i + n]
-
-
-class _GeneralizerDecombinerCommandsMap(object):
- @staticmethod
- def rmoveto(args):
- if len(args) != 2:
- raise ValueError(args)
- yield ("rmoveto", args)
-
- @staticmethod
- def hmoveto(args):
- if len(args) != 1:
- raise ValueError(args)
- yield ("rmoveto", [args[0], 0])
-
- @staticmethod
- def vmoveto(args):
- if len(args) != 1:
- raise ValueError(args)
- yield ("rmoveto", [0, args[0]])
-
- @staticmethod
- def rlineto(args):
- if not args:
- raise ValueError(args)
- for args in _everyN(args, 2):
- yield ("rlineto", args)
-
- @staticmethod
- def hlineto(args):
- if not args:
- raise ValueError(args)
- it = iter(args)
- try:
- while True:
- yield ("rlineto", [next(it), 0])
- yield ("rlineto", [0, next(it)])
- except StopIteration:
- pass
-
- @staticmethod
- def vlineto(args):
- if not args:
- raise ValueError(args)
- it = iter(args)
- try:
- while True:
- yield ("rlineto", [0, next(it)])
- yield ("rlineto", [next(it), 0])
- except StopIteration:
- pass
-
- @staticmethod
- def rrcurveto(args):
- if not args:
- raise ValueError(args)
- for args in _everyN(args, 6):
- yield ("rrcurveto", args)
-
- @staticmethod
- def hhcurveto(args):
- if len(args) < 4 or len(args) % 4 > 1:
- raise ValueError(args)
- if len(args) % 2 == 1:
- yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
- args = args[5:]
- for args in _everyN(args, 4):
- yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])
-
- @staticmethod
- def vvcurveto(args):
- if len(args) < 4 or len(args) % 4 > 1:
- raise ValueError(args)
- if len(args) % 2 == 1:
- yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
- args = args[5:]
- for args in _everyN(args, 4):
- yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])
-
- @staticmethod
- def hvcurveto(args):
- if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
- raise ValueError(args)
- last_args = None
- if len(args) % 2 == 1:
- lastStraight = len(args) % 8 == 5
- args, last_args = args[:-5], args[-5:]
- it = _everyN(args, 4)
- try:
- while True:
- args = next(it)
- yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
- args = next(it)
- yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
- except StopIteration:
- pass
- if last_args:
- args = last_args
- if lastStraight:
- yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
- else:
- yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
-
- @staticmethod
- def vhcurveto(args):
- if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
- raise ValueError(args)
- last_args = None
- if len(args) % 2 == 1:
- lastStraight = len(args) % 8 == 5
- args, last_args = args[:-5], args[-5:]
- it = _everyN(args, 4)
- try:
- while True:
- args = next(it)
- yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
- args = next(it)
- yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
- except StopIteration:
- pass
- if last_args:
- args = last_args
- if lastStraight:
- yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
- else:
- yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
-
- @staticmethod
- def rcurveline(args):
- if len(args) < 8 or len(args) % 6 != 2:
- raise ValueError(args)
- args, last_args = args[:-2], args[-2:]
- for args in _everyN(args, 6):
- yield ("rrcurveto", args)
- yield ("rlineto", last_args)
-
- @staticmethod
- def rlinecurve(args):
- if len(args) < 8 or len(args) % 2 != 0:
- raise ValueError(args)
- args, last_args = args[:-6], args[-6:]
- for args in _everyN(args, 2):
- yield ("rlineto", args)
- yield ("rrcurveto", last_args)
-
-
-def _convertBlendOpToArgs(blendList):
- # args is list of blend op args. Since we are supporting
- # recursive blend op calls, some of these args may also
- # be a list of blend op args, and need to be converted before
- # we convert the current list.
- if any([isinstance(arg, list) for arg in blendList]):
- args = [
- i
- for e in blendList
- for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
- ]
- else:
- args = blendList
-
- # We now know that blendList contains a blend op argument list, even if
- # some of the args are lists that each contain a blend op argument list.
- # Convert from:
- # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
- # to:
- # [ [x0] + [delta tuple for x0],
- # ...,
- # [xn] + [delta tuple for xn] ]
- numBlends = args[-1]
- # Can't use args.pop() when the args are being used in a nested list
- # comprehension. See calling context
- args = args[:-1]
-
- numRegions = len(args) // numBlends - 1
- if not (numBlends * (numRegions + 1) == len(args)):
- raise ValueError(blendList)
-
- defaultArgs = [[arg] for arg in args[:numBlends]]
- deltaArgs = args[numBlends:]
- numDeltaValues = len(deltaArgs)
- deltaList = [
- deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
- ]
- blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
- return blend_args
-
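A worked example of the reshaping sketched in the comments above, using made-up numbers (two blended values, two regions); the expected result is an editor's inference, not output captured from the project.

from fontTools.cffLib.specializer import _convertBlendOpToArgs

# layout: [default x0, default x1] + [deltas for x0] + [deltas for x1] + [numBlends]
blend_list = [10, 20, 1, 2, 3, 4, 2]  # numBlends = 2, hence numRegions = 2
print(_convertBlendOpToArgs(blend_list))
# expected: [[10, 1, 2, 1], [20, 3, 4, 1]]  (the trailing 1 marks a single blend tuple)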
-
-def generalizeCommands(commands, ignoreErrors=False):
- result = []
- mapping = _GeneralizerDecombinerCommandsMap
- for op, args in commands:
- # First, generalize any blend args in the arg list.
- if any([isinstance(arg, list) for arg in args]):
- try:
- args = [
- n
- for arg in args
- for n in (
- _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
- )
- ]
- except ValueError:
- if ignoreErrors:
- # Store op as data, such that consumers of commands do not have to
- # deal with incorrect number of arguments.
- result.append(("", args))
- result.append(("", [op]))
- else:
- raise
-
- func = getattr(mapping, op, None)
- if not func:
- result.append((op, args))
- continue
- try:
- for command in func(args):
- result.append(command)
- except ValueError:
- if ignoreErrors:
- # Store op as data, such that consumers of commands do not have to
- # deal with incorrect number of arguments.
- result.append(("", args))
- result.append(("", [op]))
- else:
- raise
- return result
-
-
-def generalizeProgram(program, getNumRegions=None, **kwargs):
- return commandsToProgram(
- generalizeCommands(programToCommands(program, getNumRegions), **kwargs)
- )
-
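A small, hedged example of the generalizer in use, with illustrative values chosen by the editor: the specialized vmoveto/hlineto operators should expand into the generic rmoveto/rlineto forms.

from fontTools.cffLib.specializer import generalizeProgram

print(generalizeProgram([50, "vmoveto", 10, 20, "hlineto"]))
# expected: [0, 50, 'rmoveto', 10, 0, 'rlineto', 0, 20, 'rlineto']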
-
-def _categorizeVector(v):
- """
- Takes an (X, Y) vector v and returns one of 'r', 'h', 'v', or '0' depending
- on which of X and/or Y is zero, plus a tuple of the nonzero components. If
- both are zero, a single zero is still returned.
-
- >>> _categorizeVector((0,0))
- ('0', (0,))
- >>> _categorizeVector((1,0))
- ('h', (1,))
- >>> _categorizeVector((0,2))
- ('v', (2,))
- >>> _categorizeVector((1,2))
- ('r', (1, 2))
- """
- if not v[0]:
- if not v[1]:
- return "0", v[:1]
- else:
- return "v", v[1:]
- else:
- if not v[1]:
- return "h", v[:1]
- else:
- return "r", v
-
-
-def _mergeCategories(a, b):
- if a == "0":
- return b
- if b == "0":
- return a
- if a == b:
- return a
- return None
-
-
-def _negateCategory(a):
- if a == "h":
- return "v"
- if a == "v":
- return "h"
- assert a in "0r"
- return a
-
-
-def _convertToBlendCmds(args):
- # return a list of blend commands, and
- # the remaining non-blended args, if any.
- num_args = len(args)
- stack_use = 0
- new_args = []
- i = 0
- while i < num_args:
- arg = args[i]
- if not isinstance(arg, list):
- new_args.append(arg)
- i += 1
- stack_use += 1
- else:
- prev_stack_use = stack_use
- # The arg is a tuple of blend values.
- # These are each (master 0,delta 1..delta n, 1)
- # Combine as many successive tuples as we can,
- # up to the max stack limit.
- num_sources = len(arg) - 1
- blendlist = [arg]
- i += 1
- stack_use += 1 + num_sources # 1 for the num_blends arg
- while (i < num_args) and isinstance(args[i], list):
- blendlist.append(args[i])
- i += 1
- stack_use += num_sources
- if stack_use + num_sources > maxStackLimit:
- # if we are here, max stack is the CFF2 max stack.
- # I use the CFF2 max stack limit here rather than
- # the 'maxstack' chosen by the client, as the default
- # maxstack may have been used unintentionally. For all
- # the other operators, this just produces a little less
- # optimization, but here it puts a hard (and low) limit
- # on the number of source fonts that can be used.
- break
- # blendList now contains as many single blend tuples as can be
- # combined without exceeding the CFF2 stack limit.
- num_blends = len(blendlist)
- # append the 'num_blends' default font values
- blend_args = []
- for arg in blendlist:
- blend_args.append(arg[0])
- for arg in blendlist:
- assert arg[-1] == 1
- blend_args.extend(arg[1:-1])
- blend_args.append(num_blends)
- new_args.append(blend_args)
- stack_use = prev_stack_use + num_blends
-
- return new_args
-
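For illustration (values invented by the editor), two adjacent blend tuples should be folded back into one CFF2 blend argument list, as long as the combined stack use stays under the CFF2 limit:

from fontTools.cffLib.specializer import _convertToBlendCmds

# a plain number followed by two blend tuples, each shaped (default, deltas..., 1)
args = [5, [10, 1, 2, 1], [20, 3, 4, 1]]
print(_convertToBlendCmds(args))
# expected: [5, [10, 20, 1, 2, 3, 4, 2]]  (defaults, then deltas, then numBlends = 2)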
-
-def _addArgs(a, b):
- if isinstance(b, list):
- if isinstance(a, list):
- if len(a) != len(b) or a[-1] != b[-1]:
- raise ValueError()
- return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]]
- else:
- a, b = b, a
- if isinstance(a, list):
- assert a[-1] == 1
- return [_addArgs(a[0], b)] + a[1:]
- return a + b
-
-
-def specializeCommands(
- commands,
- ignoreErrors=False,
- generalizeFirst=True,
- preserveTopology=False,
- maxstack=48,
-):
-
- # We perform several rounds of optimizations. They are carefully ordered and are:
- #
- # 0. Generalize commands.
- # This ensures that they are in our expected simple form, with each line/curve only
- # having arguments for one segment, and using the generic form (rlineto/rrcurveto).
- # If caller is sure the input is in this form, they can turn off generalization to
- # save time.
- #
- # 1. Combine successive rmoveto operations.
- #
- # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
- # We specialize into some, made-up, variants as well, which simplifies following
- # passes.
- #
- # 3. Merge or delete redundant operations, to the extent requested.
- # OpenType spec declares point numbers in CFF undefined. As such, we happily
- # change topology. If the client relies on point numbers (in GPOS anchors, or
- # for hinting purposes (what?)), they can turn this off.
- #
- # 4. Peephole optimization to revert some of the h/v variants back into their
- # original "relative" operator (rlineto/rrcurveto) if that saves a byte.
- #
- # 5. Combine adjacent operators when possible, minding not to go over max stack size.
- #
- # 6. Resolve any remaining made-up operators into real operators.
- #
- # I have convinced myself that this produces optimal bytecode (except for, possibly
- # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-)
- # A dynamic-programming approach can do the same but would be significantly slower.
- #
- # 7. For any args which are blend lists, convert them to a blend command.
-
- # 0. Generalize commands.
- if generalizeFirst:
- commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
- else:
- commands = list(commands) # Make copy since we modify in-place later.
-
- # 1. Combine successive rmoveto operations.
- for i in range(len(commands) - 1, 0, -1):
- if "rmoveto" == commands[i][0] == commands[i - 1][0]:
- v1, v2 = commands[i - 1][1], commands[i][1]
- commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
- del commands[i]
-
- # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
- #
- # We, in fact, specialize into more, made-up, variants that special-case when both
- # X and Y components are zero. This simplifies the following optimization passes.
- # This case is rare, but OCD does not let me skip it.
- #
- # After this round, we will have four variants that use the following mnemonics:
- #
- # - 'r' for relative, ie. non-zero X and non-zero Y,
- # - 'h' for horizontal, ie. non-zero X and zero Y,
- # - 'v' for vertical, ie. zero X and non-zero Y,
- # - '0' for zeros, ie. zero X and zero Y.
- #
- # The '0' pseudo-operators are not part of the spec, but help simplify the following
- # optimization rounds. We resolve them at the end. So, after this, we will have four
- # moveto and four lineto variants:
- #
- # - 0moveto, 0lineto
- # - hmoveto, hlineto
- # - vmoveto, vlineto
- # - rmoveto, rlineto
- #
- # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve
- # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dy0, and dy3 are zero but not dx3.
- # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
- #
- # There are nine different variants of curves without the '0'. Those nine map exactly
- # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
- # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
- # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of
- # arguments) is in fact an rhcurveto. The operators in the spec are designed such that
- # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
- #
- # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest
- # of the curve types with a 0 need to be encoded as an h or v variant. Ie. a '0' can be
- # thought of as a "don't care" and can be used as either an 'h' or a 'v'. As such, we always
- # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute
- # the '0' with either 'h' or 'v' and it works.
- #
- # When we get to curve splines however, things become more complicated... XXX finish this.
- # There's one more complexity with splines. If one side of the spline is not horizontal or
- # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
- # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
- # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
- # This limits our merge opportunities later.
- #
- for i in range(len(commands)):
- op, args = commands[i]
-
- if op in {"rmoveto", "rlineto"}:
- c, args = _categorizeVector(args)
- commands[i] = c + op[1:], args
- continue
-
- if op == "rrcurveto":
- c1, args1 = _categorizeVector(args[:2])
- c2, args2 = _categorizeVector(args[-2:])
- commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
- continue
-
- # 3. Merge or delete redundant operations, to the extent requested.
- #
- # TODO
- # A 0moveto that comes before all other path operations can be removed.
- # though I find conflicting evidence for this.
- #
- # TODO
- # "If hstem and vstem hints are both declared at the beginning of a
- # CharString, and this sequence is followed directly by the hintmask or
- # cntrmask operators, then the vstem hint operator (or, if applicable,
- # the vstemhm operator) need not be included."
- #
- # "The sequence and form of a CFF2 CharString program may be represented as:
- # {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
- #
- # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
- #
- # For Type2 CharStrings the sequence is:
- # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
-
- # Some other redundancies change topology (point numbers).
- if not preserveTopology:
- for i in range(len(commands) - 1, -1, -1):
- op, args = commands[i]
-
- # A 00curveto is demoted to a (specialized) lineto.
- if op == "00curveto":
- assert len(args) == 4
- c, args = _categorizeVector(args[1:3])
- op = c + "lineto"
- commands[i] = op, args
- # and then...
-
- # A 0lineto can be deleted.
- if op == "0lineto":
- del commands[i]
- continue
-
- # Merge adjacent hlineto's and vlineto's.
- # In CFF2 charstrings from variable fonts, each
- # arg item may be a list of blendable values, one from
- # each source font.
- if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
- _, other_args = commands[i - 1]
- assert len(args) == 1 and len(other_args) == 1
- try:
- new_args = [_addArgs(args[0], other_args[0])]
- except ValueError:
- continue
- commands[i - 1] = (op, new_args)
- del commands[i]
- continue
-
- # 4. Peephole optimization to revert some of the h/v variants back into their
- # original "relative" operator (rlineto/rrcurveto) if that saves a byte.
- for i in range(1, len(commands) - 1):
- op, args = commands[i]
- prv, nxt = commands[i - 1][0], commands[i + 1][0]
-
- if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
- assert len(args) == 1
- args = [0, args[0]] if op[0] == "v" else [args[0], 0]
- commands[i] = ("rlineto", args)
- continue
-
- if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
- assert (op[0] == "r") ^ (op[1] == "r")
- if op[0] == "v":
- pos = 0
- elif op[0] != "r":
- pos = 1
- elif op[1] == "v":
- pos = 4
- else:
- pos = 5
- # Insert, while maintaining the type of args (can be tuple or list).
- args = args[:pos] + type(args)((0,)) + args[pos:]
- commands[i] = ("rrcurveto", args)
- continue
-
- # 5. Combine adjacent operators when possible, minding not to go over max stack size.
- for i in range(len(commands) - 1, 0, -1):
- op1, args1 = commands[i - 1]
- op2, args2 = commands[i]
- new_op = None
-
- # Merge logic...
- if {op1, op2} <= {"rlineto", "rrcurveto"}:
- if op1 == op2:
- new_op = op1
- else:
- if op2 == "rrcurveto" and len(args2) == 6:
- new_op = "rlinecurve"
- elif len(args2) == 2:
- new_op = "rcurveline"
-
- elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
- new_op = op2
-
- elif {op1, op2} == {"vlineto", "hlineto"}:
- new_op = op1
-
- elif "curveto" == op1[2:] == op2[2:]:
- d0, d1 = op1[:2]
- d2, d3 = op2[:2]
-
- if d1 == "r" or d2 == "r" or d0 == d3 == "r":
- continue
-
- d = _mergeCategories(d1, d2)
- if d is None:
- continue
- if d0 == "r":
- d = _mergeCategories(d, d3)
- if d is None:
- continue
- new_op = "r" + d + "curveto"
- elif d3 == "r":
- d0 = _mergeCategories(d0, _negateCategory(d))
- if d0 is None:
- continue
- new_op = d0 + "r" + "curveto"
- else:
- d0 = _mergeCategories(d0, d3)
- if d0 is None:
- continue
- new_op = d0 + d + "curveto"
-
- # Make sure the stack depth does not exceed (maxstack - 1), so
- # that subroutinizer can insert subroutine calls at any point.
- if new_op and len(args1) + len(args2) < maxstack:
- commands[i - 1] = (new_op, args1 + args2)
- del commands[i]
-
- # 6. Resolve any remaining made-up operators into real operators.
- for i in range(len(commands)):
- op, args = commands[i]
-
- if op in {"0moveto", "0lineto"}:
- commands[i] = "h" + op[1:], args
- continue
-
- if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
- op0, op1 = op[:2]
- if (op0 == "r") ^ (op1 == "r"):
- assert len(args) % 2 == 1
- if op0 == "0":
- op0 = "h"
- if op1 == "0":
- op1 = "h"
- if op0 == "r":
- op0 = op1
- if op1 == "r":
- op1 = _negateCategory(op0)
- assert {op0, op1} <= {"h", "v"}, (op0, op1)
-
- if len(args) % 2:
- if op0 != op1: # vhcurveto / hvcurveto
- if (op0 == "h") ^ (len(args) % 8 == 1):
- # Swap last two args order
- args = args[:-2] + args[-1:] + args[-2:-1]
- else: # hhcurveto / vvcurveto
- if op0 == "h": # hhcurveto
- # Swap first two args order
- args = args[1:2] + args[:1] + args[2:]
-
- commands[i] = op0 + op1 + "curveto", args
- continue
-
- # 7. For any series of args which are blend lists, convert the series to a single blend arg.
- for i in range(len(commands)):
- op, args = commands[i]
- if any(isinstance(arg, list) for arg in args):
- commands[i] = op, _convertToBlendCmds(args)
-
- return commands
-
-
-def specializeProgram(program, getNumRegions=None, **kwargs):
- return commandsToProgram(
- specializeCommands(programToCommands(program, getNumRegions), **kwargs)
- )
-
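And the inverse of the earlier generalize example, again an editor's sketch with illustrative values: after the optimization rounds described in specializeCommands, the axis-aligned rlineto's should collapse into a single alternating hlineto.

from fontTools.cffLib.specializer import specializeProgram

program = [0, 50, "rmoveto", 10, 0, "rlineto", 0, 20, "rlineto"]
print(specializeProgram(program))
# expected: [50, 'vmoveto', 10, 20, 'hlineto']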
-
-if __name__ == "__main__":
- import sys
-
- if len(sys.argv) == 1:
- import doctest
-
- sys.exit(doctest.testmod().failed)
-
- import argparse
-
- parser = argparse.ArgumentParser(
- "fonttools cffLib.specialer",
- description="CFF CharString generalizer/specializer",
- )
- parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
- parser.add_argument(
- "--num-regions",
- metavar="NumRegions",
- nargs="*",
- default=None,
- help="Number of variable-font regions for blend opertaions.",
- )
-
- options = parser.parse_args(sys.argv[1:])
-
- getNumRegions = (
- None
- if options.num_regions is None
- else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex])
- )
-
- program = stringToProgram(options.program)
- print("Program:")
- print(programToString(program))
- commands = programToCommands(program, getNumRegions)
- print("Commands:")
- print(commands)
- program2 = commandsToProgram(commands)
- print("Program from commands:")
- print(programToString(program2))
- assert program == program2
- print("Generalized program:")
- print(programToString(generalizeProgram(program, getNumRegions)))
- print("Specialized program:")
- print(programToString(specializeProgram(program, getNumRegions)))
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-fef9d5f8.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-fef9d5f8.js
deleted file mode 100644
index 0672f2fb4a6a2f22671672ffbd003d906fd75ea0..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-fef9d5f8.js
+++ /dev/null
@@ -1,104 +0,0 @@
-import{B as Tn}from"./Button-89057c03.js";import{u as si}from"./utils-c3e3db58.js";import{B as Wn}from"./BlockLabel-e3b0d1c3.js";import{I as ri}from"./IconButton-16e5dbea.js";import{E as zn}from"./Empty-937365d8.js";import{S as ai}from"./ShareButton-d3fa81fa.js";import{D as li}from"./Download-696bd40c.js";import{S as Bn}from"./Index-37584f50.js";import{a as ui,P as On,T as di}from"./Trim-78ec077e.js";import{U as ci,M as qt}from"./ModifyUpload-87a26b2d.js";import{r as hi}from"./file-url-f4206b44.js";import{_ as tn,p as fi,u as mi,n as _i}from"./index-0526d562.js";import{U as pi}from"./Upload-a4034e93.js";import{a as gi,U as vi}from"./UploadText-232a3213.js";import{default as Hr}from"./Example-1fe376d1.js";import"./Clear-2c7bae91.js";/* empty css */import"./svelte/svelte.js";const{SvelteComponent:bi,append:wi,attr:x,detach:yi,init:ki,insert:Ci,noop:Rt,safe_not_equal:Ei,svg_element:nn}=window.__gradio__svelte__internal;function Ri(o){let e,t;return{c(){e=nn("svg"),t=nn("path"),x(t,"stroke","currentColor"),x(t,"stroke-width","1.5"),x(t,"stroke-linecap","round"),x(t,"stroke-linejoin","round"),x(t,"d","M21.044 5.704a.6.6 0 0 1 .956.483v11.626a.6.6 0 0 1-.956.483l-7.889-5.813a.6.6 0 0 1 0-.966l7.89-5.813ZM10.044 5.704a.6.6 0 0 1 .956.483v11.626a.6.6 0 0 1-.956.483l-7.888-5.813a.6.6 0 0 1 0-.966l7.888-5.813Z"),x(e,"xmlns","http://www.w3.org/2000/svg"),x(e,"width","24px"),x(e,"height","24px"),x(e,"fill","currentColor"),x(e,"stroke-width","1.5"),x(e,"viewBox","0 0 24 24"),x(e,"color","currentColor")},m(n,i){Ci(n,e,i),wi(e,t)},p:Rt,i:Rt,o:Rt,d(n){n&&yi(e)}}}class Si extends bi{constructor(e){super(),ki(this,e,null,Ri,Ei,{})}}const{SvelteComponent:Di,append:Mi,attr:ee,detach:Li,init:Ai,insert:Pi,noop:St,safe_not_equal:Ti,svg_element:on}=window.__gradio__svelte__internal;function Wi(o){let e,t;return{c(){e=on("svg"),t=on("path"),ee(t,"stroke","currentColor"),ee(t,"stroke-width","1.5"),ee(t,"stroke-linecap","round"),ee(t,"stroke-linejoin","round"),ee(t,"d","M2.956 5.704A.6.6 0 0 0 2 6.187v11.626a.6.6 0 0 0 .956.483l7.889-5.813a.6.6 0 0 0 0-.966l-7.89-5.813ZM13.956 5.704a.6.6 0 0 0-.956.483v11.626a.6.6 0 0 0 .956.483l7.889-5.813a.6.6 0 0 0 0-.966l-7.89-5.813Z"),ee(e,"xmlns","http://www.w3.org/2000/svg"),ee(e,"width","24px"),ee(e,"height","24px"),ee(e,"fill","currentColor"),ee(e,"stroke-width","1.5"),ee(e,"viewBox","0 0 24 24"),ee(e,"color","currentColor")},m(n,i){Pi(n,e,i),Mi(e,t)},p:St,i:St,o:St,d(n){n&&Li(e)}}}class zi extends Di{constructor(e){super(),Ai(this,e,null,Wi,Ti,{})}}const{SvelteComponent:Bi,append:rt,attr:H,detach:Oi,init:Ii,insert:Ni,noop:Dt,safe_not_equal:Hi,svg_element:Je}=window.__gradio__svelte__internal;function qi(o){let e,t,n,i,s;return{c(){e=Je("svg"),t=Je("path"),n=Je("path"),i=Je("line"),s=Je("line"),H(t,"d","M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"),H(n,"d","M19 10v2a7 7 0 0 1-14 0v-2"),H(i,"x1","12"),H(i,"y1","19"),H(i,"x2","12"),H(i,"y2","23"),H(s,"x1","8"),H(s,"y1","23"),H(s,"x2","16"),H(s,"y2","23"),H(e,"xmlns","http://www.w3.org/2000/svg"),H(e,"width","100%"),H(e,"height","100%"),H(e,"viewBox","0 0 24 24"),H(e,"fill","none"),H(e,"stroke","currentColor"),H(e,"stroke-width","2"),H(e,"stroke-linecap","round"),H(e,"stroke-linejoin","round"),H(e,"class","feather feather-mic")},m(a,u){Ni(a,e,u),rt(e,t),rt(e,n),rt(e,i),rt(e,s)},p:Dt,i:Dt,o:Dt,d(a){a&&Oi(e)}}}class Ui extends 
Bi{constructor(e){super(),Ii(this,e,null,qi,Hi,{})}}const{SvelteComponent:ji,append:Mt,attr:V,detach:Fi,init:Vi,insert:Xi,noop:Lt,safe_not_equal:Gi,svg_element:at}=window.__gradio__svelte__internal;function Yi(o){let e,t,n,i;return{c(){e=at("svg"),t=at("path"),n=at("circle"),i=at("circle"),V(t,"d","M9 18V5l12-2v13"),V(n,"cx","6"),V(n,"cy","18"),V(n,"r","3"),V(i,"cx","18"),V(i,"cy","16"),V(i,"r","3"),V(e,"xmlns","http://www.w3.org/2000/svg"),V(e,"width","100%"),V(e,"height","100%"),V(e,"viewBox","0 0 24 24"),V(e,"fill","none"),V(e,"stroke","currentColor"),V(e,"stroke-width","1.5"),V(e,"stroke-linecap","round"),V(e,"stroke-linejoin","round"),V(e,"class","feather feather-music")},m(s,a){Xi(s,e,a),Mt(e,t),Mt(e,n),Mt(e,i)},p:Lt,i:Lt,o:Lt,d(s){s&&Fi(e)}}}class gt extends ji{constructor(e){super(),Vi(this,e,null,Yi,Gi,{})}}var Zi=globalThis&&globalThis.__awaiter||function(o,e,t,n){function i(s){return s instanceof t?s:new t(function(a){a(s)})}return new(t||(t=Promise))(function(s,a){function u(r){try{d(n.next(r))}catch(c){a(c)}}function l(r){try{d(n.throw(r))}catch(c){a(c)}}function d(r){r.done?s(r.value):i(r.value).then(u,l)}d((n=n.apply(o,e||[])).next())})};function Ji(o,e){return Zi(this,void 0,void 0,function*(){const t=new AudioContext({sampleRate:e});return t.decodeAudioData(o).finally(()=>t.close())})}function Ki(o){const e=o[0];if(e.some(t=>t>1||t<-1)){const t=e.length;let n=0;for(let i=0;in&&(n=s)}for(const i of o)for(let s=0;so?.[t],copyFromChannel:AudioBuffer.prototype.copyFromChannel,copyToChannel:AudioBuffer.prototype.copyToChannel}}const At={decode:Ji,createBuffer:Qi};var sn=globalThis&&globalThis.__awaiter||function(o,e,t,n){function i(s){return s instanceof t?s:new t(function(a){a(s)})}return new(t||(t=Promise))(function(s,a){function u(r){try{d(n.next(r))}catch(c){a(c)}}function l(r){try{d(n.throw(r))}catch(c){a(c)}}function d(r){r.done?s(r.value):i(r.value).then(u,l)}d((n=n.apply(o,e||[])).next())})};function $i(o,e,t){var n,i;return sn(this,void 0,void 0,function*(){const s=yield fetch(o,t);{const a=(n=s.clone().body)===null||n===void 0?void 0:n.getReader(),u=Number((i=s.headers)===null||i===void 0?void 0:i.get("Content-Length"));let l=0;const d=(r,c)=>sn(this,void 0,void 0,function*(){if(r)return;l+=c?.length||0;const h=Math.round(l/u*100);return e(h),a?.read().then(({done:f,value:m})=>d(f,m))});a?.read().then(({done:r,value:c})=>d(r,c))}return s.blob()})}const xi={fetchBlob:$i};class vt{constructor(){this.listeners={},this.on=this.addEventListener,this.un=this.removeEventListener}addEventListener(e,t,n){if(this.listeners[e]||(this.listeners[e]=new Set),this.listeners[e].add(t),n?.once){const i=()=>{this.removeEventListener(e,i),this.removeEventListener(e,t)};return this.addEventListener(e,i),i}return()=>this.removeEventListener(e,t)}removeEventListener(e,t){var n;(n=this.listeners[e])===null||n===void 0||n.delete(t)}once(e,t){return this.on(e,t,{once:!0})}unAll(){this.listeners={}}emit(e,...t){this.listeners[e]&&this.listeners[e].forEach(n=>n(...t))}}class eo extends vt{constructor(e){super(),this.isExternalMedia=!1,e.media?(this.media=e.media,this.isExternalMedia=!0):this.media=document.createElement("audio"),e.mediaControls&&(this.media.controls=!0),e.autoplay&&(this.media.autoplay=!0),e.playbackRate!=null&&this.onceMediaEvent("canplay",()=>{e.playbackRate!=null&&(this.media.playbackRate=e.playbackRate)})}onMediaEvent(e,t,n){return this.media.addEventListener(e,t,n),()=>this.media.removeEventListener(e,t)}onceMediaEvent(e,t){return 
this.onMediaEvent(e,t,{once:!0})}getSrc(){return this.media.currentSrc||this.media.src||""}revokeSrc(){const e=this.getSrc();e.startsWith("blob:")&&URL.revokeObjectURL(e)}setSrc(e,t){if(this.getSrc()===e)return;this.revokeSrc();const i=t instanceof Blob?URL.createObjectURL(t):e;this.media.src=i,this.media.load()}destroy(){this.media.pause(),!this.isExternalMedia&&(this.media.remove(),this.revokeSrc(),this.media.src="",this.media.load())}setMediaElement(e){this.media=e}play(){return this.media.play()}pause(){this.media.pause()}isPlaying(){return!this.media.paused&&!this.media.ended}setTime(e){this.media.currentTime=e}getDuration(){return this.media.duration}getCurrentTime(){return this.media.currentTime}getVolume(){return this.media.volume}setVolume(e){this.media.volume=e}getMuted(){return this.media.muted}setMuted(e){this.media.muted=e}getPlaybackRate(){return this.media.playbackRate}setPlaybackRate(e,t){t!=null&&(this.media.preservesPitch=t),this.media.playbackRate=e}getMediaElement(){return this.media}setSinkId(e){return this.media.setSinkId(e)}}function to(o,e,t,n,i=5){let s=()=>{};if(!o)return s;const a=u=>{if(u.button===2)return;u.preventDefault(),u.stopPropagation(),o.style.touchAction="none";let l=u.clientX,d=u.clientY,r=!1;const c=m=>{m.preventDefault(),m.stopPropagation();const _=m.clientX,g=m.clientY;if(r||Math.abs(_-l)>=i||Math.abs(g-d)>=i){const{left:v,top:y}=o.getBoundingClientRect();r||(r=!0,t?.(l-v,d-y)),e(_-l,g-d,_-v,g-y),l=_,d=g}},h=m=>{r&&(m.preventDefault(),m.stopPropagation())},f=()=>{o.style.touchAction="",r&&n?.(),s()};document.addEventListener("pointermove",c),document.addEventListener("pointerup",f),document.addEventListener("pointerleave",f),document.addEventListener("click",h,!0),s=()=>{document.removeEventListener("pointermove",c),document.removeEventListener("pointerup",f),document.removeEventListener("pointerleave",f),setTimeout(()=>{document.removeEventListener("click",h,!0)},10)}};return o.addEventListener("pointerdown",a),()=>{s(),o.removeEventListener("pointerdown",a)}}class bt extends vt{constructor(e,t){super(),this.timeouts=[],this.isScrolling=!1,this.audioData=null,this.resizeObserver=null,this.isDragging=!1,this.options=e;const n=this.parentFromOptionsContainer(e.container);this.parent=n;const[i,s]=this.initHtml();n.appendChild(i),this.container=i,this.scrollContainer=s.querySelector(".scroll"),this.wrapper=s.querySelector(".wrapper"),this.canvasWrapper=s.querySelector(".canvases"),this.progressWrapper=s.querySelector(".progress"),this.cursor=s.querySelector(".cursor"),t&&s.appendChild(t),this.initEvents()}parentFromOptionsContainer(e){let t;if(typeof e=="string"?t=document.querySelector(e):e instanceof HTMLElement&&(t=e),!t)throw new Error("Container not found");return t}initEvents(){const e=n=>{const i=this.wrapper.getBoundingClientRect(),s=n.clientX-i.left,a=n.clientX-i.left,u=s/i.width,l=a/i.height;return[u,l]};this.wrapper.addEventListener("click",n=>{const[i,s]=e(n);this.emit("click",i,s)}),this.wrapper.addEventListener("dblclick",n=>{const[i,s]=e(n);this.emit("dblclick",i,s)}),this.options.dragToSeek&&this.initDrag(),this.scrollContainer.addEventListener("scroll",()=>{const{scrollLeft:n,scrollWidth:i,clientWidth:s}=this.scrollContainer,a=n/i,u=(n+s)/i;this.emit("scroll",a,u)});const t=this.createDelay(100);this.resizeObserver=new 
ResizeObserver(()=>{t(()=>this.reRender())}),this.resizeObserver.observe(this.scrollContainer)}initDrag(){to(this.wrapper,(e,t,n)=>{this.emit("drag",Math.max(0,Math.min(1,n/this.wrapper.getBoundingClientRect().width)))},()=>this.isDragging=!0,()=>this.isDragging=!1)}getHeight(){return this.options.height==null?128:isNaN(Number(this.options.height))?this.options.height==="auto"&&this.parent.clientHeight||128:Number(this.options.height)}initHtml(){const e=document.createElement("div"),t=e.attachShadow({mode:"open"});return t.innerHTML=`
-
-
-
- `,[e,t]}setOptions(e){if(this.options.container!==e.container){const t=this.parentFromOptionsContainer(e.container);t.appendChild(this.container),this.parent=t}e.dragToSeek&&!this.options.dragToSeek&&this.initDrag(),this.options=e,this.reRender()}getWrapper(){return this.wrapper}getScroll(){return this.scrollContainer.scrollLeft}destroy(){var e;this.container.remove(),(e=this.resizeObserver)===null||e===void 0||e.disconnect()}createDelay(e=10){const t={};return this.timeouts.push(t),n=>{t.timeout&&clearTimeout(t.timeout),t.timeout=setTimeout(n,e)}}convertColorValues(e){if(!Array.isArray(e))return e||"";if(e.length<2)return e[0]||"";const t=document.createElement("canvas"),i=t.getContext("2d").createLinearGradient(0,0,0,t.height),s=1/(e.length-1);return e.forEach((a,u)=>{const l=u*s;i.addColorStop(l,a)}),i}renderBarWaveform(e,t,n,i){const s=e[0],a=e[1]||e[0],u=s.length,{width:l,height:d}=n.canvas,r=d/2,c=window.devicePixelRatio||1,h=t.barWidth?t.barWidth*c:1,f=t.barGap?t.barGap*c:t.barWidth?h/2:0,m=t.barRadius||0,_=l/(h+f)/u,g=m&&"roundRect"in n?"roundRect":"rect";n.beginPath();let v=0,y=0,k=0;for(let D=0;D<=u;D++){const M=Math.round(D*_);if(M>v){const P=Math.round(y*r*i),T=Math.round(k*r*i),I=P+T||1;let B=r-P;t.barAlign==="top"?B=0:t.barAlign==="bottom"&&(B=d-I),n[g](v*(h+f),B,h,I,m),v=M,y=0,k=0}const A=Math.abs(s[D]||0),E=Math.abs(a[D]||0);A>y&&(y=A),E>k&&(k=E)}n.fill(),n.closePath()}renderLineWaveform(e,t,n,i){const s=a=>{const u=e[a]||e[0],l=u.length,{height:d}=n.canvas,r=d/2,c=n.canvas.width/l;n.moveTo(0,r);let h=0,f=0;for(let m=0;m<=l;m++){const _=Math.round(m*c);if(_>h){const v=Math.round(f*r*i)||1,y=r+v*(a===0?-1:1);n.lineTo(h,y),h=_,f=0}const g=Math.abs(u[m]||0);g>f&&(f=g)}n.lineTo(h,r)};n.beginPath(),s(0),s(1),n.fill(),n.closePath()}renderWaveform(e,t,n){if(n.fillStyle=this.convertColorValues(t.waveColor),t.renderFunction){t.renderFunction(e,n);return}let i=t.barHeight||1;if(t.normalize){const s=Array.from(e[0]).reduce((a,u)=>Math.max(a,Math.abs(u)),0);i=s?1/s:1}if(t.barWidth||t.barGap||t.barAlign){this.renderBarWaveform(e,t,n,i);return}this.renderLineWaveform(e,t,n,i)}renderSingleCanvas(e,t,n,i,s,a,u,l){const d=window.devicePixelRatio||1,r=document.createElement("canvas"),c=e[0].length;r.width=Math.round(n*(a-s)/c),r.height=i*d,r.style.width=`${Math.floor(r.width/d)}px`,r.style.height=`${i}px`,r.style.left=`${Math.floor(s*n/d/c)}px`,u.appendChild(r);const h=r.getContext("2d");if(this.renderWaveform(e.map(f=>f.slice(s,a)),t,h),r.width>0&&r.height>0){const f=r.cloneNode(),m=f.getContext("2d");m.drawImage(r,0,0),m.globalCompositeOperation="source-in",m.fillStyle=this.convertColorValues(t.progressColor),m.fillRect(0,0,r.width,r.height),l.appendChild(f)}}renderChannel(e,t,n){const i=document.createElement("div"),s=this.getHeight();i.style.height=`${s}px`,this.canvasWrapper.style.minHeight=`${s}px`,this.canvasWrapper.appendChild(i);const a=i.cloneNode();this.progressWrapper.appendChild(a);const{scrollLeft:u,scrollWidth:l,clientWidth:d}=this.scrollContainer,r=e[0].length,c=r/l;let h=Math.min(bt.MAX_CANVAS_WIDTH,d);if(t.barWidth||t.barGap){const M=t.barWidth||.5,A=t.barGap||M/2,E=M+A;h%E!==0&&(h=Math.floor(h/E)*E)}const 
f=Math.floor(Math.abs(u)*c),m=Math.floor(f+h*c),_=m-f,g=(M,A)=>{this.renderSingleCanvas(e,t,n,s,Math.max(0,M),Math.min(A,r),i,a)},v=this.createDelay(),y=this.createDelay(),k=(M,A)=>{g(M,A),M>0&&v(()=>{k(M-_,A-_)})},D=(M,A)=>{g(M,A),A{D(M+_,A+_)})};k(f,m),mu.timeout&&clearTimeout(u.timeout)),this.timeouts=[],this.canvasWrapper.innerHTML="",this.progressWrapper.innerHTML="",this.wrapper.style.width="",this.options.width!=null&&(this.scrollContainer.style.width=typeof this.options.width=="number"?`${this.options.width}px`:this.options.width);const t=window.devicePixelRatio||1,n=this.scrollContainer.clientWidth,i=Math.ceil(e.duration*(this.options.minPxPerSec||0));this.isScrolling=i>n;const s=this.options.fillParent&&!this.isScrolling,a=(s?n:i)*t;if(this.wrapper.style.width=s?"100%":`${i}px`,this.scrollContainer.style.overflowX=this.isScrolling?"auto":"hidden",this.scrollContainer.classList.toggle("noScrollbar",!!this.options.hideScrollbar),this.cursor.style.backgroundColor=`${this.options.cursorColor||this.options.progressColor}`,this.cursor.style.width=`${this.options.cursorWidth}px`,this.options.splitChannels)for(let u=0;u1&&u.push(e.getChannelData(1)),this.renderChannel(u,this.options,a)}this.audioData=e,this.emit("render")}reRender(){if(!this.audioData)return;const e=this.progressWrapper.clientWidth;this.render(this.audioData);const t=this.progressWrapper.clientWidth;this.scrollContainer.scrollLeft+=t-e}zoom(e){this.options.minPxPerSec=e,this.reRender()}scrollIntoView(e,t=!1){const{clientWidth:n,scrollLeft:i,scrollWidth:s}=this.scrollContainer,a=s*e,u=n/2,l=t&&this.options.autoCenter&&!this.isDragging?u:n;if(a>i+l||a=d&&a{}}start(){this.unsubscribe=this.on("tick",()=>{requestAnimationFrame(()=>{this.emit("tick")})}),this.emit("tick")}stop(){this.unsubscribe()}destroy(){this.unsubscribe()}}var Pt=globalThis&&globalThis.__awaiter||function(o,e,t,n){function i(s){return s instanceof t?s:new t(function(a){a(s)})}return new(t||(t=Promise))(function(s,a){function u(r){try{d(n.next(r))}catch(c){a(c)}}function l(r){try{d(n.throw(r))}catch(c){a(c)}}function d(r){r.done?s(r.value):i(r.value).then(u,l)}d((n=n.apply(o,e||[])).next())})};class io extends vt{constructor(e=new AudioContext){super(),this.bufferNode=null,this.autoplay=!1,this.playStartTime=0,this.playedDuration=0,this._muted=!1,this.buffer=null,this.currentSrc="",this.paused=!0,this.crossOrigin=null,this.audioContext=e,this.gainNode=this.audioContext.createGain(),this.gainNode.connect(this.audioContext.destination)}load(){return Pt(this,void 0,void 0,function*(){})}get src(){return this.currentSrc}set src(e){this.currentSrc=e,fetch(e).then(t=>t.arrayBuffer()).then(t=>this.audioContext.decodeAudioData(t)).then(t=>{this.buffer=t,this.emit("loadedmetadata"),this.emit("canplay"),this.autoplay&&this.play()})}_play(){var e;this.paused&&(this.paused=!1,(e=this.bufferNode)===null||e===void 0||e.disconnect(),this.bufferNode=this.audioContext.createBufferSource(),this.bufferNode.buffer=this.buffer,this.bufferNode.connect(this.gainNode),this.playedDuration>=this.duration&&(this.playedDuration=0),this.bufferNode.start(this.audioContext.currentTime,this.playedDuration),this.playStartTime=this.audioContext.currentTime,this.bufferNode.onended=()=>{this.currentTime>=this.duration&&(this.pause(),this.emit("ended"))})}_pause(){var e;this.paused||(this.paused=!0,(e=this.bufferNode)===null||e===void 0||e.stop(),this.playedDuration+=this.audioContext.currentTime-this.playStartTime)}play(){return Pt(this,void 0,void 
0,function*(){this._play(),this.emit("play")})}pause(){this._pause(),this.emit("pause")}setSinkId(e){return Pt(this,void 0,void 0,function*(){return this.audioContext.setSinkId(e)})}get playbackRate(){var e,t;return(t=(e=this.bufferNode)===null||e===void 0?void 0:e.playbackRate.value)!==null&&t!==void 0?t:1}set playbackRate(e){this.bufferNode&&(this.bufferNode.playbackRate.value=e)}get currentTime(){return this.paused?this.playedDuration:this.playedDuration+this.audioContext.currentTime-this.playStartTime}set currentTime(e){this.emit("seeking"),this.paused?this.playedDuration=e:(this._pause(),this.playedDuration=e,this._play()),this.emit("timeupdate")}get duration(){var e;return((e=this.buffer)===null||e===void 0?void 0:e.duration)||0}get volume(){return this.gainNode.gain.value}set volume(e){this.gainNode.gain.value=e,this.emit("volumechange")}get muted(){return this._muted}set muted(e){this._muted!==e&&(this._muted=e,this._muted?this.gainNode.disconnect():this.gainNode.connect(this.audioContext.destination))}getGainNode(){return this.gainNode}}var Pe=globalThis&&globalThis.__awaiter||function(o,e,t,n){function i(s){return s instanceof t?s:new t(function(a){a(s)})}return new(t||(t=Promise))(function(s,a){function u(r){try{d(n.next(r))}catch(c){a(c)}}function l(r){try{d(n.throw(r))}catch(c){a(c)}}function d(r){r.done?s(r.value):i(r.value).then(u,l)}d((n=n.apply(o,e||[])).next())})};const oo={waveColor:"#999",progressColor:"#555",cursorWidth:1,minPxPerSec:0,fillParent:!0,interact:!0,dragToSeek:!1,autoScroll:!0,autoCenter:!0,sampleRate:8e3};class He extends eo{static create(e){return new He(e)}constructor(e){const t=e.media||(e.backend==="WebAudio"?new io:void 0);super({media:t,mediaControls:e.mediaControls,autoplay:e.autoplay,playbackRate:e.audioRate}),this.plugins=[],this.decodedData=null,this.subscriptions=[],this.mediaSubscriptions=[],this.options=Object.assign({},oo,e),this.timer=new no;const n=t?void 0:this.getMediaElement();this.renderer=new bt(this.options,n),this.initPlayerEvents(),this.initRendererEvents(),this.initTimerEvents(),this.initPlugins();const i=this.options.url||this.getSrc();i?this.load(i,this.options.peaks,this.options.duration):this.options.peaks&&this.options.duration&&this.loadPredecoded()}initTimerEvents(){this.subscriptions.push(this.timer.on("tick",()=>{const e=this.getCurrentTime();this.renderer.renderProgress(e/this.getDuration(),!0),this.emit("timeupdate",e),this.emit("audioprocess",e)}))}initPlayerEvents(){this.mediaSubscriptions.push(this.onMediaEvent("timeupdate",()=>{const e=this.getCurrentTime();this.renderer.renderProgress(e/this.getDuration(),this.isPlaying()),this.emit("timeupdate",e)}),this.onMediaEvent("play",()=>{this.emit("play"),this.timer.start()}),this.onMediaEvent("pause",()=>{this.emit("pause"),this.timer.stop()}),this.onMediaEvent("emptied",()=>{this.timer.stop()}),this.onMediaEvent("ended",()=>{this.emit("finish")}),this.onMediaEvent("seeking",()=>{this.emit("seeking",this.getCurrentTime())}))}initRendererEvents(){this.subscriptions.push(this.renderer.on("click",(e,t)=>{this.options.interact&&(this.seekTo(e),this.emit("interaction",e*this.getDuration()),this.emit("click",e,t))}),this.renderer.on("dblclick",(e,t)=>{this.emit("dblclick",e,t)}),this.renderer.on("scroll",(e,t)=>{const n=this.getDuration();this.emit("scroll",e*n,t*n)}),this.renderer.on("render",()=>{this.emit("redraw")}));{let 
e;this.subscriptions.push(this.renderer.on("drag",t=>{this.options.interact&&(this.renderer.renderProgress(t),clearTimeout(e),e=setTimeout(()=>{this.seekTo(t)},this.isPlaying()?0:200),this.emit("interaction",t*this.getDuration()),this.emit("drag",t))}))}}initPlugins(){var e;!((e=this.options.plugins)===null||e===void 0)&&e.length&&this.options.plugins.forEach(t=>{this.registerPlugin(t)})}unsubscribePlayerEvents(){this.mediaSubscriptions.forEach(e=>e()),this.mediaSubscriptions=[]}setOptions(e){this.options=Object.assign({},this.options,e),this.renderer.setOptions(this.options),e.audioRate&&this.setPlaybackRate(e.audioRate),e.mediaControls!=null&&(this.getMediaElement().controls=e.mediaControls)}registerPlugin(e){return e.init(this),this.plugins.push(e),this.subscriptions.push(e.once("destroy",()=>{this.plugins=this.plugins.filter(t=>t!==e)})),e}getWrapper(){return this.renderer.getWrapper()}getScroll(){return this.renderer.getScroll()}getActivePlugins(){return this.plugins}loadPredecoded(){return Pe(this,void 0,void 0,function*(){this.options.peaks&&this.options.duration&&(this.decodedData=At.createBuffer(this.options.peaks,this.options.duration),yield Promise.resolve(),this.renderDecoded())})}renderDecoded(){return Pe(this,void 0,void 0,function*(){this.decodedData&&(this.emit("decode",this.getDuration()),this.renderer.render(this.decodedData))})}loadAudio(e,t,n,i){return Pe(this,void 0,void 0,function*(){if(this.emit("load",e),!this.options.media&&this.isPlaying()&&this.pause(),this.decodedData=null,!t&&!n){const s=a=>this.emit("loading",a);t=yield xi.fetchBlob(e,s,this.options.fetchParams)}if(this.setSrc(e,t),i=(yield Promise.resolve(i||this.getDuration()))||(yield new Promise(s=>{this.onceMediaEvent("loadedmetadata",()=>s(this.getDuration()))}))||(yield Promise.resolve(0)),n)this.decodedData=At.createBuffer(n,i);else if(t){const s=yield t.arrayBuffer();this.decodedData=yield At.decode(s,this.options.sampleRate)}this.renderDecoded(),this.emit("ready",this.getDuration())})}load(e,t,n){return Pe(this,void 0,void 0,function*(){yield this.loadAudio(e,void 0,t,n)})}loadBlob(e,t,n){return Pe(this,void 0,void 0,function*(){yield this.loadAudio("blob",e,t,n)})}zoom(e){if(!this.decodedData)throw new Error("No audio loaded");this.renderer.zoom(e),this.emit("zoom",e)}getDecodedData(){return this.decodedData}exportPeaks({channels:e=2,maxLength:t=8e3,precision:n=1e4}={}){if(!this.decodedData)throw new Error("The audio has not been decoded yet");const i=Math.min(e,this.decodedData.numberOfChannels),s=[];for(let a=0;ae.destroy()),this.subscriptions.forEach(e=>e()),this.unsubscribePlayerEvents(),this.timer.destroy(),this.renderer.destroy(),super.destroy()}}let In=class{constructor(){this.listeners={},this.on=this.addEventListener,this.un=this.removeEventListener}addEventListener(e,t,n){if(this.listeners[e]||(this.listeners[e]=new Set),this.listeners[e].add(t),n?.once){const i=()=>{this.removeEventListener(e,i),this.removeEventListener(e,t)};return this.addEventListener(e,i),i}return()=>this.removeEventListener(e,t)}removeEventListener(e,t){var n;(n=this.listeners[e])===null||n===void 0||n.delete(t)}once(e,t){return this.on(e,t,{once:!0})}unAll(){this.listeners={}}emit(e,...t){this.listeners[e]&&this.listeners[e].forEach(n=>n(...t))}},so=class extends In{constructor(e){super(),this.subscriptions=[],this.options=e}onInit(){}init(e){this.wavesurfer=e,this.onInit()}destroy(){this.emit("destroy"),this.subscriptions.forEach(e=>e())}};function ut(o,e,t,n,i=5){let s=()=>{};if(!o)return s;const 
a=u=>{if(u.button===2)return;u.preventDefault(),u.stopPropagation(),o.style.touchAction="none";let l=u.clientX,d=u.clientY,r=!1;const c=m=>{m.preventDefault(),m.stopPropagation();const _=m.clientX,g=m.clientY;if(r||Math.abs(_-l)>=i||Math.abs(g-d)>=i){const{left:v,top:y}=o.getBoundingClientRect();r||(r=!0,t?.(l-v,d-y)),e(_-l,g-d,_-v,g-y),l=_,d=g}},h=m=>{r&&(m.preventDefault(),m.stopPropagation())},f=()=>{o.style.touchAction="",r&&n?.(),s()};document.addEventListener("pointermove",c),document.addEventListener("pointerup",f),document.addEventListener("pointerleave",f),document.addEventListener("click",h,!0),s=()=>{document.removeEventListener("pointermove",c),document.removeEventListener("pointerup",f),document.removeEventListener("pointerleave",f),setTimeout(()=>{document.removeEventListener("click",h,!0)},10)}};return o.addEventListener("pointerdown",a),()=>{s(),o.removeEventListener("pointerdown",a)}}class rn extends In{constructor(e,t,n=0){var i,s,a,u,l,d,r;super(),this.totalDuration=t,this.numberOfChannels=n,this.minLength=0,this.maxLength=1/0,this.id=e.id||`region-${Math.random().toString(32).slice(2)}`,this.start=this.clampPosition(e.start),this.end=this.clampPosition((i=e.end)!==null&&i!==void 0?i:e.start),this.drag=(s=e.drag)===null||s===void 0||s,this.resize=(a=e.resize)===null||a===void 0||a,this.color=(u=e.color)!==null&&u!==void 0?u:"rgba(0, 0, 0, 0.1)",this.minLength=(l=e.minLength)!==null&&l!==void 0?l:this.minLength,this.maxLength=(d=e.maxLength)!==null&&d!==void 0?d:this.maxLength,this.channelIdx=(r=e.channelIdx)!==null&&r!==void 0?r:-1,this.element=this.initElement(),this.setContent(e.content),this.setPart(),this.renderPosition(),this.initMouseEvents()}clampPosition(e){return Math.max(0,Math.min(this.totalDuration,e))}setPart(){const e=this.start===this.end;this.element.setAttribute("part",`${e?"marker":"region"} ${this.id}`)}addResizeHandles(e){const t=document.createElement("div");t.setAttribute("data-resize","left"),t.setAttribute("style",`
- position: absolute;
- z-index: 2;
- width: 6px;
- height: 100%;
- top: 0;
- left: 0;
- border-left: 2px solid rgba(0, 0, 0, 0.5);
- border-radius: 2px 0 0 2px;
- cursor: ew-resize;
- word-break: keep-all;
- `),t.setAttribute("part","region-handle region-handle-left");const n=t.cloneNode();n.setAttribute("data-resize","right"),n.style.left="",n.style.right="0",n.style.borderRight=n.style.borderLeft,n.style.borderLeft="",n.style.borderRadius="0 2px 2px 0",n.setAttribute("part","region-handle region-handle-right"),e.appendChild(t),e.appendChild(n),ut(t,i=>this.onResize(i,"start"),()=>null,()=>this.onEndResizing(),1),ut(n,i=>this.onResize(i,"end"),()=>null,()=>this.onEndResizing(),1)}removeResizeHandles(e){const t=e.querySelector('[data-resize="left"]'),n=e.querySelector('[data-resize="right"]');t&&e.removeChild(t),n&&e.removeChild(n)}initElement(){const e=document.createElement("div"),t=this.start===this.end;let n=0,i=100;return this.channelIdx>=0&&this.channelIdxthis.emit("click",t)),e.addEventListener("mouseenter",t=>this.emit("over",t)),e.addEventListener("mouseleave",t=>this.emit("leave",t)),e.addEventListener("dblclick",t=>this.emit("dblclick",t)),ut(e,t=>this.onMove(t),()=>this.onStartMoving(),()=>this.onEndMoving()))}onStartMoving(){this.drag&&(this.element.style.cursor="grabbing")}onEndMoving(){this.drag&&(this.element.style.cursor="grab",this.emit("update-end"))}_onUpdate(e,t){if(!this.element.parentElement)return;const n=e/this.element.parentElement.clientWidth*this.totalDuration,i=t&&t!=="start"?this.start:this.start+n,s=t&&t!=="end"?this.end:this.end+n,a=s-i;i>=0&&s<=this.totalDuration&&i<=s&&a>=this.minLength&&a<=this.maxLength&&(this.start=i,this.end=s,this.renderPosition(),this.emit("update"))}onMove(e){this.drag&&this._onUpdate(e)}onResize(e,t){this.resize&&this._onUpdate(e,t)}onEndResizing(){this.resize&&this.emit("update-end")}_setTotalDuration(e){this.totalDuration=e,this.renderPosition()}play(){this.emit("play")}setContent(e){var t;if((t=this.content)===null||t===void 0||t.remove(),e){if(typeof e=="string"){this.content=document.createElement("div");const n=this.start===this.end;this.content.style.padding=`0.2em ${n?.2:.4}em`,this.content.textContent=e}else this.content=e;this.content.setAttribute("part","region-content"),this.element.appendChild(this.content)}else this.content=void 0}setOptions(e){var t,n;if(e.color&&(this.color=e.color,this.element.style.backgroundColor=this.color),e.drag!==void 0&&(this.drag=e.drag,this.element.style.cursor=this.drag?"grab":"default"),e.start!==void 0||e.end!==void 0){const i=this.start===this.end;this.start=this.clampPosition((t=e.start)!==null&&t!==void 0?t:this.start),this.end=this.clampPosition((n=e.end)!==null&&n!==void 0?n:i?this.start:this.end),this.renderPosition(),this.setPart()}if(e.content&&this.setContent(e.content),e.id&&(this.id=e.id,this.setPart()),e.resize!==void 0&&e.resize!==this.resize){const i=this.start===this.end;this.resize=e.resize,this.resize&&!i?this.addResizeHandles(this.element):this.removeResizeHandles(this.element)}}remove(){this.emit("remove"),this.element.remove(),this.element=null}}let ro=class Nn extends so{constructor(e){super(e),this.regions=[],this.regionsContainer=this.initRegionsContainer()}static create(e){return new Nn(e)}onInit(){if(!this.wavesurfer)throw Error("WaveSurfer is not initialized");this.wavesurfer.getWrapper().appendChild(this.regionsContainer);let e=[];this.subscriptions.push(this.wavesurfer.on("timeupdate",t=>{const n=this.regions.filter(i=>i.start<=t&&i.end>=t);n.forEach(i=>{e.includes(i)||this.emit("region-in",i)}),e.forEach(i=>{n.includes(i)||this.emit("region-out",i)}),e=n}))}initRegionsContainer(){const e=document.createElement("div");return e.setAttribute("style",`
- position: absolute;
- top: 0;
- left: 0;
- width: 100%;
- height: 100%;
- z-index: 3;
- pointer-events: none;
- `),e}getRegions(){return this.regions}avoidOverlapping(e){if(!e.content)return;const t=e.content,n=t.getBoundingClientRect().left,i=e.element.scrollWidth,s=this.regions.filter(a=>{if(a===e||!a.content)return!1;const u=a.content.getBoundingClientRect().left,l=a.element.scrollWidth;return n{var u;return((u=a.content)===null||u===void 0?void 0:u.getBoundingClientRect().height)||0}).reduce((a,u)=>a+u,0);t.style.marginTop=`${s}px`}saveRegion(e){this.regionsContainer.appendChild(e.element),this.avoidOverlapping(e),this.regions.push(e);const t=[e.on("update-end",()=>{this.avoidOverlapping(e),this.emit("region-updated",e)}),e.on("play",()=>{var n,i;(n=this.wavesurfer)===null||n===void 0||n.play(),(i=this.wavesurfer)===null||i===void 0||i.setTime(e.start)}),e.on("click",n=>{this.emit("region-clicked",e,n)}),e.on("dblclick",n=>{this.emit("region-double-clicked",e,n)}),e.once("remove",()=>{t.forEach(n=>n()),this.regions=this.regions.filter(n=>n!==e)})];this.subscriptions.push(...t),this.emit("region-created",e)}addRegion(e){var t,n;if(!this.wavesurfer)throw Error("WaveSurfer is not initialized");const i=this.wavesurfer.getDuration(),s=(n=(t=this.wavesurfer)===null||t===void 0?void 0:t.getDecodedData())===null||n===void 0?void 0:n.numberOfChannels,a=new rn(e,i,s);return i?this.saveRegion(a):this.subscriptions.push(this.wavesurfer.once("ready",u=>{a._setTotalDuration(u),this.saveRegion(a)})),a}enableDragSelection(e){var t,n;const i=(n=(t=this.wavesurfer)===null||t===void 0?void 0:t.getWrapper())===null||n===void 0?void 0:n.querySelector("div");if(!i)return()=>{};let s=null,a=0;return ut(i,(u,l,d)=>{s&&s._onUpdate(u,d>a?"end":"start")},u=>{var l,d;if(a=u,!this.wavesurfer)return;const r=this.wavesurfer.getDuration(),c=(d=(l=this.wavesurfer)===null||l===void 0?void 0:l.getDecodedData())===null||d===void 0?void 0:d.numberOfChannels,h=this.wavesurfer.getWrapper().clientWidth,f=u/h*r,m=(u+5)/h*r;s=new rn(Object.assign(Object.assign({},e),{start:f,end:m}),r,c),this.regionsContainer.appendChild(s.element)},()=>{s&&(this.saveRegion(s),s=null)})}clearRegions(){this.regions.forEach(e=>e.remove())}destroy(){this.clearRegions(),super.destroy()}};function ao(o){const e=o.numberOfChannels,t=o.length*e*2+44,n=new ArrayBuffer(t),i=new DataView(n);let s=0;const a=function(u,l,d){for(let r=0;r{const n=new AudioContext,i=o.numberOfChannels,s=o.sampleRate;let a=o.length,u=0;e&&t&&(u=Math.round(e*s),a=Math.round(t*s)-u);const l=n.createBuffer(i,a,s);for(let d=0;d{o&&o.skip(e)},ze=(o,e)=>o/100*(e||5);const{SvelteComponent:lo,append:J,attr:O,check_outros:dt,create_component:qe,destroy_component:Ue,detach:Ce,element:te,empty:uo,group_outros:ct,init:co,insert:Ee,listen:be,mount_component:je,noop:xe,run_all:Hn,safe_not_equal:ho,set_data:fo,space:Te,text:an,transition_in:Y,transition_out:K}=window.__gradio__svelte__internal;function mo(o){let e,t;return e=new ui({}),{c(){qe(e.$$.fragment)},m(n,i){je(e,n,i),t=!0},i(n){t||(Y(e.$$.fragment,n),t=!0)},o(n){K(e.$$.fragment,n),t=!1},d(n){Ue(e,n)}}}function _o(o){let e,t;return e=new On({}),{c(){qe(e.$$.fragment)},m(n,i){je(e,n,i),t=!0},i(n){t||(Y(e.$$.fragment,n),t=!0)},o(n){K(e.$$.fragment,n),t=!1},d(n){Ue(e,n)}}}function ln(o){let e,t,n,i,s;return t=new ci({}),{c(){e=te("button"),qe(t.$$.fragment),O(e,"class","action icon svelte-k0z87h"),O(e,"aria-label","Reset audio")},m(a,u){Ee(a,e,u),je(t,e,null),n=!0,i||(s=be(e,"click",o[26]),i=!0)},p:xe,i(a){n||(Y(t.$$.fragment,a),n=!0)},o(a){K(t.$$.fragment,a),n=!1},d(a){a&&Ce(e),Ue(t),i=!1,s()}}}function un(o){let e,t,n,i;const 
s=[go,po],a=[];function u(l,d){return l[0]===""?0:1}return e=u(o),t=a[e]=s[e](o),{c(){t.c(),n=uo()},m(l,d){a[e].m(l,d),Ee(l,n,d),i=!0},p(l,d){let r=e;e=u(l),e===r?a[e].p(l,d):(ct(),K(a[r],1,1,()=>{a[r]=null}),dt(),t=a[e],t?t.p(l,d):(t=a[e]=s[e](l),t.c()),Y(t,1),t.m(n.parentNode,n))},i(l){i||(Y(t),i=!0)},o(l){K(t),i=!1},d(l){l&&Ce(n),a[e].d(l)}}}function po(o){let e,t,n,i,s;return{c(){e=te("button"),e.textContent="Trim",t=Te(),n=te("button"),n.textContent="Cancel",O(e,"class","text-button svelte-k0z87h"),O(n,"class","text-button svelte-k0z87h")},m(a,u){Ee(a,e,u),Ee(a,t,u),Ee(a,n,u),i||(s=[be(e,"click",o[11]),be(n,"click",o[13])],i=!0)},p:xe,i:xe,o:xe,d(a){a&&(Ce(e),Ce(t),Ce(n)),i=!1,Hn(s)}}}function go(o){let e,t,n,i,s;return t=new di({}),{c(){e=te("button"),qe(t.$$.fragment),O(e,"class","action icon svelte-k0z87h"),O(e,"aria-label","Trim audio to selection")},m(a,u){Ee(a,e,u),je(t,e,null),n=!0,i||(s=be(e,"click",o[13]),i=!0)},p:xe,i(a){n||(Y(t.$$.fragment,a),n=!0)},o(a){K(t.$$.fragment,a),n=!1},d(a){a&&Ce(e),Ue(t),i=!1,s()}}}function vo(o){let e,t,n,i,s,a,u,l,d,r,c,h,f,m,_,g,v,y,k,D,M,A,E,P,T,I;r=new Si({});const B=[_o,mo],W=[];function b(p,z){return p[4]?0:1}m=b(o),_=W[m]=B[m](o),k=new zi({});let L=o[5]&&o[0]===""&&ln(o),S=o[6]&&un(o);return{c(){e=te("div"),t=te("button"),n=te("span"),i=an(o[9]),s=an("x"),u=Te(),l=te("div"),d=te("button"),qe(r.$$.fragment),h=Te(),f=te("button"),_.c(),v=Te(),y=te("button"),qe(k.$$.fragment),M=Te(),A=te("div"),L&&L.c(),E=Te(),S&&S.c(),O(n,"class","svelte-k0z87h"),O(t,"class","playback icon svelte-k0z87h"),O(t,"aria-label",a=`Adjust playback speed to ${o[10][(o[10].indexOf(o[9])+1)%o[10].length]}x`),O(d,"class","rewind icon svelte-k0z87h"),O(d,"aria-label",c=`Skip backwards by ${ze(o[2],o[8].skip_length)} seconds`),O(f,"class","play-pause-button icon svelte-k0z87h"),O(f,"aria-label",g=o[4]?o[3]("common.play"):o[3]("common.pause")),O(y,"class","skip icon svelte-k0z87h"),O(y,"aria-label",D="Skip forward by "+ze(o[2],o[8].skip_length)+" seconds"),O(l,"class","play-pause-wrapper svelte-k0z87h"),O(A,"class","settings-wrapper svelte-k0z87h"),O(e,"class","controls svelte-k0z87h"),O(e,"data-testid","waveform-controls")},m(p,z){Ee(p,e,z),J(e,t),J(t,n),J(n,i),J(n,s),J(e,u),J(e,l),J(l,d),je(r,d,null),J(l,h),J(l,f),W[m].m(f,null),J(l,v),J(l,y),je(k,y,null),J(e,M),J(e,A),L&&L.m(A,null),J(A,E),S&&S.m(A,null),P=!0,T||(I=[be(t,"click",o[22]),be(d,"click",o[23]),be(f,"click",o[24]),be(y,"click",o[25])],T=!0)},p(p,[z]){(!P||z&512)&&fo(i,p[9]),(!P||z&512&&a!==(a=`Adjust playback speed to ${p[10][(p[10].indexOf(p[9])+1)%p[10].length]}x`))&&O(t,"aria-label",a),(!P||z&260&&c!==(c=`Skip backwards by ${ze(p[2],p[8].skip_length)} seconds`))&&O(d,"aria-label",c);let w=m;m=b(p),m!==w&&(ct(),K(W[w],1,1,()=>{W[w]=null}),dt(),_=W[m],_||(_=W[m]=B[m](p),_.c()),Y(_,1),_.m(f,null)),(!P||z&24&&g!==(g=p[4]?p[3]("common.play"):p[3]("common.pause")))&&O(f,"aria-label",g),(!P||z&260&&D!==(D="Skip forward by "+ze(p[2],p[8].skip_length)+" seconds"))&&O(y,"aria-label",D),p[5]&&p[0]===""?L?(L.p(p,z),z&33&&Y(L,1)):(L=ln(p),L.c(),Y(L,1),L.m(A,E)):L&&(ct(),K(L,1,1,()=>{L=null}),dt()),p[6]?S?(S.p(p,z),z&64&&Y(S,1)):(S=un(p),S.c(),Y(S,1),S.m(A,null)):S&&(ct(),K(S,1,1,()=>{S=null}),dt())},i(p){P||(Y(r.$$.fragment,p),Y(_),Y(k.$$.fragment,p),Y(L),Y(S),P=!0)},o(p){K(r.$$.fragment,p),K(_),K(k.$$.fragment,p),K(L),K(S),P=!1},d(p){p&&Ce(e),Ue(r),W[m].d(),Ue(k),L&&L.d(),S&&S.d(),T=!1,Hn(I)}}}function 
bo(o,e,t){let{waveform:n}=e,{audioDuration:i}=e,{i18n:s}=e,{playing:a}=e,{showRedo:u=!1}=e,{interactive:l=!1}=e,{handle_trim_audio:d}=e,{mode:r=""}=e,{container:c}=e,{handle_reset_value:h}=e,{waveform_settings:f={}}=e,{trimDuration:m=0}=e,_=[.5,1,1.5,2],g=_[1],v,y=null,k,D,M="";const A=()=>{t(18,y=v.addRegion({start:i/4,end:i/2,color:"hsla(15, 85%, 40%, 0.4)",drag:!0,resize:!0})),t(14,m=y.end-y.start)},E=()=>{if(n&&v&&y){const p=y.start,z=y.end;d(p,z),t(0,r=""),t(18,y=null)}},P=()=>{v?.getRegions().forEach(p=>{p.remove()}),v?.clearRegions()},T=()=>{P(),r==="edit"?t(0,r=""):(t(0,r="edit"),A())},I=(p,z)=>{let w,j;y&&(p==="left"?z==="ArrowLeft"?(w=y.start-.05,j=y.end):(w=y.start+.05,j=y.end):z==="ArrowLeft"?(w=y.start,j=y.end-.05):(w=y.start,j=y.end+.05),y.setOptions({start:w,end:j}),t(14,m=y.end-y.start))},B=()=>{t(9,g=_[(_.indexOf(g)+1)%_.length]),n.setPlaybackRate(g)},W=()=>n.skip(ze(i,f.skip_length)*-1),b=()=>n.playPause(),L=()=>n.skip(ze(i,f.skip_length)),S=()=>{h(),P(),t(0,r="")};return o.$$set=p=>{"waveform"in p&&t(1,n=p.waveform),"audioDuration"in p&&t(2,i=p.audioDuration),"i18n"in p&&t(3,s=p.i18n),"playing"in p&&t(4,a=p.playing),"showRedo"in p&&t(5,u=p.showRedo),"interactive"in p&&t(6,l=p.interactive),"handle_trim_audio"in p&&t(15,d=p.handle_trim_audio),"mode"in p&&t(0,r=p.mode),"container"in p&&t(16,c=p.container),"handle_reset_value"in p&&t(7,h=p.handle_reset_value),"waveform_settings"in p&&t(8,f=p.waveform_settings),"trimDuration"in p&&t(14,m=p.trimDuration)},o.$$.update=()=>{if(o.$$.dirty&2&&t(17,v=n.registerPlugin(ro.create())),o.$$.dirty&131072&&v?.on("region-out",p=>{p.play()}),o.$$.dirty&131072&&v?.on("region-updated",p=>{t(14,m=p.end-p.start)}),o.$$.dirty&131072&&v?.on("region-clicked",(p,z)=>{z.stopPropagation(),t(18,y=p),p.play()}),o.$$.dirty&2031616&&y){const p=c.children[0].shadowRoot;t(20,D=p.querySelector('[data-resize="right"]')),t(19,k=p.querySelector('[data-resize="left"]')),k&&D&&(k.setAttribute("role","button"),D.setAttribute("role","button"),k?.setAttribute("aria-label","Drag to adjust start time"),D?.setAttribute("aria-label","Drag to adjust end time"),k?.setAttribute("tabindex","0"),D?.setAttribute("tabindex","0"),k.addEventListener("focus",()=>{v&&t(21,M="left")}),D.addEventListener("focus",()=>{v&&t(21,M="right")}))}o.$$.dirty&2228224&&v&&window.addEventListener("keydown",p=>{p.key==="ArrowLeft"?I(M,"ArrowLeft"):p.key==="ArrowRight"&&I(M,"ArrowRight")})},[r,n,i,s,a,u,l,h,f,g,_,E,P,T,m,d,c,v,y,k,D,M,B,W,b,L,S]}class qn extends lo{constructor(e){super(),co(this,e,bo,vo,ho,{waveform:1,audioDuration:2,i18n:3,playing:4,showRedo:5,interactive:6,handle_trim_audio:15,mode:0,container:16,handle_reset_value:7,waveform_settings:8,trimDuration:14})}}const{SvelteComponent:wo,add_flush_callback:dn,append:se,attr:$,bind:cn,binding_callbacks:et,check_outros:Un,create_component:Ut,destroy_component:jt,detach:Ft,element:ge,empty:yo,group_outros:jn,init:ko,insert:Vt,mount_component:Xt,safe_not_equal:Co,set_data:Eo,space:lt,text:Ro,transition_in:we,transition_out:De}=window.__gradio__svelte__internal,{onMount:So}=window.__gradio__svelte__internal;function Do(o){let e,t,n,i,s,a,u,l,d,r,c,h,f,m=o[0]==="edit"&&o[13]>0&&hn(o),_=o[8]&&fn(o);return{c(){e=ge("div"),t=ge("div"),n=ge("div"),i=lt(),s=ge("div"),a=ge("time"),a.textContent="0:00",u=lt(),l=ge("div"),m&&m.c(),d=lt(),r=ge("time"),r.textContent="0:00",c=lt(),_&&_.c(),$(n,"id","waveform"),$(n,"class","svelte-15pl8d9"),$(t,"class","waveform-container 
svelte-15pl8d9"),$(a,"id","time"),$(a,"class","svelte-15pl8d9"),$(r,"id","duration"),$(r,"class","svelte-15pl8d9"),$(s,"class","timestamps svelte-15pl8d9"),$(e,"class","component-wrapper svelte-15pl8d9"),$(e,"data-testid",h=o[2]?"waveform-"+o[2]:"unlabelled-audio")},m(g,v){Vt(g,e,v),se(e,t),se(t,n),o[19](n),se(e,i),se(e,s),se(s,a),o[20](a),se(s,u),se(s,l),m&&m.m(l,null),se(l,d),se(l,r),o[21](r),se(e,c),_&&_.m(e,null),f=!0},p(g,v){g[0]==="edit"&&g[13]>0?m?m.p(g,v):(m=hn(g),m.c(),m.m(l,d)):m&&(m.d(1),m=null),g[8]?_?(_.p(g,v),v&256&&we(_,1)):(_=fn(g),_.c(),we(_,1),_.m(e,null)):_&&(jn(),De(_,1,1,()=>{_=null}),Un()),(!f||v&4&&h!==(h=g[2]?"waveform-"+g[2]:"unlabelled-audio"))&&$(e,"data-testid",h)},i(g){f||(we(_),f=!0)},o(g){De(_),f=!1},d(g){g&&Ft(e),o[19](null),o[20](null),m&&m.d(),o[21](null),_&&_.d()}}}function Mo(o){let e,t;return e=new zn({props:{size:"small",$$slots:{default:[Lo]},$$scope:{ctx:o}}}),{c(){Ut(e.$$.fragment)},m(n,i){Xt(e,n,i),t=!0},p(n,i){const s={};i&67108864&&(s.$$scope={dirty:i,ctx:n}),e.$set(s)},i(n){t||(we(e.$$.fragment,n),t=!0)},o(n){De(e.$$.fragment,n),t=!1},d(n){jt(e,n)}}}function hn(o){let e,t=o[14](o[13])+"",n;return{c(){e=ge("time"),n=Ro(t),$(e,"id","trim-duration"),$(e,"class","svelte-15pl8d9")},m(i,s){Vt(i,e,s),se(e,n)},p(i,s){s&8192&&t!==(t=i[14](i[13])+"")&&Eo(n,t)},d(i){i&&Ft(e)}}}function fn(o){let e,t,n,i;function s(l){o[22](l)}function a(l){o[23](l)}let u={container:o[7],waveform:o[8],playing:o[11],audioDuration:o[12],i18n:o[3],interactive:o[4],handle_trim_audio:o[15],showRedo:o[4],handle_reset_value:o[6],waveform_settings:o[5]};return o[0]!==void 0&&(u.mode=o[0]),o[13]!==void 0&&(u.trimDuration=o[13]),e=new qn({props:u}),et.push(()=>cn(e,"mode",s)),et.push(()=>cn(e,"trimDuration",a)),{c(){Ut(e.$$.fragment)},m(l,d){Xt(e,l,d),i=!0},p(l,d){const r={};d&128&&(r.container=l[7]),d&256&&(r.waveform=l[8]),d&2048&&(r.playing=l[11]),d&4096&&(r.audioDuration=l[12]),d&8&&(r.i18n=l[3]),d&16&&(r.interactive=l[4]),d&16&&(r.showRedo=l[4]),d&64&&(r.handle_reset_value=l[6]),d&32&&(r.waveform_settings=l[5]),!t&&d&1&&(t=!0,r.mode=l[0],dn(()=>t=!1)),!n&&d&8192&&(n=!0,r.trimDuration=l[13],dn(()=>n=!1)),e.$set(r)},i(l){i||(we(e.$$.fragment,l),i=!0)},o(l){De(e.$$.fragment,l),i=!1},d(l){jt(e,l)}}}function Lo(o){let e,t;return e=new gt({}),{c(){Ut(e.$$.fragment)},m(n,i){Xt(e,n,i),t=!0},i(n){t||(we(e.$$.fragment,n),t=!0)},o(n){De(e.$$.fragment,n),t=!1},d(n){jt(e,n)}}}function Ao(o){let e,t,n,i;const s=[Mo,Do],a=[];function u(l,d){return l[1]===null?0:1}return e=u(o),t=a[e]=s[e](o),{c(){t.c(),n=yo()},m(l,d){a[e].m(l,d),Vt(l,n,d),i=!0},p(l,[d]){let r=e;e=u(l),e===r?a[e].p(l,d):(jn(),De(a[r],1,1,()=>{a[r]=null}),Un(),t=a[e],t?t.p(l,d):(t=a[e]=s[e](l),t.c()),we(t,1),t.m(n.parentNode,n))},i(l){i||(we(t),i=!0)},o(l){De(t),i=!1},d(l){l&&Ft(n),a[e].d(l)}}}function Po(o,e,t){let{value:n=null}=e,{label:i}=e,{autoplay:s}=e,{i18n:a}=e,{dispatch:u}=e,{dispatch_blob:l=()=>Promise.resolve()}=e,{interactive:d=!1}=e,{waveform_settings:r={}}=e,{mode:c=""}=e,{handle_reset_value:h=()=>{}}=e,f,m,_=!1,g,v,y,k=0;const D=b=>{const L=Math.floor(b/60),p=`0${Math.round(b)%60}`.slice(-2);return`${L}:${p}`},M=()=>{t(8,m=He.create({container:f,url:n?.url,...r}))},A=async(b,L)=>{t(0,c="");const S=m.getDecodedData();S&&await Bt(S,b,L).then(async p=>{await l([p],"change"),m.destroy(),M()}),u("edit")};async function E(b){await hi(b).then(L=>{if(L)return m?.load(L)})}So(()=>{window.addEventListener("keydown",b=>{b.key==="ArrowRight"&&c!=="edit"?mt(m,.1):b.key==="ArrowLeft"&&c!=="edit"&&mt(m,-.1)})});function 
P(b){et[b?"unshift":"push"](()=>{f=b,t(7,f),t(8,m)})}function T(b){et[b?"unshift":"push"](()=>{g=b,t(9,g),t(8,m)})}function I(b){et[b?"unshift":"push"](()=>{v=b,t(10,v),t(8,m)})}function B(b){c=b,t(0,c)}function W(b){k=b,t(13,k)}return o.$$set=b=>{"value"in b&&t(1,n=b.value),"label"in b&&t(2,i=b.label),"autoplay"in b&&t(16,s=b.autoplay),"i18n"in b&&t(3,a=b.i18n),"dispatch"in b&&t(17,u=b.dispatch),"dispatch_blob"in b&&t(18,l=b.dispatch_blob),"interactive"in b&&t(4,d=b.interactive),"waveform_settings"in b&&t(5,r=b.waveform_settings),"mode"in b&&t(0,c=b.mode),"handle_reset_value"in b&&t(6,h=b.handle_reset_value)},o.$$.update=()=>{o.$$.dirty&384&&f!==void 0&&(m!==void 0&&m.destroy(),t(7,f.innerHTML="",f),M(),t(11,_=!1)),o.$$.dirty&65792&&s&&(m?.play(),t(11,_=!0)),o.$$.dirty&1280&&m?.on("decode",b=>{t(12,y=b),v&&t(10,v.textContent=D(b),v)}),o.$$.dirty&768&&m?.on("timeupdate",b=>g&&t(9,g.textContent=D(b),g)),o.$$.dirty&131328&&m?.on("finish",()=>{t(11,_=!1),u("stop"),u("end")}),o.$$.dirty&131328&&m?.on("pause",()=>{t(11,_=!1),u("pause")}),o.$$.dirty&131328&&m?.on("play",()=>{t(11,_=!0),u("play")}),o.$$.dirty&2&&n?.url&&E(n.url)},[c,n,i,a,d,r,h,f,m,g,v,_,y,k,D,A,s,u,l,P,T,I,B,W]}class To extends wo{constructor(e){super(),ko(this,e,Po,Ao,Co,{value:1,label:2,autoplay:16,i18n:3,dispatch:17,dispatch_blob:18,interactive:4,waveform_settings:5,mode:0,handle_reset_value:6})}}const Fn=To;const{SvelteComponent:Wo,append:zo,attr:We,bubble:mn,check_outros:Ot,create_component:Xe,destroy_component:Ge,detach:tt,element:Vn,empty:Bo,group_outros:It,init:Oo,insert:nt,mount_component:Ye,safe_not_equal:Io,space:Nt,transition_in:Q,transition_out:ie}=window.__gradio__svelte__internal,{createEventDispatcher:No}=window.__gradio__svelte__internal;function Ho(o){let e,t;return e=new zn({props:{size:"small",$$slots:{default:[Uo]},$$scope:{ctx:o}}}),{c(){Xe(e.$$.fragment)},m(n,i){Ye(e,n,i),t=!0},p(n,i){const s={};i&4096&&(s.$$scope={dirty:i,ctx:n}),e.$set(s)},i(n){t||(Q(e.$$.fragment,n),t=!0)},o(n){ie(e.$$.fragment,n),t=!1},d(n){Ge(e,n)}}}function qo(o){let e,t,n,i,s,a=o[4]&&_n(o),u=o[5]&&pn(o);return i=new Fn({props:{value:o[0],label:o[1],autoplay:o[3],i18n:o[6],dispatch:o[8],waveform_settings:o[7]}}),{c(){e=Vn("div"),a&&a.c(),t=Nt(),u&&u.c(),n=Nt(),Xe(i.$$.fragment),We(e,"class","icon-buttons svelte-rvdo70")},m(l,d){nt(l,e,d),a&&a.m(e,null),zo(e,t),u&&u.m(e,null),nt(l,n,d),Ye(i,l,d),s=!0},p(l,d){l[4]?a?(a.p(l,d),d&16&&Q(a,1)):(a=_n(l),a.c(),Q(a,1),a.m(e,t)):a&&(It(),ie(a,1,1,()=>{a=null}),Ot()),l[5]?u?(u.p(l,d),d&32&&Q(u,1)):(u=pn(l),u.c(),Q(u,1),u.m(e,null)):u&&(It(),ie(u,1,1,()=>{u=null}),Ot());const r={};d&1&&(r.value=l[0]),d&2&&(r.label=l[1]),d&8&&(r.autoplay=l[3]),d&64&&(r.i18n=l[6]),d&128&&(r.waveform_settings=l[7]),i.$set(r)},i(l){s||(Q(a),Q(u),Q(i.$$.fragment,l),s=!0)},o(l){ie(a),ie(u),ie(i.$$.fragment,l),s=!1},d(l){l&&(tt(e),tt(n)),a&&a.d(),u&&u.d(),Ge(i,l)}}}function Uo(o){let e,t;return e=new gt({}),{c(){Xe(e.$$.fragment)},m(n,i){Ye(e,n,i),t=!0},i(n){t||(Q(e.$$.fragment,n),t=!0)},o(n){ie(e.$$.fragment,n),t=!1},d(n){Ge(e,n)}}}function _n(o){let e,t,n,i,s;return t=new ri({props:{Icon:li,label:o[6]("common.download")}}),{c(){e=Vn("a"),Xe(t.$$.fragment),We(e,"href",n=o[0].url),We(e,"target",window.__is_colab__?"_blank":null),We(e,"download",i=o[0].url)},m(a,u){nt(a,e,u),Ye(t,e,null),s=!0},p(a,u){const 
l={};u&64&&(l.label=a[6]("common.download")),t.$set(l),(!s||u&1&&n!==(n=a[0].url))&&We(e,"href",n),(!s||u&1&&i!==(i=a[0].url))&&We(e,"download",i)},i(a){s||(Q(t.$$.fragment,a),s=!0)},o(a){ie(t.$$.fragment,a),s=!1},d(a){a&&tt(e),Ge(t)}}}function pn(o){let e,t;return e=new ai({props:{i18n:o[6],formatter:o[9],value:o[0]}}),e.$on("error",o[10]),e.$on("share",o[11]),{c(){Xe(e.$$.fragment)},m(n,i){Ye(e,n,i),t=!0},p(n,i){const s={};i&64&&(s.i18n=n[6]),i&1&&(s.value=n[0]),e.$set(s)},i(n){t||(Q(e.$$.fragment,n),t=!0)},o(n){ie(e.$$.fragment,n),t=!1},d(n){Ge(e,n)}}}function jo(o){let e,t,n,i,s,a;e=new Wn({props:{show_label:o[2],Icon:gt,float:!1,label:o[1]||o[6]("audio.audio")}});const u=[qo,Ho],l=[];function d(r,c){return r[0]!==null?0:1}return n=d(o),i=l[n]=u[n](o),{c(){Xe(e.$$.fragment),t=Nt(),i.c(),s=Bo()},m(r,c){Ye(e,r,c),nt(r,t,c),l[n].m(r,c),nt(r,s,c),a=!0},p(r,[c]){const h={};c&4&&(h.show_label=r[2]),c&66&&(h.label=r[1]||r[6]("audio.audio")),e.$set(h);let f=n;n=d(r),n===f?l[n].p(r,c):(It(),ie(l[f],1,1,()=>{l[f]=null}),Ot(),i=l[n],i?i.p(r,c):(i=l[n]=u[n](r),i.c()),Q(i,1),i.m(s.parentNode,s))},i(r){a||(Q(e.$$.fragment,r),Q(i),a=!0)},o(r){ie(e.$$.fragment,r),ie(i),a=!1},d(r){r&&(tt(t),tt(s)),Ge(e,r),l[n].d(r)}}}function Fo(o,e,t){let{value:n=null}=e,{label:i}=e,{show_label:s=!0}=e,{autoplay:a}=e,{show_download_button:u=!0}=e,{show_share_button:l=!1}=e,{i18n:d}=e,{waveform_settings:r={}}=e;const c=No(),h=async _=>_?` `:"";function f(_){mn.call(this,o,_)}function m(_){mn.call(this,o,_)}return o.$$set=_=>{"value"in _&&t(0,n=_.value),"label"in _&&t(1,i=_.label),"show_label"in _&&t(2,s=_.show_label),"autoplay"in _&&t(3,a=_.autoplay),"show_download_button"in _&&t(4,u=_.show_download_button),"show_share_button"in _&&t(5,l=_.show_share_button),"i18n"in _&&t(6,d=_.i18n),"waveform_settings"in _&&t(7,r=_.waveform_settings)},o.$$.update=()=>{o.$$.dirty&1&&n&&c("change",n)},[n,i,s,a,u,l,d,r,c,h,f,m]}class Vo extends Wo{constructor(e){super(),Oo(this,e,Fo,jo,Io,{value:0,label:1,show_label:2,autoplay:3,show_download_button:4,show_share_button:5,i18n:6,waveform_settings:7})}}const Xo=Vo;function Tt(o,e,t,n){return new(t||(t=Promise))(function(i,s){function a(d){try{l(n.next(d))}catch(r){s(r)}}function u(d){try{l(n.throw(d))}catch(r){s(r)}}function l(d){var r;d.done?i(d.value):(r=d.value,r instanceof t?r:new t(function(c){c(r)})).then(a,u)}l((n=n.apply(o,e||[])).next())})}class Go{constructor(){this.listeners={},this.on=this.addEventListener,this.un=this.removeEventListener}addEventListener(e,t,n){if(this.listeners[e]||(this.listeners[e]=new Set),this.listeners[e].add(t),n?.once){const i=()=>{this.removeEventListener(e,i),this.removeEventListener(e,t)};return this.addEventListener(e,i),i}return()=>this.removeEventListener(e,t)}removeEventListener(e,t){var n;(n=this.listeners[e])===null||n===void 0||n.delete(t)}once(e,t){return this.on(e,t,{once:!0})}unAll(){this.listeners={}}emit(e,...t){this.listeners[e]&&this.listeners[e].forEach(n=>n(...t))}}class Yo extends Go{constructor(e){super(),this.subscriptions=[],this.options=e}onInit(){}init(e){this.wavesurfer=e,this.onInit()}destroy(){this.emit("destroy"),this.subscriptions.forEach(e=>e())}}const Zo=["audio/webm","audio/wav","audio/mpeg","audio/mp4","audio/mp3"];class it extends Yo{constructor(e){var t;super(Object.assign(Object.assign({},e),{audioBitsPerSecond:(t=e.audioBitsPerSecond)!==null&&t!==void 0?t:128e3})),this.stream=null,this.mediaRecorder=null}static create(e){return new it(e||{})}renderMicStream(e){const t=new 
AudioContext,n=t.createMediaStreamSource(e),i=t.createAnalyser();n.connect(i);const s=i.frequencyBinCount,a=new Float32Array(s),u=s/t.sampleRate;let l;const d=()=>{i.getFloatTimeDomainData(a),this.wavesurfer&&(this.wavesurfer.options.cursorWidth=0,this.wavesurfer.options.interact=!1,this.wavesurfer.load("",[a],u)),l=requestAnimationFrame(d)};return d(),()=>{cancelAnimationFrame(l),n?.disconnect(),t?.close()}}startMic(e){return Tt(this,void 0,void 0,function*(){let t;try{t=yield navigator.mediaDevices.getUserMedia({audio:!e?.deviceId||{deviceId:e.deviceId}})}catch(i){throw new Error("Error accessing the microphone: "+i.message)}const n=this.renderMicStream(t);return this.subscriptions.push(this.once("destroy",n)),this.stream=t,t})}stopMic(){this.stream&&(this.stream.getTracks().forEach(e=>e.stop()),this.stream=null,this.mediaRecorder=null)}startRecording(e){return Tt(this,void 0,void 0,function*(){const t=this.stream||(yield this.startMic(e)),n=this.mediaRecorder||new MediaRecorder(t,{mimeType:this.options.mimeType||Zo.find(s=>MediaRecorder.isTypeSupported(s)),audioBitsPerSecond:this.options.audioBitsPerSecond});this.mediaRecorder=n,this.stopRecording();const i=[];n.ondataavailable=s=>{s.data.size>0&&i.push(s.data)},n.onstop=()=>{var s;const a=new Blob(i,{type:n.mimeType});this.emit("record-end",a),this.options.renderRecordedAudio!==!1&&((s=this.wavesurfer)===null||s===void 0||s.load(URL.createObjectURL(a)))},n.start(),this.emit("record-start")})}isRecording(){var e;return((e=this.mediaRecorder)===null||e===void 0?void 0:e.state)==="recording"}isPaused(){var e;return((e=this.mediaRecorder)===null||e===void 0?void 0:e.state)==="paused"}stopRecording(){var e;this.isRecording()&&((e=this.mediaRecorder)===null||e===void 0||e.stop())}pauseRecording(){var e;this.isRecording()&&((e=this.mediaRecorder)===null||e===void 0||e.pause(),this.emit("record-pause"))}resumeRecording(){var e;this.isPaused()&&((e=this.mediaRecorder)===null||e===void 0||e.resume(),this.emit("record-resume"))}static getAvailableAudioDevices(){return Tt(this,void 0,void 0,function*(){return navigator.mediaDevices.enumerateDevices().then(e=>e.filter(t=>t.kind==="audioinput"))})}destroy(){super.destroy(),this.stopRecording(),this.stopMic()}}const{SvelteComponent:Jo,append:F,attr:G,create_component:Ko,destroy_component:Qo,destroy_each:$o,detach:wt,element:ae,empty:xo,ensure_array_like:gn,init:es,insert:yt,listen:Ke,mount_component:ts,run_all:ns,safe_not_equal:is,set_data:Be,set_input_value:Ht,space:Qe,text:Oe,transition_in:os,transition_out:ss}=window.__gradio__svelte__internal,{onMount:rs}=window.__gradio__svelte__internal;function vn(o,e,t){const n=o.slice();return n[14]=e[t],n}function as(o){let e,t=gn(o[2]),n=[];for(let i=0;i{u=document.getElementById("record"),l=document.getElementById("pause"),d=document.getElementById("resume"),r=document.getElementById("stop"),c=document.getElementById("stop-paused")});const h=()=>n.startRecording(),f=()=>{n.isPaused()&&(n.resumeRecording(),n.stopRecording()),n.stopRecording()},m=()=>{n.isPaused()&&(n.resumeRecording(),n.stopRecording()),n.stopRecording()},_=()=>n.pauseRecording(),g=()=>n.resumeRecording();return o.$$set=v=>{"record"in v&&t(0,n=v.record),"i18n"in v&&t(1,i=v.i18n),"dispatch"in v&&t(3,s=v.dispatch)},o.$$.update=()=>{if(o.$$.dirty&10)try{let v=[];it.getAvailableAudioDevices().then(y=>{t(2,a=y),y.forEach(k=>{k.deviceId&&v.push(k)}),t(2,a=v)})}catch(v){throw v instanceof 
DOMException&&v.name=="NotAllowedError"&&s("error",i("audio.allow_recording_access")),v}o.$$.dirty&1&&n.on("record-start",()=>{n.startMic(),u.style.display="none",r.style.display="flex",l.style.display="block"}),o.$$.dirty&1&&n.on("record-end",()=>{n.isPaused()&&(n.resumeRecording(),n.stopRecording()),n.stopMic(),u.style.display="flex",r.style.display="none",l.style.display="none",u.disabled=!1}),o.$$.dirty&1&&n.on("record-pause",()=>{l.style.display="none",d.style.display="block",r.style.display="none",c.style.display="flex"}),o.$$.dirty&1&&n.on("record-resume",()=>{l.style.display="block",d.style.display="none",u.style.display="none",r.style.display="flex",c.style.display="none"})},[n,i,a,s,h,f,m,_,g]}class cs extends Jo{constructor(e){super(),es(this,e,ds,us,is,{record:0,i18n:1,dispatch:3})}}const{SvelteComponent:hs,add_flush_callback:ht,append:ne,attr:Z,bind:ft,binding_callbacks:Re,check_outros:wn,create_component:Xn,destroy_component:Gn,detach:ot,element:he,group_outros:yn,init:fs,insert:st,mount_component:Yn,noop:ms,safe_not_equal:_s,set_data:Zn,space:Ie,text:Jn,transition_in:ve,transition_out:Ne}=window.__gradio__svelte__internal,{onMount:ps}=window.__gradio__svelte__internal;function kn(o){let e,t,n,i,s,a=o[0]==="edit"&&o[16]>0&&Cn(o);function u(r,c){return r[15]?vs:gs}let l=u(o),d=l(o);return{c(){e=he("div"),t=he("time"),t.textContent="0:00",n=Ie(),i=he("div"),a&&a.c(),s=Ie(),d.c(),Z(t,"id","time"),Z(t,"class","svelte-imtedr"),Z(e,"id","timestamps"),Z(e,"class","svelte-imtedr")},m(r,c){st(r,e,c),ne(e,t),o[22](t),ne(e,n),ne(e,i),a&&a.m(i,null),ne(i,s),d.m(i,null)},p(r,c){r[0]==="edit"&&r[16]>0?a?a.p(r,c):(a=Cn(r),a.c(),a.m(i,s)):a&&(a.d(1),a=null),l===(l=u(r))&&d?d.p(r,c):(d.d(1),d=l(r),d&&(d.c(),d.m(i,null)))},d(r){r&&ot(e),o[22](null),a&&a.d(),d.d()}}}function Cn(o){let e,t=o[17](o[16])+"",n;return{c(){e=he("time"),n=Jn(t),Z(e,"id","trim-duration"),Z(e,"class","svelte-imtedr")},m(i,s){st(i,e,s),ne(e,n)},p(i,s){s&65536&&t!==(t=i[17](i[16])+"")&&Zn(n,t)},d(i){i&&ot(e)}}}function gs(o){let e;return{c(){e=he("time"),e.textContent="0:00",Z(e,"id","duration"),Z(e,"class","svelte-imtedr")},m(t,n){st(t,e,n),o[23](e)},p:ms,d(t){t&&ot(e),o[23](null)}}}function vs(o){let e,t=o[17](o[14])+"",n;return{c(){e=he("time"),n=Jn(t),Z(e,"id","duration"),Z(e,"class","svelte-imtedr")},m(i,s){st(i,e,s),ne(e,n)},p(i,s){s&16384&&t!==(t=i[17](i[14])+"")&&Zn(n,t)},d(i){i&&ot(e)}}}function En(o){let e,t,n;function i(a){o[24](a)}let s={i18n:o[1],dispatch:o[2]};return o[6]!==void 0&&(s.record=o[6]),e=new cs({props:s}),Re.push(()=>ft(e,"record",i)),{c(){Xn(e.$$.fragment)},m(a,u){Yn(e,a,u),n=!0},p(a,u){const l={};u&2&&(l.i18n=a[1]),u&4&&(l.dispatch=a[2]),!t&&u&64&&(t=!0,l.record=a[6],ht(()=>t=!1)),e.$set(l)},i(a){n||(ve(e.$$.fragment,a),n=!0)},o(a){Ne(e.$$.fragment,a),n=!1},d(a){Gn(e,a)}}}function Rn(o){let e,t,n,i,s;function a(r){o[25](r)}function u(r){o[26](r)}function l(r){o[27](r)}let d={container:o[12],playing:o[11],audioDuration:o[13],i18n:o[1],interactive:!0,handle_trim_audio:o[18],showRedo:!0,handle_reset_value:o[4],waveform_settings:o[3]};return o[5]!==void 0&&(d.waveform=o[5]),o[16]!==void 0&&(d.trimDuration=o[16]),o[0]!==void 0&&(d.mode=o[0]),e=new qn({props:d}),Re.push(()=>ft(e,"waveform",a)),Re.push(()=>ft(e,"trimDuration",u)),Re.push(()=>ft(e,"mode",l)),{c(){Xn(e.$$.fragment)},m(r,c){Yn(e,r,c),s=!0},p(r,c){const 
h={};c&4096&&(h.container=r[12]),c&2048&&(h.playing=r[11]),c&8192&&(h.audioDuration=r[13]),c&2&&(h.i18n=r[1]),c&16&&(h.handle_reset_value=r[4]),c&8&&(h.waveform_settings=r[3]),!t&&c&32&&(t=!0,h.waveform=r[5],ht(()=>t=!1)),!n&&c&65536&&(n=!0,h.trimDuration=r[16],ht(()=>n=!1)),!i&&c&1&&(i=!0,h.mode=r[0],ht(()=>i=!1)),e.$set(h)},i(r){s||(ve(e.$$.fragment,r),s=!0)},o(r){Ne(e.$$.fragment,r),s=!1},d(r){Gn(e,r)}}}function bs(o){let e,t,n,i,s,a,u,l,d=(o[15]||o[7])&&kn(o),r=o[10]&&!o[7]&&En(o),c=o[5]&&o[7]&&Rn(o);return{c(){e=he("div"),t=he("div"),n=Ie(),i=he("div"),s=Ie(),d&&d.c(),a=Ie(),r&&r.c(),u=Ie(),c&&c.c(),Z(t,"id","microphone"),Z(t,"data-testid","microphone-waveform"),Z(t,"class","svelte-imtedr"),Z(i,"id","recording"),Z(e,"class","component-wrapper svelte-imtedr")},m(h,f){st(h,e,f),ne(e,t),ne(e,n),ne(e,i),o[21](i),ne(e,s),d&&d.m(e,null),ne(e,a),r&&r.m(e,null),ne(e,u),c&&c.m(e,null),l=!0},p(h,[f]){h[15]||h[7]?d?d.p(h,f):(d=kn(h),d.c(),d.m(e,a)):d&&(d.d(1),d=null),h[10]&&!h[7]?r?(r.p(h,f),f&1152&&ve(r,1)):(r=En(h),r.c(),ve(r,1),r.m(e,u)):r&&(yn(),Ne(r,1,1,()=>{r=null}),wn()),h[5]&&h[7]?c?(c.p(h,f),f&160&&ve(c,1)):(c=Rn(h),c.c(),ve(c,1),c.m(e,null)):c&&(yn(),Ne(c,1,1,()=>{c=null}),wn())},i(h){l||(ve(r),ve(c),l=!0)},o(h){Ne(r),Ne(c),l=!1},d(h){h&&ot(e),o[21](null),d&&d.d(),r&&r.d(),c&&c.d()}}}function ws(o,e,t){let{mode:n}=e,{i18n:i}=e,{dispatch:s}=e,{dispatch_blob:a}=e,{waveform_settings:u={}}=e,{handle_reset_value:l}=e,d,r,c=!1,h,f,m=null,_,g,v,y=0,k,D=!1,M=0;const A=()=>{clearInterval(k),t(20,k=setInterval(()=>{t(14,y++,y)},1e3))},E=w=>{const j=Math.floor(w/60),re=`0${Math.round(w)%60}`.slice(-2);return`${j}:${re}`},P=()=>{const w=document.getElementById("microphone");w&&(w.innerHTML=""),d!==void 0&&d.destroy(),w&&(t(10,d=He.create({...u,container:w})),t(6,f=d.registerPlugin(it.create())),f.startMic())},T=()=>{let w=document.getElementById("recording");!m||!w||t(5,r=He.create({container:w,url:m,...u}))},I=async(w,j)=>{t(0,n="edit");const oe=r.getDecodedData();oe&&await Bt(oe,w,j).then(async re=>{await a([re],"change"),r.destroy(),T()}),s("edit")};ps(()=>{P(),window.addEventListener("keydown",w=>{w.key==="ArrowRight"?mt(r,.1):w.key==="ArrowLeft"&&mt(r,-.1)})});function B(w){Re[w?"unshift":"push"](()=>{h=w,t(12,h)})}function W(w){Re[w?"unshift":"push"](()=>{_=w,t(8,_),t(5,r)})}function b(w){Re[w?"unshift":"push"](()=>{g=w,t(9,g),t(5,r)})}function L(w){f=w,t(6,f)}function S(w){r=w,t(5,r)}function p(w){M=w,t(16,M)}function z(w){n=w,t(0,n)}return o.$$set=w=>{"mode"in w&&t(0,n=w.mode),"i18n"in w&&t(1,i=w.i18n),"dispatch"in w&&t(2,s=w.dispatch),"dispatch_blob"in w&&t(19,a=w.dispatch_blob),"waveform_settings"in w&&t(3,u=w.waveform_settings),"handle_reset_value"in w&&t(4,l=w.handle_reset_value)},o.$$.update=()=>{o.$$.dirty&68&&f?.on("record-start",()=>{A(),t(15,D=!0),s("start_recording");let w=document.getElementById("microphone");w&&(w.style.display="block")}),o.$$.dirty&1572932&&f?.on("record-end",async w=>{t(14,y=0),t(15,D=!1),clearInterval(k),s("stop_recording");const j=await w.arrayBuffer(),re=await new AudioContext().decodeAudioData(j);re&&await Bt(re).then(async Ze=>{await 
a([Ze],"change")})}),o.$$.dirty&1048644&&f?.on("record-pause",()=>{s("pause_recording"),clearInterval(k)}),o.$$.dirty&64&&f?.on("record-resume",()=>{A()}),o.$$.dirty&544&&r?.on("decode",w=>{t(13,v=w),g&&t(9,g.textContent=E(w),g)}),o.$$.dirty&288&&r?.on("timeupdate",w=>_&&t(8,_.textContent=E(w),_)),o.$$.dirty&36&&r?.on("pause",()=>{s("pause"),t(11,c=!1)}),o.$$.dirty&36&&r?.on("play",()=>{s("play"),t(11,c=!0)}),o.$$.dirty&36&&r?.on("finish",()=>{s("stop"),s("end"),t(11,c=!1)}),o.$$.dirty&192&&f?.on("record-end",w=>{t(7,m=URL.createObjectURL(w));const j=document.getElementById("microphone"),oe=document.getElementById("recording");j&&(j.style.display="none"),oe&&m&&(oe.innerHTML="",T())})},[n,i,s,u,l,r,f,m,_,g,d,c,h,v,y,D,M,E,I,a,k,B,W,b,L,S,p,z]}class ys extends hs{constructor(e){super(),fs(this,e,ws,bs,_s,{mode:0,i18n:1,dispatch:2,dispatch_blob:19,waveform_settings:3,handle_reset_value:4})}}const{SvelteComponent:ks,append:ye,attr:Se,detach:Gt,element:Fe,init:Cs,insert:Yt,listen:Kn,noop:Sn,null_to_empty:Dn,safe_not_equal:Es,set_data:Qn,set_style:Mn,space:Zt,text:$n}=window.__gradio__svelte__internal,{onMount:Rs}=window.__gradio__svelte__internal;function Ss(o){let e,t,n,i=o[4]("audio.record")+"",s,a,u;return{c(){e=Fe("button"),t=Fe("span"),t.innerHTML=' ',n=Zt(),s=$n(i),Se(t,"class","record-icon"),Se(e,"class","record-button svelte-16e5vwh")},m(l,d){Yt(l,e,d),ye(e,t),ye(e,n),ye(e,s),a||(u=Kn(e,"click",o[8]),a=!0)},p(l,d){d&16&&i!==(i=l[4]("audio.record")+"")&&Qn(s,i)},d(l){l&&Gt(e),a=!1,u()}}}function Ds(o){let e,t,n,i=(o[1]?o[4]("audio.pause"):o[4]("audio.stop"))+"",s,a,u,l;return{c(){e=Fe("button"),t=Fe("span"),t.innerHTML=' ',n=Zt(),s=$n(i),Se(t,"class","record-icon"),Se(e,"class",a=Dn(o[1]?"stop-button-paused":"stop-button")+" svelte-16e5vwh")},m(d,r){Yt(d,e,r),ye(e,t),ye(e,n),ye(e,s),u||(l=Kn(e,"click",o[7]),u=!0)},p(d,r){r&18&&i!==(i=(d[1]?d[4]("audio.pause"):d[4]("audio.stop"))+"")&&Qn(s,i),r&2&&a!==(a=Dn(d[1]?"stop-button-paused":"stop-button")+" svelte-16e5vwh")&&Se(e,"class",a)},d(d){d&&Gt(e),u=!1,l()}}}function Ms(o){let e,t,n;function i(u,l){return u[0]?Ds:Ss}let s=i(o),a=s(o);return{c(){e=Fe("div"),t=Fe("div"),n=Zt(),a.c(),Se(t,"id","microphone"),Mn(t,"display",o[0]?"block":"none"),Se(e,"class","mic-wrap svelte-16e5vwh")},m(u,l){Yt(u,e,l),ye(e,t),ye(e,n),a.m(e,null)},p(u,[l]){l&1&&Mn(t,"display",u[0]?"block":"none"),s===(s=i(u))&&a?a.p(u,l):(a.d(1),a=s(u),a&&(a.c(),a.m(e,null)))},i:Sn,o:Sn,d(u){u&&Gt(e),a.d()}}}function Ls(o,e,t){let{recording:n=!1}=e,{paused_recording:i=!1}=e,{stop:s}=e,{record:a}=e,{i18n:u}=e,{waveform_settings:l={}}=e,d,r;Rs(()=>{c()});const c=()=>{d!==void 0&&d.destroy(),d=He.create({...l,height:100,container:"#microphone"}),t(5,r=d.registerPlugin(it.create()))},h=()=>{r.stopMic(),s()},f=()=>{r.startMic(),a()};return o.$$set=m=>{"recording"in m&&t(0,n=m.recording),"paused_recording"in m&&t(1,i=m.paused_recording),"stop"in m&&t(2,s=m.stop),"record"in m&&t(3,a=m.record),"i18n"in m&&t(4,u=m.i18n),"waveform_settings"in m&&t(6,l=m.waveform_settings)},[n,i,s,a,u,r,l,h,f]}class As extends 
ks{constructor(e){super(),Cs(this,e,Ls,Ms,Es,{recording:0,paused_recording:1,stop:2,record:3,i18n:4,waveform_settings:6})}}const{SvelteComponent:Ps,add_flush_callback:Jt,append:Wt,attr:$e,bind:Kt,binding_callbacks:Qt,check_outros:_t,create_component:le,create_slot:Ts,destroy_component:ue,detach:fe,element:zt,empty:$t,get_all_dirty_from_scope:Ws,get_slot_changes:zs,group_outros:pt,init:Bs,insert:me,listen:Ln,mount_component:de,noop:Os,run_all:Is,safe_not_equal:Ns,space:Ve,transition_in:q,transition_out:U,update_slot_base:Hs}=window.__gradio__svelte__internal,{onDestroy:qs,createEventDispatcher:Us}=window.__gradio__svelte__internal;function js(o){let e,t,n,i,s;e=new qt({props:{i18n:o[9],absolute:!0}}),e.$on("clear",o[17]),e.$on("edit",o[27]);function a(l){o[28](l)}let u={value:o[1],label:o[3],autoplay:o[8],i18n:o[9],dispatch:o[14],dispatch_blob:o[15],waveform_settings:o[10],handle_reset_value:o[11],interactive:!0};return o[13]!==void 0&&(u.mode=o[13]),n=new Fn({props:u}),Qt.push(()=>Kt(n,"mode",a)),{c(){le(e.$$.fragment),t=Ve(),le(n.$$.fragment)},m(l,d){de(e,l,d),me(l,t,d),de(n,l,d),s=!0},p(l,d){const r={};d[0]&512&&(r.i18n=l[9]),e.$set(r);const c={};d[0]&2&&(c.value=l[1]),d[0]&8&&(c.label=l[3]),d[0]&256&&(c.autoplay=l[8]),d[0]&512&&(c.i18n=l[9]),d[0]&1024&&(c.waveform_settings=l[10]),d[0]&2048&&(c.handle_reset_value=l[11]),!i&&d[0]&8192&&(i=!0,c.mode=l[13],Jt(()=>i=!1)),n.$set(c)},i(l){s||(q(e.$$.fragment,l),q(n.$$.fragment,l),s=!0)},o(l){U(e.$$.fragment,l),U(n.$$.fragment,l),s=!1},d(l){l&&fe(t),ue(e,l),ue(n,l)}}}function Fs(o){let e,t,n,i;const s=[Xs,Vs],a=[];function u(l,d){return l[2]==="microphone"?0:l[2]==="upload"?1:-1}return~(e=u(o))&&(t=a[e]=s[e](o)),{c(){t&&t.c(),n=$t()},m(l,d){~e&&a[e].m(l,d),me(l,n,d),i=!0},p(l,d){let r=e;e=u(l),e===r?~e&&a[e].p(l,d):(t&&(pt(),U(a[r],1,1,()=>{a[r]=null}),_t()),~e?(t=a[e],t?t.p(l,d):(t=a[e]=s[e](l),t.c()),q(t,1),t.m(n.parentNode,n)):t=null)},i(l){i||(q(t),i=!0)},o(l){U(t),i=!1},d(l){l&&fe(n),~e&&a[e].d(l)}}}function Vs(o){let e,t,n,i,s;e=new qt({props:{i18n:o[9],absolute:!0}}),e.$on("clear",o[17]);function a(l){o[26](l)}let u={filetype:"audio/aac,audio/midi,audio/mpeg,audio/ogg,audio/wav,audio/x-wav,audio/opus,audio/webm,audio/flac,audio/vnd.rn-realaudio,audio/x-ms-wma,audio/x-aiff,audio/amr,audio/*",root:o[4],$$slots:{default:[Gs]},$$scope:{ctx:o}};return o[0]!==void 0&&(u.dragging=o[0]),n=new pi({props:u}),Qt.push(()=>Kt(n,"dragging",a)),n.$on("load",o[18]),{c(){le(e.$$.fragment),t=Ve(),le(n.$$.fragment)},m(l,d){de(e,l,d),me(l,t,d),de(n,l,d),s=!0},p(l,d){const r={};d[0]&512&&(r.i18n=l[9]),e.$set(r);const c={};d[0]&16&&(c.root=l[4]),d[1]&1&&(c.$$scope={dirty:d,ctx:l}),!i&&d[0]&1&&(i=!0,c.dragging=l[0],Jt(()=>i=!1)),n.$set(c)},i(l){s||(q(e.$$.fragment,l),q(n.$$.fragment,l),s=!0)},o(l){U(e.$$.fragment,l),U(n.$$.fragment,l),s=!1},d(l){l&&fe(t),ue(e,l),ue(n,l)}}}function Xs(o){let e,t,n,i,s,a;e=new qt({props:{i18n:o[9],absolute:!0}}),e.$on("clear",o[17]);const u=[Zs,Ys],l=[];function d(r,c){return r[7]?0:1}return n=d(o),i=l[n]=u[n](o),{c(){le(e.$$.fragment),t=Ve(),i.c(),s=$t()},m(r,c){de(e,r,c),me(r,t,c),l[n].m(r,c),me(r,s,c),a=!0},p(r,c){const h={};c[0]&512&&(h.i18n=r[9]),e.$set(h);let f=n;n=d(r),n===f?l[n].p(r,c):(pt(),U(l[f],1,1,()=>{l[f]=null}),_t(),i=l[n],i?i.p(r,c):(i=l[n]=u[n](r),i.c()),q(i,1),i.m(s.parentNode,s))},i(r){a||(q(e.$$.fragment,r),q(i),a=!0)},o(r){U(e.$$.fragment,r),U(i),a=!1},d(r){r&&(fe(t),fe(s)),ue(e,r),l[n].d(r)}}}function Gs(o){let e;const 
t=o[24].default,n=Ts(t,o,o[31],null);return{c(){n&&n.c()},m(i,s){n&&n.m(i,s),e=!0},p(i,s){n&&n.p&&(!e||s[1]&1)&&Hs(n,t,i,i[31],e?zs(t,i[31],s,null):Ws(i[31]),null)},i(i){e||(q(n,i),e=!0)},o(i){U(n,i),e=!1},d(i){n&&n.d(i)}}}function Ys(o){let e,t,n;function i(a){o[25](a)}let s={i18n:o[9],dispatch:o[14],dispatch_blob:o[15],waveform_settings:o[10],handle_reset_value:o[11]};return o[13]!==void 0&&(s.mode=o[13]),e=new ys({props:s}),Qt.push(()=>Kt(e,"mode",i)),{c(){le(e.$$.fragment)},m(a,u){de(e,a,u),n=!0},p(a,u){const l={};u[0]&512&&(l.i18n=a[9]),u[0]&1024&&(l.waveform_settings=a[10]),u[0]&2048&&(l.handle_reset_value=a[11]),!t&&u[0]&8192&&(t=!0,l.mode=a[13],Jt(()=>t=!1)),e.$set(l)},i(a){n||(q(e.$$.fragment,a),n=!0)},o(a){U(e.$$.fragment,a),n=!1},d(a){ue(e,a)}}}function Zs(o){let e,t;return e=new As({props:{record:o[16],recording:o[12],stop:o[19],i18n:o[9],waveform_settings:o[10]}}),{c(){le(e.$$.fragment)},m(n,i){de(e,n,i),t=!0},p(n,i){const s={};i[0]&4096&&(s.recording=n[12]),i[0]&512&&(s.i18n=n[9]),i[0]&1024&&(s.waveform_settings=n[10]),e.$set(s)},i(n){t||(q(e.$$.fragment,n),t=!0)},o(n){U(e.$$.fragment,n),t=!1},d(n){ue(e,n)}}}function An(o){let e,t,n,i,s,a,u,l,d;return n=new gi({}),a=new Ui({}),{c(){e=zt("span"),t=zt("button"),le(n.$$.fragment),i=Ve(),s=zt("button"),le(a.$$.fragment),$e(t,"class","icon svelte-10shjqk"),$e(t,"aria-label","Upload audio"),$e(s,"class","icon svelte-10shjqk"),$e(s,"aria-label","Record audio"),$e(e,"class","source-selection svelte-10shjqk")},m(r,c){me(r,e,c),Wt(e,t),de(n,t,null),Wt(e,i),Wt(e,s),de(a,s,null),u=!0,l||(d=[Ln(t,"click",o[29]),Ln(s,"click",o[30])],l=!0)},p:Os,i(r){u||(q(n.$$.fragment,r),q(a.$$.fragment,r),u=!0)},o(r){U(n.$$.fragment,r),U(a.$$.fragment,r),u=!1},d(r){r&&fe(e),ue(n),ue(a),l=!1,Is(d)}}}function Js(o){let e,t,n,i,s,a,u;e=new Wn({props:{show_label:o[5],Icon:gt,float:o[2]==="upload"&&o[1]===null,label:o[3]||o[9]("audio.audio")}});const l=[Fs,js],d=[];function r(h,f){return h[1]===null||h[7]?0:1}n=r(o),i=d[n]=l[n](o);let c=o[6].length>1&&An(o);return{c(){le(e.$$.fragment),t=Ve(),i.c(),s=Ve(),c&&c.c(),a=$t()},m(h,f){de(e,h,f),me(h,t,f),d[n].m(h,f),me(h,s,f),c&&c.m(h,f),me(h,a,f),u=!0},p(h,f){const m={};f[0]&32&&(m.show_label=h[5]),f[0]&6&&(m.float=h[2]==="upload"&&h[1]===null),f[0]&520&&(m.label=h[3]||h[9]("audio.audio")),e.$set(m);let _=n;n=r(h),n===_?d[n].p(h,f):(pt(),U(d[_],1,1,()=>{d[_]=null}),_t(),i=d[n],i?i.p(h,f):(i=d[n]=l[n](h),i.c()),q(i,1),i.m(s.parentNode,s)),h[6].length>1?c?(c.p(h,f),f[0]&64&&q(c,1)):(c=An(h),c.c(),q(c,1),c.m(a.parentNode,a)):c&&(pt(),U(c,1,1,()=>{c=null}),_t())},i(h){u||(q(e.$$.fragment,h),q(i),q(c),u=!0)},o(h){U(e.$$.fragment,h),U(i),U(c),u=!1},d(h){h&&(fe(t),fe(s),fe(a)),ue(e,h),d[n].d(h),c&&c.d(h)}}}const Ks=500,Pn=44;function Qs(o,e,t){let{$$slots:n={},$$scope:i}=e,{value:s=null}=e,{label:a}=e,{root:u}=e,{show_label:l=!0}=e,{sources:d=["microphone","upload"]}=e,{pending:r=!1}=e,{streaming:c=!1}=e,{autoplay:h=!1}=e,{i18n:f}=e,{waveform_settings:m={}}=e,{dragging:_}=e,{active_source:g}=e,{handle_reset_value:v=()=>{}}=e,y=!1,k,D="",M,A=[],E=!1,P=!1,T=[],I;function B(){I=[tn(()=>import("./module-94200622.js"),["assets/module-94200622.js","assets/Index-37584f50.js","assets/index-0526d562.js","assets/index-02e0d00d.css","assets/Index-5cf1892e.css"]),tn(()=>import("./module-1791af61.js"),[])]}c&&B();const W=Us(),b=async(R,X)=>{let ce=new File(R,"audio.wav");const ke=await fi([ce],X==="stream");t(1,s=(await mi(ke,u))?.filter(Boolean)[0]),W(X,s)};qs(()=>{c&&k&&k.state!=="inactive"&&k.stop()});async function L(){let 
R;try{R=await navigator.mediaDevices.getUserMedia({audio:!0})}catch(X){if(!navigator.mediaDevices){W("error",f("audio.no_device_support"));return}if(X instanceof DOMException&&X.name=="NotAllowedError"){W("error",f("audio.allow_recording_access"));return}throw X}if(R!=null){if(c){const[{MediaRecorder:X,register:ce},{connect:ke}]=await Promise.all(I);await ce(await ke()),k=new X(R,{mimeType:"audio/wav"}),k.addEventListener("dataavailable",S)}else k=new MediaRecorder(R),k.addEventListener("dataavailable",X=>{T.push(X.data)}),k.addEventListener("stop",async()=>{t(12,y=!1),await b(T,"change"),await b(T,"stop_recording"),T=[]});P=!0}}async function S(R){let X=await R.data.arrayBuffer(),ce=new Uint8Array(X);if(M||(t(21,M=new Uint8Array(X.slice(0,Pn))),ce=new Uint8Array(X.slice(Pn))),r)A.push(ce);else{let ke=[M].concat(A,[ce]);b(ke,"stream"),t(22,A=[])}}async function p(){t(12,y=!0),W("start_recording"),P||await L(),t(21,M=void 0),c&&k.start(Ks)}function z(){W("change",null),W("clear"),t(13,D=""),t(1,s=null)}function w({detail:R}){t(1,s=R),W("change",R),W("upload",R)}function j(){t(12,y=!1),c&&(W("stop_recording"),k.stop(),r&&t(23,E=!0),b(T,"change"),W("clear"),t(13,D=""))}function oe(R){D=R,t(13,D)}function re(R){_=R,t(0,_)}const Ze=()=>t(13,D="edit");function kt(R){D=R,t(13,D)}const Ct=()=>{z(),t(2,g="upload")},Et=()=>{z(),t(2,g="microphone")};return o.$$set=R=>{"value"in R&&t(1,s=R.value),"label"in R&&t(3,a=R.label),"root"in R&&t(4,u=R.root),"show_label"in R&&t(5,l=R.show_label),"sources"in R&&t(6,d=R.sources),"pending"in R&&t(20,r=R.pending),"streaming"in R&&t(7,c=R.streaming),"autoplay"in R&&t(8,h=R.autoplay),"i18n"in R&&t(9,f=R.i18n),"waveform_settings"in R&&t(10,m=R.waveform_settings),"dragging"in R&&t(0,_=R.dragging),"active_source"in R&&t(2,g=R.active_source),"handle_reset_value"in R&&t(11,v=R.handle_reset_value),"$$scope"in R&&t(31,i=R.$$scope)},o.$$.update=()=>{if(o.$$.dirty[0]&1&&W("drag",_),o.$$.dirty[0]&15728640&&E&&r===!1&&(t(23,E=!1),M&&A)){let R=[M].concat(A);t(22,A=[]),b(R,"stream")}},[_,s,g,a,u,l,d,c,h,f,m,v,y,D,W,b,p,z,w,j,r,M,A,E,n,oe,re,Ze,kt,Ct,Et,i]}class $s extends Ps{constructor(e){super(),Bs(this,e,Qs,Js,Ns,{value:1,label:3,root:4,show_label:5,sources:6,pending:20,streaming:7,autoplay:8,i18n:9,waveform_settings:10,dragging:0,active_source:2,handle_reset_value:11},null,[-1,-1])}}const xs=$s,{SvelteComponent:er,add_flush_callback:tr,assign:xn,bind:nr,binding_callbacks:ir,check_outros:or,create_component:Me,destroy_component:Le,detach:xt,empty:sr,flush:N,get_spread_object:ei,get_spread_update:ti,group_outros:rr,init:ar,insert:en,mount_component:Ae,safe_not_equal:lr,space:ni,transition_in:_e,transition_out:pe}=window.__gradio__svelte__internal;function ur(o){let e,t;return e=new Tn({props:{variant:o[0]===null&&o[20]==="upload"?"dashed":"solid",border_mode:o[21]?"focus":"base",padding:!1,elem_id:o[2],elem_classes:o[3],visible:o[4],container:o[10],scale:o[11],min_width:o[12],$$slots:{default:[hr]},$$scope:{ctx:o}}}),{c(){Me(e.$$.fragment)},m(n,i){Ae(e,n,i),t=!0},p(n,i){const s={};i[0]&1048577&&(s.variant=n[0]===null&&n[20]==="upload"?"dashed":"solid"),i[0]&2097152&&(s.border_mode=n[21]?"focus":"base"),i[0]&4&&(s.elem_id=n[2]),i[0]&8&&(s.elem_classes=n[3]),i[0]&16&&(s.visible=n[4]),i[0]&1024&&(s.container=n[10]),i[0]&2048&&(s.scale=n[11]),i[0]&4096&&(s.min_width=n[12]),i[0]&4137923|i[1]&16384&&(s.$$scope={dirty:i,ctx:n}),e.$set(s)},i(n){t||(_e(e.$$.fragment,n),t=!0)},o(n){pe(e.$$.fragment,n),t=!1},d(n){Le(e,n)}}}function dr(o){let e,t;return e=new 
Tn({props:{variant:"solid",border_mode:o[21]?"focus":"base",padding:!1,elem_id:o[2],elem_classes:o[3],visible:o[4],container:o[10],scale:o[11],min_width:o[12],$$slots:{default:[fr]},$$scope:{ctx:o}}}),{c(){Me(e.$$.fragment)},m(n,i){Ae(e,n,i),t=!0},p(n,i){const s={};i[0]&2097152&&(s.border_mode=n[21]?"focus":"base"),i[0]&4&&(s.elem_id=n[2]),i[0]&8&&(s.elem_classes=n[3]),i[0]&16&&(s.visible=n[4]),i[0]&1024&&(s.container=n[10]),i[0]&2048&&(s.scale=n[11]),i[0]&4096&&(s.min_width=n[12]),i[0]&844418|i[1]&16384&&(s.$$scope={dirty:i,ctx:n}),e.$set(s)},i(n){t||(_e(e.$$.fragment,n),t=!0)},o(n){pe(e.$$.fragment,n),t=!1},d(n){Le(e,n)}}}function cr(o){let e,t;return e=new vi({props:{i18n:o[18].i18n,type:"audio"}}),{c(){Me(e.$$.fragment)},m(n,i){Ae(e,n,i),t=!0},p(n,i){const s={};i[0]&262144&&(s.i18n=n[18].i18n),e.$set(s)},i(n){t||(_e(e.$$.fragment,n),t=!0)},o(n){pe(e.$$.fragment,n),t=!1},d(n){Le(e,n)}}}function hr(o){let e,t,n,i,s;const a=[{autoscroll:o[18].autoscroll},{i18n:o[18].i18n},o[1]];let u={};for(let r=0;rnr(n,"dragging",l)),n.$on("change",o[31]),n.$on("stream",o[32]),n.$on("drag",o[33]),n.$on("edit",o[34]),n.$on("play",o[35]),n.$on("pause",o[36]),n.$on("stop",o[37]),n.$on("end",o[38]),n.$on("start_recording",o[39]),n.$on("pause_recording",o[40]),n.$on("stop_recording",o[41]),n.$on("upload",o[42]),n.$on("clear",o[43]),n.$on("error",o[44]),{c(){Me(e.$$.fragment),t=ni(),Me(n.$$.fragment)},m(r,c){Ae(e,r,c),en(r,t,c),Ae(n,r,c),s=!0},p(r,c){const h=c[0]&262146?ti(a,[c[0]&262144&&{autoscroll:r[18].autoscroll},c[0]&262144&&{i18n:r[18].i18n},c[0]&2&&ei(r[1])]):{};e.$set(h);const f={};c[0]&128&&(f.label=r[7]),c[0]&512&&(f.show_label=r[9]),c[0]&524288&&(f.value=r[19]),c[0]&256&&(f.root=r[8]),c[0]&64&&(f.sources=r[6]),c[0]&1048576&&(f.active_source=r[20]),c[0]&65536&&(f.pending=r[16]),c[0]&131072&&(f.streaming=r[17]),c[0]&8192&&(f.autoplay=r[13]),c[0]&262144&&(f.i18n=r[18].i18n),c[0]&262144|c[1]&16384&&(f.$$scope={dirty:c,ctx:r}),!i&&c[0]&2097152&&(i=!0,f.dragging=r[21],tr(()=>i=!1)),n.$set(f)},i(r){s||(_e(e.$$.fragment,r),_e(n.$$.fragment,r),s=!0)},o(r){pe(e.$$.fragment,r),pe(n.$$.fragment,r),s=!1},d(r){r&&xt(t),Le(e,r),Le(n,r)}}}function fr(o){let e,t,n,i;const s=[{autoscroll:o[18].autoscroll},{i18n:o[18].i18n},o[1]];let a={};for(let u=0;u{a[r]=null}),or(),t=a[e],t?t.p(l,d):(t=a[e]=s[e](l),t.c()),_e(t,1),t.m(n.parentNode,n))},i(l){i||(_e(t),i=!0)},o(l){pe(t),i=!1},d(l){l&&xt(n),a[e].d(l)}}}function _r(o,e,t){let{elem_id:n=""}=e,{elem_classes:i=[]}=e,{visible:s=!0}=e,{interactive:a}=e,{value:u=null}=e,{sources:l}=e,{label:d}=e,{root:r}=e,{show_label:c}=e,{proxy_url:h}=e,{container:f=!0}=e,{scale:m=null}=e,{min_width:_=void 0}=e,{loading_status:g}=e,{autoplay:v=!1}=e,{show_download_button:y=!0}=e,{show_share_button:k=!1}=e,{waveform_options:D={}}=e,{pending:M}=e,{streaming:A}=e,{gradio:E}=e,P=null,T,I,B=u;const W=()=>{B===null||u===B||t(0,u=B)};let b;const L={height:50,waveColor:D.waveform_color||"#9ca3af",progressColor:D.waveform_progress_color||"#f97316",barWidth:2,barGap:3,barHeight:4,cursorWidth:2,cursorColor:"#ddd5e9",barRadius:10,dragToSeek:!0,mediaControls:D.show_controls},S=C=>E.dispatch("share",C.detail),p=C=>E.dispatch("error",C.detail);function z(C){b=C,t(21,b)}const 
w=({detail:C})=>t(0,u=C),j=({detail:C})=>{t(0,u=C),E.dispatch("stream",u)},oe=({detail:C})=>t(21,b=C),re=()=>E.dispatch("edit"),Ze=()=>E.dispatch("play"),kt=()=>E.dispatch("pause"),Ct=()=>E.dispatch("stop"),Et=()=>E.dispatch("end"),R=()=>E.dispatch("start_recording"),X=()=>E.dispatch("pause_recording"),ce=()=>E.dispatch("stop_recording"),ke=()=>E.dispatch("upload"),ii=()=>E.dispatch("clear"),oi=({detail:C})=>{t(1,g=g||{}),t(1,g.status="error",g),E.dispatch("error",C)};return o.$$set=C=>{"elem_id"in C&&t(2,n=C.elem_id),"elem_classes"in C&&t(3,i=C.elem_classes),"visible"in C&&t(4,s=C.visible),"interactive"in C&&t(5,a=C.interactive),"value"in C&&t(0,u=C.value),"sources"in C&&t(6,l=C.sources),"label"in C&&t(7,d=C.label),"root"in C&&t(8,r=C.root),"show_label"in C&&t(9,c=C.show_label),"proxy_url"in C&&t(24,h=C.proxy_url),"container"in C&&t(10,f=C.container),"scale"in C&&t(11,m=C.scale),"min_width"in C&&t(12,_=C.min_width),"loading_status"in C&&t(1,g=C.loading_status),"autoplay"in C&&t(13,v=C.autoplay),"show_download_button"in C&&t(14,y=C.show_download_button),"show_share_button"in C&&t(15,k=C.show_share_button),"waveform_options"in C&&t(25,D=C.waveform_options),"pending"in C&&t(16,M=C.pending),"streaming"in C&&t(17,A=C.streaming),"gradio"in C&&t(18,E=C.gradio)},o.$$.update=()=>{o.$$.dirty[0]&16777473&&t(19,T=_i(u,r,h)),o.$$.dirty[0]&134217729&&u&&B===null&&t(27,B=u),o.$$.dirty[0]&67371009&&JSON.stringify(u)!==JSON.stringify(P)&&(t(26,P=u),E.dispatch("change")),o.$$.dirty[0]&64&&l&&t(20,I=l[0])},[u,g,n,i,s,a,l,d,r,c,f,m,_,v,y,k,M,A,E,T,I,b,W,L,h,D,P,B,S,p,z,w,j,oe,re,Ze,kt,Ct,Et,R,X,ce,ke,ii,oi]}class pr extends er{constructor(e){super(),ar(this,e,_r,mr,lr,{elem_id:2,elem_classes:3,visible:4,interactive:5,value:0,sources:6,label:7,root:8,show_label:9,proxy_url:24,container:10,scale:11,min_width:12,loading_status:1,autoplay:13,show_download_button:14,show_share_button:15,waveform_options:25,pending:16,streaming:17,gradio:18},null,[-1,-1])}get elem_id(){return this.$$.ctx[2]}set elem_id(e){this.$$set({elem_id:e}),N()}get elem_classes(){return this.$$.ctx[3]}set elem_classes(e){this.$$set({elem_classes:e}),N()}get visible(){return this.$$.ctx[4]}set visible(e){this.$$set({visible:e}),N()}get interactive(){return this.$$.ctx[5]}set interactive(e){this.$$set({interactive:e}),N()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),N()}get sources(){return this.$$.ctx[6]}set sources(e){this.$$set({sources:e}),N()}get label(){return this.$$.ctx[7]}set label(e){this.$$set({label:e}),N()}get root(){return this.$$.ctx[8]}set root(e){this.$$set({root:e}),N()}get show_label(){return this.$$.ctx[9]}set show_label(e){this.$$set({show_label:e}),N()}get proxy_url(){return this.$$.ctx[24]}set proxy_url(e){this.$$set({proxy_url:e}),N()}get container(){return this.$$.ctx[10]}set container(e){this.$$set({container:e}),N()}get scale(){return this.$$.ctx[11]}set scale(e){this.$$set({scale:e}),N()}get min_width(){return this.$$.ctx[12]}set min_width(e){this.$$set({min_width:e}),N()}get loading_status(){return this.$$.ctx[1]}set loading_status(e){this.$$set({loading_status:e}),N()}get autoplay(){return this.$$.ctx[13]}set autoplay(e){this.$$set({autoplay:e}),N()}get show_download_button(){return this.$$.ctx[14]}set show_download_button(e){this.$$set({show_download_button:e}),N()}get show_share_button(){return this.$$.ctx[15]}set show_share_button(e){this.$$set({show_share_button:e}),N()}get waveform_options(){return this.$$.ctx[25]}set waveform_options(e){this.$$set({waveform_options:e}),N()}get 
pending(){return this.$$.ctx[16]}set pending(e){this.$$set({pending:e}),N()}get streaming(){return this.$$.ctx[17]}set streaming(e){this.$$set({streaming:e}),N()}get gradio(){return this.$$.ctx[18]}set gradio(e){this.$$set({gradio:e}),N()}}const Or=pr;export{Hr as BaseExample,xs as BaseInteractiveAudio,Fn as BasePlayer,Xo as BaseStaticAudio,Or as default};
-//# sourceMappingURL=index-fef9d5f8.js.map
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f8aef4a9.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f8aef4a9.js
deleted file mode 100644
index d14a477d9c67e7e54080d77d31801ae86690612b..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-f8aef4a9.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{L as e,S as m,S as p}from"./Index-c74a8b7c.js";import{T as i}from"./Blocks-f0dbd8c3.js";import"./index-50ad4c77.js";import"./svelte/svelte.js";import"./Button-8eeccca1.js";export{e as Loader,m as StatusTracker,i as Toast,p as default};
-//# sourceMappingURL=index-f8aef4a9.js.map
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/lfs.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/lfs.py
deleted file mode 100644
index a40951c2a3b6a139786203dc09d28714e7194782..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/lfs.py
+++ /dev/null
@@ -1,202 +0,0 @@
-"""
-Implementation of a custom transfer agent for the transfer type "multipart" for
-git-lfs.
-
-Inspired by:
-github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py
-
-Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
-
-
-To launch debugger while developing:
-
-``` [lfs "customtransfer.multipart"]
-path = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678
---wait-for-client
-/path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py
-lfs-multipart-upload ```"""
-
-import json
-import os
-import subprocess
-import sys
-from argparse import _SubParsersAction
-from typing import Dict, List, Optional
-
-from huggingface_hub.commands import BaseHuggingfaceCLICommand
-from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND, SliceFileObj
-
-from ..utils import get_session, hf_raise_for_status, logging
-
-
-logger = logging.get_logger(__name__)
-
-
-class LfsCommands(BaseHuggingfaceCLICommand):
- """
- Implementation of a custom transfer agent for the transfer type "multipart"
- for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom
- transfer agent is:
- https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
-
- This introduces two commands to the CLI:
-
- 1. $ huggingface-cli lfs-enable-largefiles
-
- This should be executed once for each model repo that contains a model file
- >5GB. It's documented in the error message you get if you just try to git
- push a 5GB file without having enabled it before.
-
- 2. $ huggingface-cli lfs-multipart-upload
-
- This command is called by lfs directly and is not meant to be called by the
- user.
- """
-
- @staticmethod
- def register_subcommand(parser: _SubParsersAction):
- enable_parser = parser.add_parser(
- "lfs-enable-largefiles",
- help="Configure your repository to enable upload of files > 5GB.",
- )
- enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
- enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))
-
- upload_parser = parser.add_parser(
- LFS_MULTIPART_UPLOAD_COMMAND,
- help="Command will get called by git-lfs, do not call it directly.",
- )
- upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))
-
-
-class LfsEnableCommand:
- def __init__(self, args):
- self.args = args
-
- def run(self):
- local_path = os.path.abspath(self.args.path)
- if not os.path.isdir(local_path):
- print("This does not look like a valid git repo.")
- exit(1)
- subprocess.run(
- "git config lfs.customtransfer.multipart.path huggingface-cli".split(),
- check=True,
- cwd=local_path,
- )
- subprocess.run(
- f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
- check=True,
- cwd=local_path,
- )
- print("Local repo set up for largefiles")
-
-
-def write_msg(msg: Dict):
- """Write out the message in Line delimited JSON."""
- msg_str = json.dumps(msg) + "\n"
- sys.stdout.write(msg_str)
- sys.stdout.flush()
-
-
-def read_msg() -> Optional[Dict]:
- """Read Line delimited JSON from stdin."""
- msg = json.loads(sys.stdin.readline().strip())
-
- if "terminate" in (msg.get("type"), msg.get("event")):
- # terminate message received
- return None
-
- if msg.get("event") not in ("download", "upload"):
- logger.critical("Received unexpected message")
- sys.exit(1)
-
- return msg
-
-
-class LfsUploadCommand:
- def __init__(self, args) -> None:
- self.args = args
-
- def run(self) -> None:
- # Immediately after invoking a custom transfer process, git-lfs
- # sends initiation data to the process over stdin.
- # This tells the process useful information about the configuration.
- init_msg = json.loads(sys.stdin.readline().strip())
- if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
- write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
- sys.exit(1)
-
- # The transfer process should use the information it needs from the
- # initiation structure, and also perform any one-off setup tasks it
- # needs to do. It should then respond on stdout with a simple empty
- # confirmation structure, as follows:
- write_msg({})
-
- # After the initiation exchange, git-lfs will send any number of
- # transfer requests to the stdin of the transfer process, in a serial sequence.
- while True:
- msg = read_msg()
- if msg is None:
- # When all transfers have been processed, git-lfs will send
- # a terminate event to the stdin of the transfer process.
- # On receiving this message the transfer process should
- # clean up and terminate. No response is expected.
- sys.exit(0)
-
- oid = msg["oid"]
- filepath = msg["path"]
- completion_url = msg["action"]["href"]
- header = msg["action"]["header"]
- chunk_size = int(header.pop("chunk_size"))
- presigned_urls: List[str] = list(header.values())
-
- # Send a "started" progress event to allow other workers to start.
- # Otherwise they're delayed until first "progress" event is reported,
- # i.e. after the first 5GB by default (!)
- write_msg(
- {
- "event": "progress",
- "oid": oid,
- "bytesSoFar": 1,
- "bytesSinceLast": 0,
- }
- )
-
- parts = []
- with open(filepath, "rb") as file:
- for i, presigned_url in enumerate(presigned_urls):
- with SliceFileObj(
- file,
- seek_from=i * chunk_size,
- read_limit=chunk_size,
- ) as data:
- r = get_session().put(presigned_url, data=data)
- hf_raise_for_status(r)
- parts.append(
- {
- "etag": r.headers.get("etag"),
- "partNumber": i + 1,
- }
- )
- # In order to support progress reporting while data is uploading / downloading,
- # the transfer process should post messages to stdout
- write_msg(
- {
- "event": "progress",
- "oid": oid,
- "bytesSoFar": (i + 1) * chunk_size,
- "bytesSinceLast": chunk_size,
- }
- )
- # Not precise but that's ok.
-
- r = get_session().post(
- completion_url,
- json={
- "oid": oid,
- "parts": parts,
- },
- )
- hf_raise_for_status(r)
-
- write_msg({"event": "complete", "oid": oid})
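The comments in the deleted lfs.py above describe a line-delimited JSON exchange between git-lfs and the custom transfer agent (an init handshake, per-object transfer requests, progress events, and a completion message). Below is a minimal sketch of what those messages could look like, based only on the fields the code above actually reads or writes; the oid, file path, and URLs are hypothetical placeholders.

```python
import json

# One JSON object per line on stdin/stdout; oid, path, and URLs are made up.
init_from_git_lfs = {"event": "init", "operation": "upload"}

transfer_request = {
    "event": "upload",
    "oid": "abc123",
    "path": "/tmp/pytorch_model.bin",
    "action": {
        "href": "https://hub.example/api/complete-multipart",
        "header": {
            "chunk_size": "5000000000",
            "part-00001": "https://s3.example/presigned-part-1",
            "part-00002": "https://s3.example/presigned-part-2",
        },
    },
}

progress_reply = {"event": "progress", "oid": "abc123", "bytesSoFar": 5000000000, "bytesSinceLast": 5000000000}
complete_reply = {"event": "complete", "oid": "abc123"}
terminate_from_git_lfs = {"event": "terminate"}

for msg in (init_from_git_lfs, transfer_request, progress_reply, complete_reply, terminate_from_git_lfs):
    print(json.dumps(msg))
```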
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp
deleted file mode 100644
index 575eec1188275064169e7ed533535617fc849d55..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-#include <algorithm>
-#include <cmath>
-#include <cstdint>
-#include <cstdlib>
-#include <fstream>
-#include <limits>
-#include <string>
-#include <vector>
-
-struct ufunc {
- std::string name;
- double (*f32func)(double);
- long double (*f64func)(long double);
- float f32ulp;
- float f64ulp;
-};
-
-template <typename T>
-T
-RandomFloat(T a, T b)
-{
- T random = ((T)rand()) / (T)RAND_MAX;
- T diff = b - a;
- T r = random * diff;
- return a + r;
-}
-
-template <typename T>
-void
-append_random_array(std::vector<T> &arr, T min, T max, size_t N)
-{
- for (size_t ii = 0; ii < N; ++ii)
- arr.emplace_back(RandomFloat(min, max));
-}
-
-template <typename T1, typename T2>
-std::vector<T1>
-computeTrueVal(const std::vector<T1> &in, T2 (*mathfunc)(T2))
-{
-    std::vector<T1> out;
- for (T1 elem : in) {
- T2 elem_d = (T2)elem;
- T1 out_elem = (T1)mathfunc(elem_d);
- out.emplace_back(out_elem);
- }
- return out;
-}
-
-/*
- * FP range:
- * [-inf, -maxflt, -1., -minflt, -minden, 0., minden, minflt, 1., maxflt, inf]
- */
-
-#define MINDEN std::numeric_limits<T>::denorm_min()
-#define MINFLT std::numeric_limits<T>::min()
-#define MAXFLT std::numeric_limits<T>::max()
-#define INF std::numeric_limits<T>::infinity()
-#define qNAN std::numeric_limits<T>::quiet_NaN()
-#define sNAN std::numeric_limits<T>::signaling_NaN()
-
-template <typename T>
-std::vector<T>
-generate_input_vector(std::string func)
-{
-    std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT,
- -MAXFLT, INF, -INF, qNAN, sNAN,
- -1.0, 1.0, 0.0, -0.0};
-
- // [-1.0, 1.0]
- if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) {
- append_random_array(input, -1.0, 1.0, 700);
- }
- // (0.0, INF]
- else if ((func == "log2") || (func == "log10")) {
- append_random_array(input, 0.0, 1.0, 200);
- append_random_array(input, MINDEN, MINFLT, 200);
- append_random_array(input, MINFLT, 1.0, 200);
- append_random_array(input, 1.0, MAXFLT, 200);
- }
- // (-1.0, INF]
- else if (func == "log1p") {
- append_random_array(input, -1.0, 1.0, 200);
- append_random_array(input, -MINFLT, -MINDEN, 100);
- append_random_array(input, -1.0, -MINFLT, 100);
- append_random_array(input, MINDEN, MINFLT, 100);
- append_random_array(input, MINFLT, 1.0, 100);
- append_random_array(input, 1.0, MAXFLT, 100);
- }
- // [1.0, INF]
- else if (func == "arccosh") {
- append_random_array(input, 1.0, 2.0, 400);
- append_random_array(input, 2.0, MAXFLT, 300);
- }
- // [-INF, INF]
- else {
- append_random_array(input, -1.0, 1.0, 100);
- append_random_array(input, MINDEN, MINFLT, 100);
- append_random_array(input, -MINFLT, -MINDEN, 100);
- append_random_array(input, MINFLT, 1.0, 100);
- append_random_array(input, -1.0, -MINFLT, 100);
- append_random_array(input, 1.0, MAXFLT, 100);
- append_random_array(input, -MAXFLT, -100.0, 100);
- }
-
- std::random_shuffle(input.begin(), input.end());
- return input;
-}
-
-int
-main()
-{
- srand(42);
-    std::vector<struct ufunc> umathfunc = {
- {"sin", sin, sin, 1.49, 1.00},
- {"cos", cos, cos, 1.49, 1.00},
- {"tan", tan, tan, 3.91, 3.93},
- {"arcsin", asin, asin, 3.12, 2.55},
- {"arccos", acos, acos, 2.1, 1.67},
- {"arctan", atan, atan, 2.3, 2.52},
- {"sinh", sinh, sinh, 1.55, 1.89},
- {"cosh", cosh, cosh, 2.48, 1.97},
- {"tanh", tanh, tanh, 1.38, 1.19},
- {"arcsinh", asinh, asinh, 1.01, 1.48},
- {"arccosh", acosh, acosh, 1.16, 1.05},
- {"arctanh", atanh, atanh, 1.45, 1.46},
- {"cbrt", cbrt, cbrt, 1.94, 1.82},
- //{"exp",exp,exp,3.76,1.53},
- {"exp2", exp2, exp2, 1.01, 1.04},
- {"expm1", expm1, expm1, 2.62, 2.1},
- //{"log",log,log,1.84,1.67},
- {"log10", log10, log10, 3.5, 1.92},
- {"log1p", log1p, log1p, 1.96, 1.93},
- {"log2", log2, log2, 2.12, 1.84},
- };
-
- for (int ii = 0; ii < umathfunc.size(); ++ii) {
- // ignore sin/cos
- if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) {
- std::string fileName =
- "umath-validation-set-" + umathfunc[ii].name + ".csv";
- std::ofstream txtOut;
- txtOut.open(fileName, std::ofstream::trunc);
- txtOut << "dtype,input,output,ulperrortol" << std::endl;
-
- // Single Precision
-            auto f32in = generate_input_vector<float>(umathfunc[ii].name);
-            auto f32out = computeTrueVal<float, double>(f32in,
-                                                        umathfunc[ii].f32func);
- for (int jj = 0; jj < f32in.size(); ++jj) {
- txtOut << "np.float32" << std::hex << ",0x"
-                       << *reinterpret_cast<uint32_t *>(&f32in[jj]) << ",0x"
-                       << *reinterpret_cast<uint32_t *>(&f32out[jj]) << ","
- << ceil(umathfunc[ii].f32ulp) << std::endl;
- }
-
- // Double Precision
-            auto f64in = generate_input_vector<double>(umathfunc[ii].name);
-            auto f64out = computeTrueVal<double, long double>(
-                    f64in, umathfunc[ii].f64func);
- for (int jj = 0; jj < f64in.size(); ++jj) {
- txtOut << "np.float64" << std::hex << ",0x"
-                       << *reinterpret_cast<uint64_t *>(&f64in[jj]) << ",0x"
-                       << *reinterpret_cast<uint64_t *>(&f64out[jj]) << ","
- << ceil(umathfunc[ii].f64ulp) << std::endl;
- }
- txtOut.close();
- }
- }
- return 0;
-}
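The generator above writes each input/output value as the raw hex bit pattern of the float ("0x..."), one row per case under the header dtype,input,output,ulperrortol. Below is a small sketch, assuming NumPy is available, of how such a row could be decoded back into floats; the row itself is a made-up example (input 1.0 and output log2(1.0) = 0.0), not taken from a real validation file.

```python
import numpy as np

def decode_hex_float(hex_str: str, dtype):
    """Reinterpret a hex bit pattern written by the generator as a float of the given dtype."""
    uint_dtype = np.uint32 if dtype == np.float32 else np.uint64
    return np.array([int(hex_str, 16)], dtype=uint_dtype).view(dtype)[0]

# Made-up row in the generator's format: dtype,input,output,ulperrortol
row = "np.float32,0x3f800000,0x00000000,2"
_, hex_in, hex_out, ulp_tol = row.split(",")

x = decode_hex_float(hex_in, np.float32)   # 1.0
y = decode_hex_float(hex_out, np.float32)  # 0.0 == log2(1.0)
print(x, y, int(ulp_tol))
```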
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py
deleted file mode 100644
index 7899b4aeac3fdef6548f3aadf76ff7718418f089..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py
+++ /dev/null
@@ -1,285 +0,0 @@
-import datetime
-
-import dateutil
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas import (
- DataFrame,
- Series,
-)
-import pandas._testing as tm
-
-
-class TestDataFrameMissingData:
- def test_dropEmptyRows(self, float_frame):
- N = len(float_frame.index)
- mat = np.random.default_rng(2).standard_normal(N)
- mat[:5] = np.nan
-
- frame = DataFrame({"foo": mat}, index=float_frame.index)
- original = Series(mat, index=float_frame.index, name="foo")
- expected = original.dropna()
- inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
-
- smaller_frame = frame.dropna(how="all")
- # check that original was preserved
- tm.assert_series_equal(frame["foo"], original)
- return_value = inplace_frame1.dropna(how="all", inplace=True)
- tm.assert_series_equal(smaller_frame["foo"], expected)
- tm.assert_series_equal(inplace_frame1["foo"], expected)
- assert return_value is None
-
- smaller_frame = frame.dropna(how="all", subset=["foo"])
- return_value = inplace_frame2.dropna(how="all", subset=["foo"], inplace=True)
- tm.assert_series_equal(smaller_frame["foo"], expected)
- tm.assert_series_equal(inplace_frame2["foo"], expected)
- assert return_value is None
-
- def test_dropIncompleteRows(self, float_frame):
- N = len(float_frame.index)
- mat = np.random.default_rng(2).standard_normal(N)
- mat[:5] = np.nan
-
- frame = DataFrame({"foo": mat}, index=float_frame.index)
- frame["bar"] = 5
- original = Series(mat, index=float_frame.index, name="foo")
- inp_frame1, inp_frame2 = frame.copy(), frame.copy()
-
- smaller_frame = frame.dropna()
- tm.assert_series_equal(frame["foo"], original)
- return_value = inp_frame1.dropna(inplace=True)
-
- exp = Series(mat[5:], index=float_frame.index[5:], name="foo")
- tm.assert_series_equal(smaller_frame["foo"], exp)
- tm.assert_series_equal(inp_frame1["foo"], exp)
- assert return_value is None
-
- samesize_frame = frame.dropna(subset=["bar"])
- tm.assert_series_equal(frame["foo"], original)
- assert (frame["bar"] == 5).all()
- return_value = inp_frame2.dropna(subset=["bar"], inplace=True)
- tm.assert_index_equal(samesize_frame.index, float_frame.index)
- tm.assert_index_equal(inp_frame2.index, float_frame.index)
- assert return_value is None
-
- def test_dropna(self):
- df = DataFrame(np.random.default_rng(2).standard_normal((6, 4)))
- df.iloc[:2, 2] = np.nan
-
- dropped = df.dropna(axis=1)
- expected = df.loc[:, [0, 1, 3]]
- inp = df.copy()
- return_value = inp.dropna(axis=1, inplace=True)
- tm.assert_frame_equal(dropped, expected)
- tm.assert_frame_equal(inp, expected)
- assert return_value is None
-
- dropped = df.dropna(axis=0)
- expected = df.loc[list(range(2, 6))]
- inp = df.copy()
- return_value = inp.dropna(axis=0, inplace=True)
- tm.assert_frame_equal(dropped, expected)
- tm.assert_frame_equal(inp, expected)
- assert return_value is None
-
- # threshold
- dropped = df.dropna(axis=1, thresh=5)
- expected = df.loc[:, [0, 1, 3]]
- inp = df.copy()
- return_value = inp.dropna(axis=1, thresh=5, inplace=True)
- tm.assert_frame_equal(dropped, expected)
- tm.assert_frame_equal(inp, expected)
- assert return_value is None
-
- dropped = df.dropna(axis=0, thresh=4)
- expected = df.loc[range(2, 6)]
- inp = df.copy()
- return_value = inp.dropna(axis=0, thresh=4, inplace=True)
- tm.assert_frame_equal(dropped, expected)
- tm.assert_frame_equal(inp, expected)
- assert return_value is None
-
- dropped = df.dropna(axis=1, thresh=4)
- tm.assert_frame_equal(dropped, df)
-
- dropped = df.dropna(axis=1, thresh=3)
- tm.assert_frame_equal(dropped, df)
-
- # subset
- dropped = df.dropna(axis=0, subset=[0, 1, 3])
- inp = df.copy()
- return_value = inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
- tm.assert_frame_equal(dropped, df)
- tm.assert_frame_equal(inp, df)
- assert return_value is None
-
- # all
- dropped = df.dropna(axis=1, how="all")
- tm.assert_frame_equal(dropped, df)
-
- df[2] = np.nan
- dropped = df.dropna(axis=1, how="all")
- expected = df.loc[:, [0, 1, 3]]
- tm.assert_frame_equal(dropped, expected)
-
- # bad input
- msg = "No axis named 3 for object type DataFrame"
- with pytest.raises(ValueError, match=msg):
- df.dropna(axis=3)
-
- def test_drop_and_dropna_caching(self):
-        # test that the cacher updates
- original = Series([1, 2, np.nan], name="A")
- expected = Series([1, 2], dtype=original.dtype, name="A")
- df = DataFrame({"A": original.values.copy()})
- df2 = df.copy()
- df["A"].dropna()
- tm.assert_series_equal(df["A"], original)
-
- ser = df["A"]
- return_value = ser.dropna(inplace=True)
- tm.assert_series_equal(ser, expected)
- tm.assert_series_equal(df["A"], original)
- assert return_value is None
-
- df2["A"].drop([1])
- tm.assert_series_equal(df2["A"], original)
-
- ser = df2["A"]
- return_value = ser.drop([1], inplace=True)
- tm.assert_series_equal(ser, original.drop([1]))
- tm.assert_series_equal(df2["A"], original)
- assert return_value is None
-
- def test_dropna_corner(self, float_frame):
- # bad input
- msg = "invalid how option: foo"
- with pytest.raises(ValueError, match=msg):
- float_frame.dropna(how="foo")
- # non-existent column - 8303
- with pytest.raises(KeyError, match=r"^\['X'\]$"):
- float_frame.dropna(subset=["A", "X"])
-
- def test_dropna_multiple_axes(self):
- df = DataFrame(
- [
- [1, np.nan, 2, 3],
- [4, np.nan, 5, 6],
- [np.nan, np.nan, np.nan, np.nan],
- [7, np.nan, 8, 9],
- ]
- )
-
- # GH20987
- with pytest.raises(TypeError, match="supplying multiple axes"):
- df.dropna(how="all", axis=[0, 1])
- with pytest.raises(TypeError, match="supplying multiple axes"):
- df.dropna(how="all", axis=(0, 1))
-
- inp = df.copy()
- with pytest.raises(TypeError, match="supplying multiple axes"):
- inp.dropna(how="all", axis=(0, 1), inplace=True)
-
- def test_dropna_tz_aware_datetime(self):
- # GH13407
- df = DataFrame()
- dt1 = datetime.datetime(2015, 1, 1, tzinfo=dateutil.tz.tzutc())
- dt2 = datetime.datetime(2015, 2, 2, tzinfo=dateutil.tz.tzutc())
- df["Time"] = [dt1]
- result = df.dropna(axis=0)
- expected = DataFrame({"Time": [dt1]})
- tm.assert_frame_equal(result, expected)
-
- # Ex2
- df = DataFrame({"Time": [dt1, None, np.nan, dt2]})
- result = df.dropna(axis=0)
- expected = DataFrame([dt1, dt2], columns=["Time"], index=[0, 3])
- tm.assert_frame_equal(result, expected)
-
- def test_dropna_categorical_interval_index(self):
- # GH 25087
- ii = pd.IntervalIndex.from_breaks([0, 2.78, 3.14, 6.28])
- ci = pd.CategoricalIndex(ii)
- df = DataFrame({"A": list("abc")}, index=ci)
-
- expected = df
- result = df.dropna()
- tm.assert_frame_equal(result, expected)
-
- def test_dropna_with_duplicate_columns(self):
- df = DataFrame(
- {
- "A": np.random.default_rng(2).standard_normal(5),
- "B": np.random.default_rng(2).standard_normal(5),
- "C": np.random.default_rng(2).standard_normal(5),
- "D": ["a", "b", "c", "d", "e"],
- }
- )
- df.iloc[2, [0, 1, 2]] = np.nan
- df.iloc[0, 0] = np.nan
- df.iloc[1, 1] = np.nan
- df.iloc[:, 3] = np.nan
- expected = df.dropna(subset=["A", "B", "C"], how="all")
- expected.columns = ["A", "A", "B", "C"]
-
- df.columns = ["A", "A", "B", "C"]
-
- result = df.dropna(subset=["A", "C"], how="all")
- tm.assert_frame_equal(result, expected)
-
- def test_set_single_column_subset(self):
- # GH 41021
- df = DataFrame({"A": [1, 2, 3], "B": list("abc"), "C": [4, np.nan, 5]})
- expected = DataFrame(
- {"A": [1, 3], "B": list("ac"), "C": [4.0, 5.0]}, index=[0, 2]
- )
- result = df.dropna(subset="C")
- tm.assert_frame_equal(result, expected)
-
- def test_single_column_not_present_in_axis(self):
- # GH 41021
- df = DataFrame({"A": [1, 2, 3]})
-
- # Column not present
- with pytest.raises(KeyError, match="['D']"):
- df.dropna(subset="D", axis=0)
-
- def test_subset_is_nparray(self):
- # GH 41021
- df = DataFrame({"A": [1, 2, np.nan], "B": list("abc"), "C": [4, np.nan, 5]})
- expected = DataFrame({"A": [1.0], "B": ["a"], "C": [4.0]})
- result = df.dropna(subset=np.array(["A", "C"]))
- tm.assert_frame_equal(result, expected)
-
- def test_no_nans_in_frame(self, axis):
- # GH#41965
- df = DataFrame([[1, 2], [3, 4]], columns=pd.RangeIndex(0, 2))
- expected = df.copy()
- result = df.dropna(axis=axis)
- tm.assert_frame_equal(result, expected, check_index_type=True)
-
- def test_how_thresh_param_incompatible(self):
- # GH46575
- df = DataFrame([1, 2, pd.NA])
- msg = "You cannot set both the how and thresh arguments at the same time"
- with pytest.raises(TypeError, match=msg):
- df.dropna(how="all", thresh=2)
-
- with pytest.raises(TypeError, match=msg):
- df.dropna(how="any", thresh=2)
-
- with pytest.raises(TypeError, match=msg):
- df.dropna(how=None, thresh=None)
-
- @pytest.mark.parametrize("val", [1, 1.5])
- def test_dropna_ignore_index(self, val):
- # GH#31725
- df = DataFrame({"a": [1, 2, val]}, index=[3, 2, 1])
- result = df.dropna(ignore_index=True)
- expected = DataFrame({"a": [1, 2, val]})
- tm.assert_frame_equal(result, expected)
-
- df.dropna(ignore_index=True, inplace=True)
- tm.assert_frame_equal(df, expected)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_interval.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_interval.py
deleted file mode 100644
index 192aaacbac2b56c6f9bbd3970cd0c4a210ddf035..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_interval.py
+++ /dev/null
@@ -1,279 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import (
- Interval,
- Period,
- Timedelta,
- Timestamp,
-)
-import pandas._testing as tm
-import pandas.core.common as com
-
-
-@pytest.fixture
-def interval():
- return Interval(0, 1)
-
-
-class TestInterval:
- def test_properties(self, interval):
- assert interval.closed == "right"
- assert interval.left == 0
- assert interval.right == 1
- assert interval.mid == 0.5
-
- def test_repr(self, interval):
- assert repr(interval) == "Interval(0, 1, closed='right')"
- assert str(interval) == "(0, 1]"
-
- interval_left = Interval(0, 1, closed="left")
- assert repr(interval_left) == "Interval(0, 1, closed='left')"
- assert str(interval_left) == "[0, 1)"
-
- def test_contains(self, interval):
- assert 0.5 in interval
- assert 1 in interval
- assert 0 not in interval
-
- interval_both = Interval(0, 1, "both")
- assert 0 in interval_both
- assert 1 in interval_both
-
- interval_neither = Interval(0, 1, closed="neither")
- assert 0 not in interval_neither
- assert 0.5 in interval_neither
- assert 1 not in interval_neither
-
- def test_equal(self):
- assert Interval(0, 1) == Interval(0, 1, closed="right")
- assert Interval(0, 1) != Interval(0, 1, closed="left")
- assert Interval(0, 1) != 0
-
- def test_comparison(self):
- msg = (
- "'<' not supported between instances of "
- "'pandas._libs.interval.Interval' and 'int'"
- )
- with pytest.raises(TypeError, match=msg):
- Interval(0, 1) < 2
-
- assert Interval(0, 1) < Interval(1, 2)
- assert Interval(0, 1) < Interval(0, 2)
- assert Interval(0, 1) < Interval(0.5, 1.5)
- assert Interval(0, 1) <= Interval(0, 1)
- assert Interval(0, 1) > Interval(-1, 2)
- assert Interval(0, 1) >= Interval(0, 1)
-
- def test_hash(self, interval):
- # should not raise
- hash(interval)
-
- @pytest.mark.parametrize(
- "left, right, expected",
- [
- (0, 5, 5),
- (-2, 5.5, 7.5),
- (10, 10, 0),
- (10, np.inf, np.inf),
- (-np.inf, -5, np.inf),
- (-np.inf, np.inf, np.inf),
- (Timedelta("0 days"), Timedelta("5 days"), Timedelta("5 days")),
- (Timedelta("10 days"), Timedelta("10 days"), Timedelta("0 days")),
- (Timedelta("1H10min"), Timedelta("5H5min"), Timedelta("3H55min")),
- (Timedelta("5S"), Timedelta("1H"), Timedelta("59min55S")),
- ],
- )
- def test_length(self, left, right, expected):
- # GH 18789
- iv = Interval(left, right)
- result = iv.length
- assert result == expected
-
- @pytest.mark.parametrize(
- "left, right, expected",
- [
- ("2017-01-01", "2017-01-06", "5 days"),
- ("2017-01-01", "2017-01-01 12:00:00", "12 hours"),
- ("2017-01-01 12:00", "2017-01-01 12:00:00", "0 days"),
- ("2017-01-01 12:01", "2017-01-05 17:31:00", "4 days 5 hours 30 min"),
- ],
- )
- @pytest.mark.parametrize("tz", (None, "UTC", "CET", "US/Eastern"))
- def test_length_timestamp(self, tz, left, right, expected):
- # GH 18789
- iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))
- result = iv.length
- expected = Timedelta(expected)
- assert result == expected
-
- @pytest.mark.parametrize(
- "left, right",
- [
- (0, 1),
- (Timedelta("0 days"), Timedelta("1 day")),
- (Timestamp("2018-01-01"), Timestamp("2018-01-02")),
- (
- Timestamp("2018-01-01", tz="US/Eastern"),
- Timestamp("2018-01-02", tz="US/Eastern"),
- ),
- ],
- )
- def test_is_empty(self, left, right, closed):
- # GH27219
- # non-empty always return False
- iv = Interval(left, right, closed)
- assert iv.is_empty is False
-
- # same endpoint is empty except when closed='both' (contains one point)
- iv = Interval(left, left, closed)
- result = iv.is_empty
- expected = closed != "both"
- assert result is expected
-
- @pytest.mark.parametrize(
- "left, right",
- [
- ("a", "z"),
- (("a", "b"), ("c", "d")),
- (list("AB"), list("ab")),
- (Interval(0, 1), Interval(1, 2)),
- (Period("2018Q1", freq="Q"), Period("2018Q1", freq="Q")),
- ],
- )
- def test_construct_errors(self, left, right):
- # GH 23013
- msg = "Only numeric, Timestamp and Timedelta endpoints are allowed"
- with pytest.raises(ValueError, match=msg):
- Interval(left, right)
-
- def test_math_add(self, closed):
- interval = Interval(0, 1, closed=closed)
- expected = Interval(1, 2, closed=closed)
-
- result = interval + 1
- assert result == expected
-
- result = 1 + interval
- assert result == expected
-
- result = interval
- result += 1
- assert result == expected
-
- msg = r"unsupported operand type\(s\) for \+"
- with pytest.raises(TypeError, match=msg):
- interval + interval
-
- with pytest.raises(TypeError, match=msg):
- interval + "foo"
-
- def test_math_sub(self, closed):
- interval = Interval(0, 1, closed=closed)
- expected = Interval(-1, 0, closed=closed)
-
- result = interval - 1
- assert result == expected
-
- result = interval
- result -= 1
- assert result == expected
-
- msg = r"unsupported operand type\(s\) for -"
- with pytest.raises(TypeError, match=msg):
- interval - interval
-
- with pytest.raises(TypeError, match=msg):
- interval - "foo"
-
- def test_math_mult(self, closed):
- interval = Interval(0, 1, closed=closed)
- expected = Interval(0, 2, closed=closed)
-
- result = interval * 2
- assert result == expected
-
- result = 2 * interval
- assert result == expected
-
- result = interval
- result *= 2
- assert result == expected
-
- msg = r"unsupported operand type\(s\) for \*"
- with pytest.raises(TypeError, match=msg):
- interval * interval
-
- msg = r"can\'t multiply sequence by non-int"
- with pytest.raises(TypeError, match=msg):
- interval * "foo"
-
- def test_math_div(self, closed):
- interval = Interval(0, 1, closed=closed)
- expected = Interval(0, 0.5, closed=closed)
-
- result = interval / 2.0
- assert result == expected
-
- result = interval
- result /= 2.0
- assert result == expected
-
- msg = r"unsupported operand type\(s\) for /"
- with pytest.raises(TypeError, match=msg):
- interval / interval
-
- with pytest.raises(TypeError, match=msg):
- interval / "foo"
-
- def test_math_floordiv(self, closed):
- interval = Interval(1, 2, closed=closed)
- expected = Interval(0, 1, closed=closed)
-
- result = interval // 2
- assert result == expected
-
- result = interval
- result //= 2
- assert result == expected
-
- msg = r"unsupported operand type\(s\) for //"
- with pytest.raises(TypeError, match=msg):
- interval // interval
-
- with pytest.raises(TypeError, match=msg):
- interval // "foo"
-
- def test_constructor_errors(self):
- msg = "invalid option for 'closed': foo"
- with pytest.raises(ValueError, match=msg):
- Interval(0, 1, closed="foo")
-
- msg = "left side of interval must be <= right side"
- with pytest.raises(ValueError, match=msg):
- Interval(1, 0)
-
- @pytest.mark.parametrize(
- "tz_left, tz_right", [(None, "UTC"), ("UTC", None), ("UTC", "US/Eastern")]
- )
- def test_constructor_errors_tz(self, tz_left, tz_right):
- # GH 18538
- left = Timestamp("2017-01-01", tz=tz_left)
- right = Timestamp("2017-01-02", tz=tz_right)
-
- if com.any_none(tz_left, tz_right):
- error = TypeError
- msg = "Cannot compare tz-naive and tz-aware timestamps"
- else:
- error = ValueError
- msg = "left and right must have the same time zone"
- with pytest.raises(error, match=msg):
- Interval(left, right)
-
- def test_equality_comparison_broadcasts_over_array(self):
- # https://github.com/pandas-dev/pandas/issues/35931
- interval = Interval(0, 1)
- arr = np.array([interval, interval])
- result = interval == arr
- expected = np.array([True, True])
- tm.assert_numpy_array_equal(result, expected)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/toolz/tests/test_dicttoolz.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/toolz/tests/test_dicttoolz.py
deleted file mode 100644
index d45cd6cf0c89c04ad78e2bd10e590fcb720b0b63..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/toolz/tests/test_dicttoolz.py
+++ /dev/null
@@ -1,270 +0,0 @@
-from collections import defaultdict as _defaultdict
-from collections.abc import Mapping
-import os
-from toolz.dicttoolz import (merge, merge_with, valmap, keymap, update_in,
- assoc, dissoc, keyfilter, valfilter, itemmap,
- itemfilter, assoc_in)
-from toolz.functoolz import identity
-from toolz.utils import raises
-
-
-def inc(x):
- return x + 1
-
-
-def iseven(i):
- return i % 2 == 0
-
-
-class TestDict(object):
- """Test typical usage: dict inputs, no factory keyword.
-
- Class attributes:
- D: callable that inputs a dict and creates or returns a MutableMapping
- kw: kwargs dict to specify "factory" keyword (if applicable)
- """
- D = dict
- kw = {}
-
- def test_merge(self):
- D, kw = self.D, self.kw
- assert merge(D({1: 1, 2: 2}), D({3: 4}), **kw) == D({1: 1, 2: 2, 3: 4})
-
- def test_merge_iterable_arg(self):
- D, kw = self.D, self.kw
- assert merge([D({1: 1, 2: 2}), D({3: 4})], **kw) == D({1: 1, 2: 2, 3: 4})
-
- def test_merge_with(self):
- D, kw = self.D, self.kw
- dicts = D({1: 1, 2: 2}), D({1: 10, 2: 20})
- assert merge_with(sum, *dicts, **kw) == D({1: 11, 2: 22})
- assert merge_with(tuple, *dicts, **kw) == D({1: (1, 10), 2: (2, 20)})
-
- dicts = D({1: 1, 2: 2, 3: 3}), D({1: 10, 2: 20})
- assert merge_with(sum, *dicts, **kw) == D({1: 11, 2: 22, 3: 3})
- assert merge_with(tuple, *dicts, **kw) == D({1: (1, 10), 2: (2, 20), 3: (3,)})
-
- assert not merge_with(sum)
-
- def test_merge_with_iterable_arg(self):
- D, kw = self.D, self.kw
- dicts = D({1: 1, 2: 2}), D({1: 10, 2: 20})
- assert merge_with(sum, *dicts, **kw) == D({1: 11, 2: 22})
- assert merge_with(sum, dicts, **kw) == D({1: 11, 2: 22})
- assert merge_with(sum, iter(dicts), **kw) == D({1: 11, 2: 22})
-
- def test_valmap(self):
- D, kw = self.D, self.kw
- assert valmap(inc, D({1: 1, 2: 2}), **kw) == D({1: 2, 2: 3})
-
- def test_keymap(self):
- D, kw = self.D, self.kw
- assert keymap(inc, D({1: 1, 2: 2}), **kw) == D({2: 1, 3: 2})
-
- def test_itemmap(self):
- D, kw = self.D, self.kw
- assert itemmap(reversed, D({1: 2, 2: 4}), **kw) == D({2: 1, 4: 2})
-
- def test_valfilter(self):
- D, kw = self.D, self.kw
- assert valfilter(iseven, D({1: 2, 2: 3}), **kw) == D({1: 2})
-
- def test_keyfilter(self):
- D, kw = self.D, self.kw
- assert keyfilter(iseven, D({1: 2, 2: 3}), **kw) == D({2: 3})
-
- def test_itemfilter(self):
- D, kw = self.D, self.kw
- assert itemfilter(lambda item: iseven(item[0]), D({1: 2, 2: 3}), **kw) == D({2: 3})
- assert itemfilter(lambda item: iseven(item[1]), D({1: 2, 2: 3}), **kw) == D({1: 2})
-
- def test_assoc(self):
- D, kw = self.D, self.kw
- assert assoc(D({}), "a", 1, **kw) == D({"a": 1})
- assert assoc(D({"a": 1}), "a", 3, **kw) == D({"a": 3})
- assert assoc(D({"a": 1}), "b", 3, **kw) == D({"a": 1, "b": 3})
-
- # Verify immutability:
- d = D({'x': 1})
- oldd = d
- assoc(d, 'x', 2, **kw)
- assert d is oldd
-
- def test_dissoc(self):
- D, kw = self.D, self.kw
- assert dissoc(D({"a": 1}), "a", **kw) == D({})
- assert dissoc(D({"a": 1, "b": 2}), "a", **kw) == D({"b": 2})
- assert dissoc(D({"a": 1, "b": 2}), "b", **kw) == D({"a": 1})
- assert dissoc(D({"a": 1, "b": 2}), "a", "b", **kw) == D({})
- assert dissoc(D({"a": 1}), "a", **kw) == dissoc(dissoc(D({"a": 1}), "a", **kw), "a", **kw)
-
- # Verify immutability:
- d = D({'x': 1})
- oldd = d
- d2 = dissoc(d, 'x', **kw)
- assert d is oldd
- assert d2 is not oldd
-
- def test_assoc_in(self):
- D, kw = self.D, self.kw
- assert assoc_in(D({"a": 1}), ["a"], 2, **kw) == D({"a": 2})
- assert (assoc_in(D({"a": D({"b": 1})}), ["a", "b"], 2, **kw) ==
- D({"a": D({"b": 2})}))
- assert assoc_in(D({}), ["a", "b"], 1, **kw) == D({"a": D({"b": 1})})
-
- # Verify immutability:
- d = D({'x': 1})
- oldd = d
- d2 = assoc_in(d, ['x'], 2, **kw)
- assert d is oldd
- assert d2 is not oldd
-
- def test_update_in(self):
- D, kw = self.D, self.kw
- assert update_in(D({"a": 0}), ["a"], inc, **kw) == D({"a": 1})
- assert update_in(D({"a": 0, "b": 1}), ["b"], str, **kw) == D({"a": 0, "b": "1"})
- assert (update_in(D({"t": 1, "v": D({"a": 0})}), ["v", "a"], inc, **kw) ==
- D({"t": 1, "v": D({"a": 1})}))
- # Handle one missing key.
- assert update_in(D({}), ["z"], str, None, **kw) == D({"z": "None"})
- assert update_in(D({}), ["z"], inc, 0, **kw) == D({"z": 1})
- assert update_in(D({}), ["z"], lambda x: x+"ar", default="b", **kw) == D({"z": "bar"})
- # Same semantics as Clojure for multiple missing keys, ie. recursively
- # create nested empty dictionaries to the depth specified by the
- # keys with the innermost value set to f(default).
- assert update_in(D({}), [0, 1], inc, default=-1, **kw) == D({0: D({1: 0})})
- assert update_in(D({}), [0, 1], str, default=100, **kw) == D({0: D({1: "100"})})
- assert (update_in(D({"foo": "bar", 1: 50}), ["d", 1, 0], str, 20, **kw) ==
- D({"foo": "bar", 1: 50, "d": D({1: D({0: "20"})})}))
- # Verify immutability:
- d = D({'x': 1})
- oldd = d
- update_in(d, ['x'], inc, **kw)
- assert d is oldd
-
- def test_factory(self):
- D, kw = self.D, self.kw
- assert merge(defaultdict(int, D({1: 2})), D({2: 3})) == {1: 2, 2: 3}
- assert (merge(defaultdict(int, D({1: 2})), D({2: 3}),
- factory=lambda: defaultdict(int)) ==
- defaultdict(int, D({1: 2, 2: 3})))
- assert not (merge(defaultdict(int, D({1: 2})), D({2: 3}),
- factory=lambda: defaultdict(int)) == {1: 2, 2: 3})
- assert raises(TypeError, lambda: merge(D({1: 2}), D({2: 3}), factoryy=dict))
-
-
-class defaultdict(_defaultdict):
- def __eq__(self, other):
- return (super(defaultdict, self).__eq__(other) and
- isinstance(other, _defaultdict) and
- self.default_factory == other.default_factory)
-
-
-class TestDefaultDict(TestDict):
- """Test defaultdict as input and factory
-
- Class attributes:
- D: callable that inputs a dict and creates or returns a MutableMapping
- kw: kwargs dict to specify "factory" keyword (if applicable)
- """
- @staticmethod
- def D(dict_):
- return defaultdict(int, dict_)
-
- kw = {'factory': lambda: defaultdict(int)}
-
-
-class CustomMapping(object):
- """Define methods of the MutableMapping protocol required by dicttoolz"""
- def __init__(self, *args, **kwargs):
- self._d = dict(*args, **kwargs)
-
- def __getitem__(self, key):
- return self._d[key]
-
- def __setitem__(self, key, val):
- self._d[key] = val
-
- def __delitem__(self, key):
- del self._d[key]
-
- def __iter__(self):
- return iter(self._d)
-
- def __len__(self):
- return len(self._d)
-
- def __contains__(self, key):
- return key in self._d
-
- def __eq__(self, other):
- return isinstance(other, CustomMapping) and self._d == other._d
-
- def __ne__(self, other):
- return not isinstance(other, CustomMapping) or self._d != other._d
-
- def keys(self):
- return self._d.keys()
-
- def values(self):
- return self._d.values()
-
- def items(self):
- return self._d.items()
-
- def update(self, *args, **kwargs):
- self._d.update(*args, **kwargs)
-
- # Unused methods that are part of the MutableMapping protocol
- #def get(self, key, *args):
- # return self._d.get(key, *args)
-
- #def pop(self, key, *args):
- # return self._d.pop(key, *args)
-
- #def popitem(self, key):
- # return self._d.popitem()
-
- #def clear(self):
- # self._d.clear()
-
- #def setdefault(self, key, *args):
- # return self._d.setdefault(self, key, *args)
-
-
-class TestCustomMapping(TestDict):
- """Test CustomMapping as input and factory
-
- Class attributes:
- D: callable that inputs a dict and creates or returns a MutableMapping
- kw: kwargs dict to specify "factory" keyword (if applicable)
- """
- D = CustomMapping
- kw = {'factory': lambda: CustomMapping()}
-
-
-def test_environ():
- # See: https://github.com/pytoolz/cytoolz/issues/127
- assert keymap(identity, os.environ) == os.environ
- assert valmap(identity, os.environ) == os.environ
- assert itemmap(identity, os.environ) == os.environ
-
-
-def test_merge_with_non_dict_mappings():
- class Foo(Mapping):
- def __init__(self, d):
- self.d = d
-
- def __iter__(self):
- return iter(self.d)
-
- def __getitem__(self, key):
- return self.d[key]
-
- def __len__(self):
- return len(self.d)
-
- d = Foo({1: 1})
-
- assert merge(d) is d or merge(d) == {1: 1}
- assert merge_with(sum, d) == {1: 1}
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tzdata/zoneinfo/Chile/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tzdata/zoneinfo/Chile/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/pyInter/Liyuu_sovits4/preprocess_flist_config.py b/spaces/pyInter/Liyuu_sovits4/preprocess_flist_config.py
deleted file mode 100644
index 6e3dd0bd9390a509c282bbde4ff2631ac94404e4..0000000000000000000000000000000000000000
--- a/spaces/pyInter/Liyuu_sovits4/preprocess_flist_config.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import os
-import argparse
-import re
-
-from tqdm import tqdm
-from random import shuffle
-import json
-
-config_template = json.load(open("configs/config.json"))
-
-pattern = re.compile(r'^[\.a-zA-Z0-9_\/]+$')
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list")
- parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list")
- parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list")
- parser.add_argument("--source_dir", type=str, default="./dataset/44k", help="path to source dir")
- args = parser.parse_args()
-
- train = []
- val = []
- test = []
- idx = 0
- spk_dict = {}
- spk_id = 0
- for speaker in tqdm(os.listdir(args.source_dir)):
- spk_dict[speaker] = spk_id
- spk_id += 1
- wavs = ["/".join([args.source_dir, speaker, i]) for i in os.listdir(os.path.join(args.source_dir, speaker))]
- for wavpath in wavs:
- if not pattern.match(wavpath):
-                print(f"warning: filename {wavpath} contains characters other than letters, digits, underscores, '.' or '/', which may cause errors (or may not)")
-        if len(wavs) < 10:
-            print(f"warning: speaker {speaker} has fewer than 10 files in the dataset, please add more data")
- wavs = [i for i in wavs if i.endswith("wav")]
- shuffle(wavs)
- train += wavs[2:-2]
- val += wavs[:2]
- test += wavs[-2:]
-
- shuffle(train)
- shuffle(val)
- shuffle(test)
-
- print("Writing", args.train_list)
- with open(args.train_list, "w") as f:
- for fname in tqdm(train):
- wavpath = fname
- f.write(wavpath + "\n")
-
- print("Writing", args.val_list)
- with open(args.val_list, "w") as f:
- for fname in tqdm(val):
- wavpath = fname
- f.write(wavpath + "\n")
-
- print("Writing", args.test_list)
- with open(args.test_list, "w") as f:
- for fname in tqdm(test):
- wavpath = fname
- f.write(wavpath + "\n")
-
- config_template["spk"] = spk_dict
- print("Writing configs/config.json")
- with open("configs/config.json", "w") as f:
- json.dump(config_template, f, indent=2)
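A rough sketch (with hypothetical speaker names and paths) of the artifacts the script above leaves behind: each filelist holds one wav path per line, and configs/config.json gains a speaker-name to integer-id mapping under the "spk" key.

```python
import json

# Hypothetical example of the outputs of preprocess_flist_config.py above.
spk_mapping = {"speaker_a": 0, "speaker_b": 1}   # written into config_template["spk"]
train_list_lines = [
    "./dataset/44k/speaker_a/0001.wav",
    "./dataset/44k/speaker_b/0042.wav",
]

print(json.dumps({"spk": spk_mapping}, indent=2))
print("\n".join(train_list_lines))
```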
diff --git a/spaces/pycoming/bingo/src/components/chat-list.tsx b/spaces/pycoming/bingo/src/components/chat-list.tsx
deleted file mode 100644
index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000
--- a/spaces/pycoming/bingo/src/components/chat-list.tsx
+++ /dev/null
@@ -1,28 +0,0 @@
-import React from 'react'
-
-import { Separator } from '@/components/ui/separator'
-import { ChatMessage } from '@/components/chat-message'
-import { ChatMessageModel } from '@/lib/bots/bing/types'
-
-export interface ChatList {
- messages: ChatMessageModel[]
-}
-
-export function ChatList({ messages }: ChatList) {
- if (!messages.length) {
- return null
- }
-
-  return (
-    <div>
-      {messages.map((message, index) => (
-        <div key={index}>
-          <ChatMessage message={message} />
-          {index < messages.length - 1 && (
-            <Separator />
-          )}
-        </div>
-      ))}
-    </div>
-  )
-}
diff --git a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py b/spaces/qingxu98/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py
deleted file mode 100644
index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000
--- a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py
+++ /dev/null
@@ -1,245 +0,0 @@
-from typing import Any, Dict, List, Optional, Tuple, Type, Union
-
-import gym
-import numpy as np
-import torch as th
-from torch.nn import functional as F
-
-from stable_baselines3.common import logger
-from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
-from stable_baselines3.common.preprocessing import maybe_transpose
-from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
-from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
-from stable_baselines3.dqn.policies import DQNPolicy
-
-
-class DQN(OffPolicyAlgorithm):
- """
- Deep Q-Network (DQN)
-
- Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
- Default hyperparameters are taken from the nature paper,
- except for the optimizer and learning rate that were taken from Stable Baselines defaults.
-
- :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
- :param env: The environment to learn from (if registered in Gym, can be str)
- :param learning_rate: The learning rate, it can be a function
- of the current progress remaining (from 1 to 0)
- :param buffer_size: size of the replay buffer
- :param learning_starts: how many steps of the model to collect transitions for before learning starts
- :param batch_size: Minibatch size for each gradient update
- :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
- :param gamma: the discount factor
- :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
- like ``(5, "step")`` or ``(2, "episode")``.
- :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
- Set to ``-1`` means to do as many gradient steps as steps done in the environment
- during the rollout.
- :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
- at a cost of more complexity.
- See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
- :param target_update_interval: update the target network every ``target_update_interval``
- environment steps.
- :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
- :param exploration_initial_eps: initial value of random action probability
- :param exploration_final_eps: final value of random action probability
- :param max_grad_norm: The maximum value for the gradient clipping
- :param tensorboard_log: the log location for tensorboard (if None, no logging)
- :param create_eval_env: Whether to create a second environment that will be
- used for evaluating the agent periodically. (Only available when passing string for the environment)
- :param policy_kwargs: additional arguments to be passed to the policy on creation
- :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
- :param seed: Seed for the pseudo random generators
- :param device: Device (cpu, cuda, ...) on which the code should be run.
- Setting it to auto, the code will be run on the GPU if possible.
- :param _init_setup_model: Whether or not to build the network at the creation of the instance
- """
-
- def __init__(
- self,
- policy: Union[str, Type[DQNPolicy]],
- env: Union[GymEnv, str],
- learning_rate: Union[float, Schedule] = 1e-4,
- buffer_size: int = 1000000,
- learning_starts: int = 50000,
- batch_size: Optional[int] = 32,
- tau: float = 1.0,
- gamma: float = 0.99,
- train_freq: Union[int, Tuple[int, str]] = 4,
- gradient_steps: int = 1,
- optimize_memory_usage: bool = False,
- target_update_interval: int = 10000,
- exploration_fraction: float = 0.1,
- exploration_initial_eps: float = 1.0,
- exploration_final_eps: float = 0.05,
- max_grad_norm: float = 10,
- tensorboard_log: Optional[str] = None,
- create_eval_env: bool = False,
- policy_kwargs: Optional[Dict[str, Any]] = None,
- verbose: int = 0,
- seed: Optional[int] = None,
- device: Union[th.device, str] = "auto",
- _init_setup_model: bool = True,
- ):
-
- super(DQN, self).__init__(
- policy,
- env,
- DQNPolicy,
- learning_rate,
- buffer_size,
- learning_starts,
- batch_size,
- tau,
- gamma,
- train_freq,
- gradient_steps,
- action_noise=None, # No action noise
- policy_kwargs=policy_kwargs,
- tensorboard_log=tensorboard_log,
- verbose=verbose,
- device=device,
- create_eval_env=create_eval_env,
- seed=seed,
- sde_support=False,
- optimize_memory_usage=optimize_memory_usage,
- supported_action_spaces=(gym.spaces.Discrete,),
- )
-
- self.exploration_initial_eps = exploration_initial_eps
- self.exploration_final_eps = exploration_final_eps
- self.exploration_fraction = exploration_fraction
- self.target_update_interval = target_update_interval
- self.max_grad_norm = max_grad_norm
- # "epsilon" for the epsilon-greedy exploration
- self.exploration_rate = 0.0
- # Linear schedule will be defined in `_setup_model()`
- self.exploration_schedule = None
- self.q_net, self.q_net_target = None, None
-
- if _init_setup_model:
- self._setup_model()
-
- def _setup_model(self) -> None:
- super(DQN, self)._setup_model()
- self._create_aliases()
- self.exploration_schedule = get_linear_fn(
- self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
- )
-
- def _create_aliases(self) -> None:
- self.q_net = self.policy.q_net
- self.q_net_target = self.policy.q_net_target
-
- def _on_step(self) -> None:
- """
- Update the exploration rate and target network if needed.
- This method is called in ``collect_rollouts()`` after each step in the environment.
- """
- if self.num_timesteps % self.target_update_interval == 0:
- polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
-
- self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
- logger.record("rollout/exploration rate", self.exploration_rate)
-
- def train(self, gradient_steps: int, batch_size: int = 100) -> None:
- # Update learning rate according to schedule
- self._update_learning_rate(self.policy.optimizer)
-
- losses = []
- for _ in range(gradient_steps):
- # Sample replay buffer
- replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
-
- with th.no_grad():
- # Compute the next Q-values using the target network
- next_q_values = self.q_net_target(replay_data.next_observations)
- # Follow greedy policy: use the one with the highest value
- next_q_values, _ = next_q_values.max(dim=1)
- # Avoid potential broadcast issue
- next_q_values = next_q_values.reshape(-1, 1)
- # 1-step TD target
- target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
-
- # Get current Q-values estimates
- current_q_values = self.q_net(replay_data.observations)
-
- # Retrieve the q-values for the actions from the replay buffer
- current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
-
- # Compute Huber loss (less sensitive to outliers)
- loss = F.smooth_l1_loss(current_q_values, target_q_values)
- losses.append(loss.item())
-
- # Optimize the policy
- self.policy.optimizer.zero_grad()
- loss.backward()
- # Clip gradient norm
- th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
- self.policy.optimizer.step()
-
- # Increase update counter
- self._n_updates += gradient_steps
-
- logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
- logger.record("train/loss", np.mean(losses))
-
- def predict(
- self,
- observation: np.ndarray,
- state: Optional[np.ndarray] = None,
- mask: Optional[np.ndarray] = None,
- deterministic: bool = False,
- ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
- """
- Overrides the base_class predict function to include epsilon-greedy exploration.
-
- :param observation: the input observation
- :param state: The last states (can be None, used in recurrent policies)
- :param mask: The last masks (can be None, used in recurrent policies)
- :param deterministic: Whether or not to return deterministic actions.
- :return: the model's action and the next state
- (used in recurrent policies)
- """
- if not deterministic and np.random.rand() < self.exploration_rate:
- if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
- n_batch = observation.shape[0]
- action = np.array([self.action_space.sample() for _ in range(n_batch)])
- else:
- action = np.array(self.action_space.sample())
- else:
- action, state = self.policy.predict(observation, state, mask, deterministic)
- return action, state
-
- def learn(
- self,
- total_timesteps: int,
- callback: MaybeCallback = None,
- log_interval: int = 4,
- eval_env: Optional[GymEnv] = None,
- eval_freq: int = -1,
- n_eval_episodes: int = 5,
- tb_log_name: str = "DQN",
- eval_log_path: Optional[str] = None,
- reset_num_timesteps: bool = True,
- ) -> OffPolicyAlgorithm:
-
- return super(DQN, self).learn(
- total_timesteps=total_timesteps,
- callback=callback,
- log_interval=log_interval,
- eval_env=eval_env,
- eval_freq=eval_freq,
- n_eval_episodes=n_eval_episodes,
- tb_log_name=tb_log_name,
- eval_log_path=eval_log_path,
- reset_num_timesteps=reset_num_timesteps,
- )
-
- def _excluded_save_params(self) -> List[str]:
- return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
-
- def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
- state_dicts = ["policy", "policy.optimizer"]
-
- return state_dicts, []
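The file above is a vendored copy of stable-baselines3's DQN. Below is a minimal usage sketch of that class, assuming the upstream stable_baselines3 package and a Gym environment such as CartPole-v1 are installed; the hyperparameter values are illustrative, not tuned.

```python
from stable_baselines3 import DQN

# Train a small DQN agent; epsilon-greedy exploration and target-network updates
# are handled internally, as in the module above.
model = DQN(
    "MlpPolicy",
    "CartPole-v1",
    learning_rate=1e-4,
    buffer_size=100_000,
    exploration_fraction=0.1,
    exploration_final_eps=0.05,
    verbose=1,
)
model.learn(total_timesteps=10_000)

obs = model.get_env().reset()
action, _ = model.predict(obs, deterministic=True)  # deterministic=True skips epsilon-greedy
print(action)
```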
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/CCleaner 5.0 Serial Key Licence Full Version Free Download.md b/spaces/quidiaMuxgu/Expedit-SAM/CCleaner 5.0 Serial Key Licence Full Version Free Download.md
deleted file mode 100644
index 0848ae6d53a9491a7289ac51ed5739aac8d9a5a7..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/CCleaner 5.0 Serial Key Licence Full Version Free Download.md
+++ /dev/null
@@ -1,12 +0,0 @@
-CCleaner 5.0 Serial Key Licence Full version Free Download Download ✑ https://geags.com/2uCsnN
-
-6 days ago — CCleaner Pro Crack 5.90 With Serial License Key 2022 Keygen JKBNX-DKNVB- . CCleaner Professional Crack With License Key 2022 and Serial Keygen.
-CCleaner Premium Crack With License Key 2020 - CCleaner Professional License Key. ..
-CCleaner Crack With License Key 2020 free download.
-Crack CCleaner Pro 5.90 With Serial Number Free Download - CCleaner Professional Crack with license key, and ccleaner pro key.
-Cracked version of CCleaner Professional License Key.
-Free Download CCleaner Professional 6.10.6162 Crack Full Activation Keygen. .
-CCleaner Pro Crack 5.90. 8a78ff9644
-
-
-
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Cricket Batting Tips In Tamil Pdf 26.md b/spaces/quidiaMuxgu/Expedit-SAM/Cricket Batting Tips In Tamil Pdf 26.md
deleted file mode 100644
index b5a05bff7b0a9e5fddc3b121a90fc503d54200f3..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Cricket Batting Tips In Tamil Pdf 26.md
+++ /dev/null
@@ -1,9 +0,0 @@
-Cricket Batting Tips In Tamil Pdf 26 Download ⇔ https://geags.com/2uCrjo
-
-June 7, 2019 - With driving ranges and bowling machines, the academy also offers players residence accommodation. On the other hand, it also provides . The academy can offer you a wide variety of sports competitions, including football, basketball, volleyball, rugby, badminton, tennis and football competitions (among them). You can also access .
-The academy mainly focuses on professional opportunities, especially for beginners.
-You can play on a professional or amateur basis and all players must be over 18 years of age.
-If you are a beginner, you must sign up for training and take a test. 8a78ff9644
-
-
-
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Histologia Basica Junqueira E Carneiro 11 Ed.md b/spaces/quidiaMuxgu/Expedit-SAM/Histologia Basica Junqueira E Carneiro 11 Ed.md
deleted file mode 100644
index f1b9754ddab66648a52ef16df11bc8528875e9c9..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Histologia Basica Junqueira E Carneiro 11 Ed.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-Histologia Basica Junqueira e Carneiro 11 ed: An Essential Book for the Study of Histology
-
-Histology is the science that studies the structure, composition, and functions of the tissues of the human body. Histology is fundamental to understanding physiology, pathology, and medicine in general. To study histology, you need a good book that presents the concepts, the images, and the examples in a clear, didactic, and up-to-date way.
-histologia basica junqueira e carneiro 11 ed Download — https://geags.com/2uCqfV
-
-One of the books most used and recommended by histology students and teachers is Histologia Basica Junqueira e Carneiro 11 ed. It is a classic work, first published in 1972, that has already had more than 20 editions in several languages. The book was written by the renowned professors Luiz Carlos Uchôa Junqueira and José Carneiro, pioneers of histology teaching and research in Brazil.
-
-What will you find in Histologia Basica Junqueira e Carneiro 11 ed?
-
-Histologia Basica Junqueira e Carneiro 11 ed covers every aspect of human histology, from the basic concepts to the most advanced ones. The book is divided into four parts:
-
-
-The first part covers the methods used to study histology, such as light microscopy, electron microscopy, immunohistochemistry, and flow cytometry, among others.
-The second part covers the basic tissues of the human body: epithelial tissue, connective tissue, muscle tissue, nervous tissue, and hematopoietic tissue.
-The third part covers the organ systems of the human body, such as the cardiovascular, respiratory, digestive, urinary, endocrine, and reproductive systems, among others.
-The fourth part covers human embryology, from fertilization to birth.
-
-
-The book has more than 800 pages of text and more than 1000 high-quality illustrations, including color light- and electron-microscopy photographs, explanatory diagrams, comparative tables, and summary boxes. It also includes a histology atlas with more than 200 annotated histological slides.
-
-What sets Histologia Basica Junqueira e Carneiro 11 ed apart?
-
-Histologia Basica Junqueira e Carneiro 11 ed stands out for several reasons. Some of them are:
-
-
-It is an up-to-date book that keeps pace with new discoveries and new technologies in histology.
-It is a complete book that covers all the relevant topics of human histology.
-It is a didactic book that explains the concepts clearly, objectively, and with practical examples.
-It is an illustrated book that presents images that are sharp, in color, and accompanied by detailed captions.
-It is an interactive book that offers additional online resources, such as videos, animations, exercises, tests, and clinical cases.
-
-
-How to get Histologia Basica Junqueira e Carneiro 11 ed?
-
-If you want to get Histologia Basica Junqueira e Carneiro 11 ed, you can do so in several ways. You can buy the printed book in physical or online bookstores. You can buy the digital book in PDF or ePub format on digital platforms. You can download the book for free from trustworthy sites or social networks. You can access the book online on official sites or in virtual libraries.
-
-
-What matters is having access to this book, which is a reference in the field of histology and will help you learn and go deeper into this fascinating subject. Histologia Basica Junqueira e Carneiro 11 ed is a book worth having on your shelf or on your device.
-
-Conclusion
-
-In conclusion, Histologia Basica Junqueira e Carneiro 11 ed is an essential book for the study of human histology. It is an up-to-date, complete, didactic, illustrated, and interactive book that covers every aspect of histology, from study methods to embryology. It was written by renowned professors who were pioneers of histology teaching and research in Brazil. It is available in several formats and through several channels to make access easy. If you want to learn more about human histology, be sure to get Histologia Basica Junqueira e Carneiro 11 ed.
-How to see histological images with Histologia Basica Junqueira e Carneiro 11 ed?
-
-One of the best features of Histologia Basica Junqueira e Carneiro 11 ed is that it provides you with a lot of histological images that illustrate the structure and function of the different tissues and organs of the human body. These images are taken from microscopes and show the details and characteristics of the cells, tissues, and organs at different magnifications and resolutions.
-
-To see histological images with Histologia Basica Junqueira e Carneiro 11 ed, you have several options. You can:
-
-
-Look at the images that are printed in the book. The book has more than 1000 illustrations that are arranged in a logical and sequential order according to the topics and chapters of the book.
-Look at the images that are included in the atlas of histology. The book comes with an atlas of histology that has more than 200 histological slides that are commented and explained by the authors.
-Look at the images that are available online. The book has a website that offers you access to more than 500 histological images that are interactive and zoomable. You can also find videos, animations, exercises, tests, and cases online.
-Look at the images that are provided by other sources. You can also find histological images from other books, websites, databases, or applications that are related to histology or anatomy.
-
-
-By seeing histological images with Histologia Basica Junqueira e Carneiro 11 ed, you can improve your understanding and appreciation of histology. You can also compare and contrast different tissues and organs and learn how they work together to maintain the health and function of the human body.
-What are some tips or tricks for interpreting histological images?
-
-Interpreting histological images is not an easy task. It requires a lot of knowledge, skill, and practice. However, there are some tips or tricks that can help you improve your ability to interpret histological images. Here are some of them:
-
-
-Know the basics of histology. Before you try to interpret histological images, you need to have a solid foundation of the basic concepts and principles of histology. You need to know the types, functions, and characteristics of the different cells, tissues, and organs of the human body.
-Know the methods of histology. You also need to know how histological images are obtained and processed. You need to know the types, advantages, and disadvantages of the different methods of histology, such as microtomy, staining, fixation, embedding, etc.
-Know the tools of histology. You also need to know how to use the tools that are available for viewing and analyzing histological images. You need to know how to use microscopes, cameras, computers, software, etc.
-Know the patterns of histology. You also need to know how to recognize and identify the patterns that are common in histological images. You need to know how to distinguish between normal and abnormal tissues and organs, how to classify tumors and lesions, how to correlate structure and function, etc.
-Know the sources of histology. You also need to know where to find reliable and relevant sources of information and reference for histological images. You need to know how to use books, websites, databases, journals, etc.
-
-
-By following these tips or tricks, you can improve your ability to interpret histological images and gain more insight and understanding of histology.
-What are some challenges or limitations of interpreting histological images?
-
-Interpreting histological images is not a simple or straightforward task. It involves a lot of challenges and limitations that can affect the accuracy and reliability of your interpretation. Here are some of them:
-
-
-The quality of histological images depends on many factors, such as the preparation, fixation, staining, cutting, mounting, and preservation of the tissue samples, as well as the type, resolution, and calibration of the microscope and camera used to capture the images.
-The interpretation of histological images requires a lot of experience and expertise, as well as a good knowledge of anatomy, physiology, pathology, and histology. It also requires a lot of attention and concentration, as well as a good eye for detail and pattern recognition.
-The interpretation of histological images can be influenced by subjective factors, such as personal bias, preference, expectation, or emotion. It can also be influenced by external factors, such as peer pressure, time pressure, or ethical issues.
-The interpretation of histological images can be affected by errors or mistakes, such as misidentification, misclassification, misdiagnosis, or miscommunication. It can also be affected by uncertainties or ambiguities, such as variations, anomalies, artifacts, or inconsistencies.
-The interpretation of histological images can be challenged or disputed by other sources of information or evidence, such as clinical data, laboratory tests, genetic tests, or imaging techniques.
-
-
-By being aware of these challenges and limitations, you can improve your ability to interpret histological images and avoid potential pitfalls and problems.
-What are some solutions or strategies for overcoming these challenges or limitations?
-
-Despite the challenges and limitations of interpreting histological images, there are some solutions or strategies that can help you overcome them and improve your interpretation skills. Here are some of them:
-
-
-Improve the quality of histological images by following the best practices and standards for preparing, processing, and preserving tissue samples, as well as using high-quality and well-calibrated equipment and software.
-Improve your knowledge and expertise in histology by studying the theory and practice of histology from reliable and updated sources, such as books, websites, journals, etc. You can also consult with experts or peers who have more experience or knowledge in histology.
-Improve your objectivity and accuracy in histology by avoiding or minimizing subjective or external factors that can influence your interpretation, such as bias, preference, expectation, emotion, pressure, or ethics. You can also use tools or methods that can help you reduce errors or uncertainties, such as checklists, guidelines, algorithms, etc.
-Improve your validation and verification in histology by comparing or contrasting your interpretation with other sources of information or evidence that can support or challenge your interpretation, such as clinical data, laboratory tests, genetic tests, or imaging techniques. You can also seek feedback or review from other experts or peers who can provide you with constructive criticism or suggestions.
-
-
-By applying these solutions or strategies, you can overcome the challenges and limitations of interpreting histological images and enhance your interpretation skills and confidence.
-Conclusion
-
-In conclusion, Histologia Basica Junqueira e Carneiro 11 ed is a book that is essential for the study of human histology. It is an updated, complete, didactic, illustrated, and interactive book that covers all aspects of histology, from the methods of study to embryology. It was written by renowned professors who were pioneers in teaching and research in histology in Brazil, and it is available in various formats and media to facilitate your access. If you want to learn more about human histology, do not miss the opportunity to acquire Histologia Basica Junqueira e Carneiro 11 ed.
-
-
\ No newline at end of file
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Mpc Essentials For Pc Cracked Free 21 LINK.md b/spaces/quidiaMuxgu/Expedit-SAM/Mpc Essentials For Pc Cracked Free 21 LINK.md
deleted file mode 100644
index f2700a9547860ef7192e6e4b9a58cc1ef9929be6..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Mpc Essentials For Pc Cracked Free 21 LINK.md
+++ /dev/null
@@ -1,64 +0,0 @@
-mpc essentials for pc cracked free 21 Download ->>->>->> https://geags.com/2uCqXh
-
--11-07
-
- fr?
-
- :)
-
- kjg: #ubuntu-fr
-
- or #ubuntu-fr-newbie
-
- or something like that
-
- either french or french-newbie
-
- I'm not sure
-
- ok
-
- how do I check that my bios is up to date?
-
- I have to run ubuntu in safe mode. that means I can't check any files
-
- oi
-
- hey
-
- what is the problem?
-
- (besides "yikes, what does ubuntu still use Xorg for!)
-
- a new version of xorg :)
-
- mgedmin, i am trying to install and its telling me that the file I am trying to install is not found
-
- DShepherd: what file?
-
- xserver-xorg
-
- what is the error message?
-
- jrib, what files are you working on right now?
-
- mgedmin, wait.. i will send a pic
-
- thanks
-
- try this: ls -l xserver-xorg | grep ^-rw
-
- the actual error message is more informative
-
- but the most important file that's missing is xserver-xorg.initramfs
-
- the initramfs is what your hardware uses to boot up the kernel
-
- -rw-r--r-- 1 root root 14116870856 2007-10-29 10:15 xserver-xorg-core
-
- -rw-r--r-- 1 root root 14852130576 2007-10-29 13:24 xserver-xorg-core_7.0.15-1ubuntu1_i386.deb
-
- -rw-r--r-- 1 root root 6558 2007-10-29 13:25 xserver-xorg-driver-i810_1.6.3-3ubuntu 4fefd39f24
-
-
-
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Battle Los Angeles PC Activation Code Where to Find and Redeem It.md b/spaces/raedeXanto/academic-chatgpt-beta/Battle Los Angeles PC Activation Code Where to Find and Redeem It.md
deleted file mode 100644
index f4950d08c4b3fa3801b5098ae01d295e6b446ce9..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Battle Los Angeles PC Activation Code Where to Find and Redeem It.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-Battle Los Angeles PC Activation Code: What You Need to Know
- If you are a fan of first-person shooter games, you might have heard of Battle Los Angeles , a game based on the 2011 movie of the same name. In this game, you play as a US Marine who fights against an alien invasion in Los Angeles. The game features realistic graphics, intense combat, and various weapons and vehicles to use.
- However, before you can enjoy this game on your PC, you need to have an activation code that verifies your purchase and allows you to install and play the game. Without an activation code, you won't be able to access the game at all.
-battle los angeles pc activation code Download ⚙ https://tinourl.com/2uL4D7
- So, how do you get an activation code for Battle Los Angeles? And what are the risks and challenges of using one? In this article, we will answer these questions and provide you with some methods to find or generate an activation code for this game.
- Methods to Find or Generate an Activation Code
- There are three main methods that you can use to get an activation code for Battle Los Angeles. Each method has its own advantages and disadvantages, so you should choose the one that suits your needs and preferences.
- Method 1: Buy the game from an official source
- The most straightforward and reliable way to get an activation code is to buy the game from an official source, such as Steam, Amazon, or Konami (the publisher of the game). When you buy the game from these sources, you will receive an activation code along with your purchase confirmation. You can then use this code to install and play the game on your PC.
- The advantage of this method is that you will get a legitimate and valid code that works without any issues. You will also support the developers and publishers of the game, who deserve to be compensated for their work. Additionally, you will be able to access updates, patches, and online features of the game without any problems.
- The disadvantage of this method is that you will have to pay for the game, which might not be affordable or convenient for some people. The price of the game varies depending on the source and region, but it usually ranges from $10 to $20. You will also need a stable internet connection and enough disk space to download and install the game.
- Method 2: Use a key generator tool
- Another way to get an activation code is to use a key generator tool, which is a software program that creates random codes for various games and software. You can find many key generator tools online, such as KeyGenNinja, SerialBay, or AllKeygensDownload. These tools claim to generate working codes for Battle Los Angeles and other games.
- The advantage of this method is that you don't have to pay anything for the code, as these tools are usually free or require a small donation. You also don't need an internet connection or disk space to use these tools, as they run offline and are usually small in size.
- The disadvantage of this method is that it is very risky and unreliable. Most of these tools are illegal and violate the terms and conditions of the game. Using them can result in legal issues and penalties, such as fines or lawsuits. Moreover, many of these tools are infected with malware and viruses that can harm your PC and compromise your personal data. Furthermore, many of these codes are invalid or expired, meaning that they won't work or will stop working after a while.
- Method 3: Search online for free codes
- The third way to get an activation code is to search online for free codes that other people have shared or posted. You can find many websites, forums, blogs, videos, or social media posts that offer free codes for Battle Los Angeles and other games. Some examples are ChapterCheats, YouTube, or PDFDrive. These sources claim to provide working codes for Battle Los Angeles that you can use without paying anything.
- The advantage of this method is that you don't have to pay anything for the code, as these sources are free and accessible. You also don't need any software or tool to use these codes, as they are already generated and ready to use.
- The disadvantage of this method is that it is also very risky and unreliable. Like the previous method, most of these sources are illegal and violate the terms and conditions of the game. Using them can result in legal issues and penalties, such as fines or lawsuits. Moreover, many of these sources are infected with malware and viruses that can harm your PC and compromise your personal data. Furthermore, many of these codes are invalid or expired, meaning that they won't work or will stop working after a while.
- Risks and Challenges of Using an Activation Code
- As you can see from the above methods, getting an activation code for Battle Los Angeles is not easy or safe. There are many risks and challenges involved in using an activation code for this game. Here are some of them:
- Risk 1: Legal issues and penalties
- Using an activation code that is not obtained from an official source is considered piracy and theft. It violates the intellectual property rights of the developers and publishers of the game. It also breaches the end-user license agreement (EULA) that you agree to when you install the game. By using an illegal activation code, you are exposing yourself to legal issues and penalties, such as fines or lawsuits. You could also face criminal charges or imprisonment in some countries.
- Risk 2: Malware and viruses
- Using an activation code that is obtained from an untrusted source is dangerous for your PC and personal data. Many sources that offer free or generated codes are infected with malware and viruses that can harm your PC and compromise your personal data. These malware and viruses can steal your identity, passwords, bank accounts, credit cards, or other sensitive information. They can also damage your files, programs, system settings, or hardware components. They can also make your PC slow, unstable, or unusable.
- Risk 3: Invalid or expired codes
-Many free or generated codes turn out to be invalid or expired, either because they have already been used by someone else or because they are detected and blocked by the game's security system. When this happens, you won't be able to install or play the game at all. You will also waste your time and effort trying to find or generate another code that works.
- Conclusion
- In conclusion, getting an activation code for Battle Los Angeles is not a simple or safe task. There are three main methods that you can use to get an activation code: buying the game from an official source, using a key generator tool, or searching online for free codes. However, each method has its own risks and challenges, such as legal issues and penalties, malware and viruses, or invalid or expired codes. Therefore, you should be careful and cautious when using an activation code for this game.
- Here are some recommendations and tips that we suggest you follow:
-
-The best and safest way to get an activation code is to buy the game from an official source, such as Steam, Amazon, or Konami. This way, you will get a legitimate and valid code that works without any issues. You will also support the developers and publishers of the game, who deserve to be compensated for their work. Additionally, you will be able to access updates, patches, and online features of the game without any problems.
-If you decide to use a key generator tool or search online for free codes, make sure that you use a trusted and reputable source that has positive reviews and feedback from other users. You should also scan the source and the code with a reliable antivirus program before using them. You should also backup your PC and personal data before installing or playing the game.
-If you encounter any problems or errors with the activation code or the game, you should contact the customer support of the game or the source that provided you with the code. They might be able to help you solve the issue or provide you with a replacement code.
-
- We hope that this article has helped you understand what you need to know about Battle Los Angeles PC activation code. If you have any questions or comments, please feel free to leave them below. Thank you for reading and have fun playing!
- FAQs
- Here are some frequently asked questions about Battle Los Angeles PC activation code:
-
-What is Battle Los Angeles?
-Battle Los Angeles is a first-person shooter game based on the 2011 movie of the same name. In this game, you play as a US Marine who fights against an alien invasion in Los Angeles. The game features realistic graphics, intense combat, and various weapons and vehicles to use.
-Why do I need an activation code for Battle Los Angeles?
-You need an activation code for Battle Los Angeles to verify your purchase and allow you to install and play the game on your PC. Without an activation code, you won't be able to access the game at all.
-How do I get an activation code for Battle Los Angeles?
-There are three main methods that you can use to get an activation code for Battle Los Angeles: buying the game from an official source, using a key generator tool, or searching online for free codes. However, each method has its own risks and challenges, such as legal issues and penalties, malware and viruses, or invalid or expired codes.
-What are the risks and challenges of using an activation code for Battle Los Angeles?
-The risks and challenges of using an activation code for Battle Los Angeles are: legal issues and penalties, malware and viruses, or invalid or expired codes. These risks and challenges can result in fines or lawsuits, harm your PC and personal data, or prevent you from installing or playing the game.
-What are some tips and recommendations for using an activation code for Battle Los Angeles?
-Some tips and recommendations for using an activation code for Battle Los Angeles are: buy the game from an official source, use a trusted and reputable source for free or generated codes, scan the source and the code with a reliable antivirus program, backup your PC and personal data before installing or playing the game, and contact customer support if you encounter any problems or errors.
-
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Charlie Houpert - Charisma University What You Need to Know to Stand Out Connect and Succeed.md b/spaces/raedeXanto/academic-chatgpt-beta/Charlie Houpert - Charisma University What You Need to Know to Stand Out Connect and Succeed.md
deleted file mode 100644
index d386e9edc5489d14515f7bcdd637875beea811a5..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Charlie Houpert - Charisma University What You Need to Know to Stand Out Connect and Succeed.md
+++ /dev/null
@@ -1,92 +0,0 @@
-
-Charlie Houpert - Charisma University
-Do you want to improve your social skills and become more charismatic? Do you want to make a great impression on anyone you meet, connect with people easily, tell captivating stories, and lead with influence? If you answered yes, then you might be interested in Charisma University, an online course created by Charlie Houpert, the co-founder of Charisma on Command.
-What is Charisma University and who is Charlie Houpert?
-Charisma University is an online course on social skills and charisma
-Charisma University is a 6-week step-by-step program designed to give you all the tools you need to take your charisma to the next level. It's over 10 hours of the best training material available, based on scientific research, real-life examples, and practical exercises. It covers everything from making a great first impression, to building confidence, to mastering conversation skills, to telling captivating stories, to developing a magnetic presence, to leading and influencing others with charisma.
-Charlie Houpert - Charisma University Download Zip ✦✦✦ https://tinourl.com/2uKZti
-Charlie Houpert is the co-founder of Charisma on Command, a YouTube channel and a book on charisma
-Charlie Houpert is the co-founder of Charisma on Command, a YouTube channel with over 4 million subscribers that teaches people how to be more charismatic in any situation. He is also the author of a book with the same name, which has been praised by celebrities like Will Smith and Tony Robbins. Charlie has a background in philosophy and he used to be an introverted and shy person who struggled with social anxiety. He decided to change his life by studying charisma and social skills, and he went from being voted shyest person in his high school class to becoming a leading speaker at one of New York City's biggest personal development meetups. He has also traveled the world, lived in different countries, dated beautiful women, and inspired thousands of people with his charisma.
-What are the benefits of taking Charisma University?
-You will learn how to make a great first impression by hitting four key emotions
-The first rule of making a great first impression is to not try to impress. Instead, you should focus on making the other person feel four key emotions: positivity, trust, respect, and interest. In Charisma University, you will learn how to do this by using body language, eye contact, vocal tonality, humor, compliments, and more. You will also learn how to avoid common mistakes that ruin your first impression, such as being too needy, boring, arrogant, or nervous.
-You will learn how to build confidence and overcome shyness and anxiety
-Confidence is one of the most important aspects of charisma. Without confidence, you will not be able to express yourself fully, connect with others authentically, or take action on your goals. In Charisma University, you will learn how to build confidence from the inside out by changing your mindset, beliefs, habits, and behaviors. You will also learn how to overcome shyness and anxiety by facing your fears, challenging your negative thoughts, and using techniques such as visualization, affirmations, breathing exercises, and more.
-You will learn how to master conversation skills and connect with anyone
-Conversation skills are essential for building rapport, trust, and likability with anyone you meet. In Charisma University, you will learn how to start conversations with anyone confidently, how to keep conversations going smoothly, how to avoid awkward silences, how to ask engaging questions, how to listen actively, how to show empathy, how to use humor, how to flirt, how to deal with difficult people, and more.
-You will learn how to tell captivating stories that make people listen
-Stories are powerful tools for communicating your personality, values, experiences, and emotions. They can also entertain, educate, inspire, and persuade others. In Charisma University, you will learn how to tell captivating stories that make people listen by using storytelling techniques such as structure, conflict, emotion, sensory details, humor, and more. You will also learn how to adapt your stories for different audiences, situations, and purposes.
-You will learn how to develop a magnetic presence that attracts attention and respect
-Presence is the ability to be fully present in the moment, to be aware of yourself and others, and to project your energy outward. When you have a magnetic presence, you attract attention and respect from others. You also feel more confident, happy, and alive. In Charisma University, you will learn how to develop a magnetic presence by using techniques such as posture, movement, eye contact, vocal projection, smiling, and more. You will also learn how to use your presence for different effects such as dominance, warmth, mystery, and more.
-You will learn how to lead and influence others with charisma
-Charisma is not only useful for making friends and having fun. It's also useful for leading and influencing others in your personal and professional life. In Charisma University, you will learn how to lead and influence others with charisma by using techniques such as framing, persuasion, negotiation, motivation, inspiration, and more. You will also learn how to deal with common challenges such as conflict resolution, criticism, rejection, and more.
-What are the features of Charisma University?
-Charisma University is a 6-week step-by-step program with over 10 hours of video content
-Charisma University is divided into six modules that cover each aspect of charisma in detail. Each module consists of several video lessons that explain the concepts and demonstrate them with real-life examples. Each video lesson is accompanied by a daily action guide that gives you specific exercises to practice what you learned and apply it in your own life. You can watch the videos at your own pace and access them anytime from any device.
-Charisma University includes daily action guides, worksheets, cheat sheets, and bonuses
-In addition to the video lessons, Charisma University also provides you with various resources to help you get the most out of the program. These include:
-
-Daily action guides: These are PDF documents that summarize the main points of each video lesson and give you specific exercises to practice what you learned and apply it in your own life. They also include tips, reminders, challenges, and extra resources.
-Worksheets: These are PDF documents that help you go deeper into some topics and reflect on your progress. They include questions, quizzes, self-assessments, checklists, templates, scripts, etc.
-Cheat sheets: These are PDF documents that provide you with quick reference guides for some topics. They include summaries, formulas, frameworks, examples, etc.
-Bonuses: These are extra video lessons that cover some advanced topics or special situations. They include topics such as: How To Be Funny, How To Be More Attractive, How To Network Effectively, How To Deal With Bullies, How To Be More Productive, etc.
-Charisma University offers a 60-day money-back guarantee and lifetime access
-Charisma University is confident that their program will help you improve your social skills and become more charismatic. That's why they offer a 60-day money-back guarantee: if for any reason you are not satisfied, you can request a full refund within 60 days of your purchase.
-How can you enroll in Charisma University?
-You can enroll in Charisma University by visiting the official website and choosing your payment option
-If you are interested in joining Charisma University, you can enroll by visiting the official website at https://www.charismaoncommand.com/c-university/. There, you will find more information about the program, the curriculum, the instructors, and the testimonials from previous students. You will also be able to watch some free videos that will give you a taste of what you will learn in Charisma University.
-To enroll, you will need to choose your payment option. You can either pay in full or pay in six monthly installments. Both options come with a 60-day money-back guarantee, which means you can try Charisma University risk-free for two months. If you are not satisfied with the program for any reason, you can request a full refund within 60 days of your purchase.
-You can also watch some free videos and read some testimonials from previous students
-If you are not sure if Charisma University is right for you, you can also watch some free videos and read some testimonials from previous students. These will give you an idea of what you can expect from the program and how it has helped other people improve their social skills and charisma.
-Some of the free videos you can watch include:
-
-How To Be More Confident In Any Situation
-How To Make A Great First Impression
-How To Be Funny And Make People Laugh
-How To Tell A Captivating Story
-How To Be More Attractive
-
-Some of the testimonials you can read include:
-
-"Charisma University gave me the opportunity to finish my Ph.D. in clinical psychology, which has made a massive impact on my career. I have been able to teach at a university, work toward doctoral-level licensure, and gain employment at a psychiatric hospital. I cannot express how grateful I am for Charisma University." - Student Testimonies | About | Charisma University
-"Charisma University is Charlie Houpert's online course on social skills. The goal of Charisma University is to increase your social skills and teach you how to become more charismatic. This is a review of Charisma University." - Charisma University: Brutally Honest Review & Summary | Power Dynamics™
-"Very efficient and prompt feedbacks." - Charisma University - Reviews | Facebook
- Conclusion
-Charisma University is an online course that teaches you how to improve your social skills and become more charismatic. It is created by Charlie Houpert, the co-founder of Charisma on Command, a YouTube channel and a book on charisma. It is a 6-week step-by-step program that covers everything from making a great first impression, to building confidence, to mastering conversation skills, to telling captivating stories, to developing a magnetic presence, to leading and influencing others with charisma. It also includes daily action guides, worksheets, cheat sheets, and bonuses. It offers a 60-day money-back guarantee and lifetime access. You can enroll in Charisma University by visiting the official website and choosing your payment option. You can also watch some free videos and read some testimonials from previous students.
-If you want to take your charisma to the next level and enjoy more success and happiness in your personal and professional life, Charisma University might be the perfect course for you.
- FAQs
-What is charisma?
-Charisma is the ability to attract, influence, and inspire others with your personality and presence. It is a combination of confidence, charm, humor, warmth, authenticity, and other qualities that make people like you and want to follow you.
-Why is charisma important?
-Charisma is important because it can help you achieve your goals and dreams in life. With charisma, you can make a great impression on anyone you meet, connect with people easily, tell captivating stories, and lead with influence. You can also enjoy more happiness, confidence, and fulfillment in your life.
-Can charisma be learned?
-Yes, charisma can be learned. While some people may have a natural talent for charisma, anyone can improve their charisma with practice and guidance. Charisma is not a fixed trait that you are born with or without. It is a skill that you can develop and refine over time.
-How long does it take to complete Charisma University?
-Charisma University is designed to be completed in 6 weeks. Each week consists of several video lessons that last about 15 minutes each. You will also need to spend some time doing the exercises in the daily action guides and worksheets. However, you can go at your own pace and access the program anytime from any device.
-How much does Charisma University cost?
-Charisma University costs $597 if you pay in full or $119 if you pay in six monthly installments. Both options come with a 60-day money-back guarantee and lifetime access.
-
-
\ No newline at end of file
diff --git a/spaces/realgenius/NousResearch-Yarn-Mistral-7b-128k/app.py b/spaces/realgenius/NousResearch-Yarn-Mistral-7b-128k/app.py
deleted file mode 100644
index d5e12988d70a9beb1556e0db3295fa7a1ccf0306..0000000000000000000000000000000000000000
--- a/spaces/realgenius/NousResearch-Yarn-Mistral-7b-128k/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
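-# Load the hosted NousResearch/Yarn-Mistral-7b-128k model from the Hugging Face Hub
-# and expose it through a minimal auto-generated Gradio demo UI.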
-gr.Interface.load("models/NousResearch/Yarn-Mistral-7b-128k").launch()
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Airy Youtube Downloader Crack Pro [PORTABLE].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Airy Youtube Downloader Crack Pro [PORTABLE].md
deleted file mode 100644
index fd537bb76c18b0857f7147aa03e20da5bff5633b..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Airy Youtube Downloader Crack Pro [PORTABLE].md
+++ /dev/null
@@ -1,81 +0,0 @@
-
-airy youtube downloader crack pro Download Zip ✵✵✵ https://urlgoal.com/2uCKtE
-Alternatives to Airy YouTube Downloader Crack Pro
-If you are looking for some alternatives to Airy YouTube Downloader Crack Pro, you may want to check out these other software that can also download YouTube videos to your computer:
-
-4K Video Downloader : This is a software that lets you download YouTube videos in 4K quality. You can also download playlists, channels, subtitles, and 360-degree videos. You can also extract audio from YouTube videos and save them as MP3, M4A, or OGG. You can also download videos from other sites such as Vimeo, TikTok, Facebook, etc.
-YTD Video Downloader : This is a software that lets you download YouTube videos in various formats and resolutions. You can also convert YouTube videos to MP3 or other formats. You can also download videos from other sites such as Dailymotion, Metacafe, etc.
-Freemake Video Downloader : This is a software that lets you download YouTube videos in HD quality. You can also download playlists, channels, and user favorites. You can also convert YouTube videos to MP3 or other formats. You can also download videos from other sites such as Facebook, Vimeo, etc.
-
-FAQs about Airy YouTube Downloader Crack Pro
-Here are some frequently asked questions and answers about Airy YouTube Downloader Crack Pro:
-
-Is Airy YouTube Downloader Crack Pro safe to use?
-Airy YouTube Downloader Crack Pro may not be safe to use because it is cracked software that may contain viruses, malware, or spyware. It may also violate the terms and conditions of YouTube and other websites. It may also expose your personal information or data to hackers or third parties. Therefore, it is recommended to use a reliable antivirus program and a VPN service when using Airy YouTube Downloader Crack Pro.
-Is Airy YouTube Downloader Crack Pro legal to use?
-Airy YouTube Downloader Crack Pro may not be legal to use because it is cracked software that may infringe the intellectual property rights of the original developers and owners of the software. It may also violate the copyright laws and regulations of the countries where you use it. It may also breach the privacy and security policies of YouTube and other websites. Therefore, it is advised to use Airy YouTube Downloader Crack Pro at your own risk and responsibility.
-
-How to uninstall Airy YouTube Downloader Crack Pro?
-To uninstall Airy YouTube Downloader Crack Pro, you need to follow these steps:
-
-Go to the Control Panel on your computer.
-Select Programs and Features or Add or Remove Programs.
-Find Airy YouTube Downloader Crack Pro in the list of programs and click on Uninstall.
-Follow the instructions on the screen to complete the uninstallation.
-Delete any leftover files or folders related to Airy YouTube Downloader Crack Pro on your computer.
-
-
-How to Contact Airy YouTube Downloader Crack Pro Support?
-If you have any questions, issues, or feedback about Airy YouTube Downloader Crack Pro, you may want to contact the support team of the software. However, since it is a cracked software, you may not get any official or reliable support from the original developers or owners of the software. You may also face some legal or ethical consequences for using a cracked software. Therefore, it is suggested to use Airy YouTube Downloader Crack Pro at your own risk and discretion.
-However, if you still want to contact the support team of Airy YouTube Downloader Crack Pro, you can try these methods:
-
-Email: You can send an email to the support team of Airy YouTube Downloader Crack Pro at [email protected]. You can also use the contact form on their website at https://airy-youtube-downloader.com/contact-us/. However, you may not get any response or solution from them.
-Phone: You can call the support team of Airy YouTube Downloader Crack Pro at +1-888-881-9070. However, you may not get any answer or assistance from them.
-Live Chat: You can chat with the support team of Airy YouTube Downloader Crack Pro on their website at https://airy-youtube-downloader.com/. However, you may not get any reply or help from them.
-
-How to Update Airy YouTube Downloader Crack Pro?
-If you are using Airy YouTube Downloader Crack Pro, you may want to update it to the latest version to enjoy the new features and improvements. However, since it is a cracked software, you may not be able to update it automatically or manually. You may also face some errors or issues while updating it. You may also lose the crack or activation of the software after updating it. Therefore, it is advised to use Airy YouTube Downloader Crack Pro at your own risk and responsibility.
-However, if you still want to update Airy YouTube Downloader Crack Pro, you can try these methods:
-
-Automatic Update : You can check if there is any automatic update available for Airy YouTube Downloader Crack Pro by opening the software and clicking on the "Help" menu. Then, click on "Check for Updates" and see if there is any new version available. If there is, click on "Download and Install" and follow the instructions on the screen. However, you may not get any automatic update for Airy YouTube Downloader Crack Pro because it is a cracked software.
-Manual Update : You can check if there is any manual update available for Airy YouTube Downloader Crack Pro by visiting the websites that offer Airy YouTube Downloader Crack Pro for free download, such as FileCR, Pesktop, or Gandhishipping. Then, look for the latest version of Airy YouTube Downloader Crack Pro and download it to your computer. After downloading it, you need to uninstall the old version of Airy YouTube Downloader Crack Pro and install the new version. However, you may not get any manual update for Airy YouTube Downloader Crack Pro because it is a cracked software.
-
-How to Uninstall Airy YouTube Downloader Crack Pro?
-If you want to uninstall Airy YouTube Downloader Crack Pro from your computer, you need to follow these steps:
-
-Go to the Control Panel on your computer.
-Select Programs and Features or Add or Remove Programs.
-Find Airy YouTube Downloader Crack Pro in the list of programs and click on Uninstall.
-Follow the instructions on the screen to complete the uninstallation.
-Delete any leftover files or folders related to Airy YouTube Downloader Crack Pro on your computer.
-
-How to Use Airy YouTube Downloader Crack Pro with Other Devices?
-If you want to use Airy YouTube Downloader Crack Pro with other devices such as smartphones, tablets, or TVs, you need to follow these steps:
-
-Download and install Airy YouTube Downloader Crack Pro on your computer.
-Download and install a media server software on your computer, such as Plex, Kodi, or VLC.
-Launch the media server software and add the folder where you save your downloaded videos from Airy YouTube Downloader Crack Pro.
-Connect your other devices to the same network as your computer.
-Launch the media server app on your other devices and look for the folder where you save your downloaded videos from Airy YouTube Downloader Crack Pro.
-Select the video that you want to watch and enjoy it on your other devices.
-
-How to Troubleshoot Airy YouTube Downloader Crack Pro?
-If you face any problems or errors while using Airy YouTube Downloader Crack Pro, you can try these solutions:
-
-Check your internet connection : Make sure that your internet connection is stable and fast enough to download YouTube videos. You can also try using a different browser or device to access YouTube.
-Check your firewall or antivirus settings : Make sure that your firewall or antivirus software is not blocking or deleting Airy YouTube Downloader Crack Pro or its files. You can also try disabling or adding an exception for Airy YouTube Downloader Crack Pro in your firewall or antivirus settings.
-Check your video URL : Make sure that the video URL that you paste into Airy YouTube Downloader Crack Pro is valid and correct. You can also try copying the video URL from a different source or site.
-Check your video format and resolution : Make sure that the video format and resolution that you select in Airy YouTube Downloader Crack Pro are compatible with your device and player. You can also try changing the video format and resolution to a different option.
-Check your crack or activation : Make sure that your crack or activation of Airy YouTube Downloader Crack Pro is working properly and not expired or corrupted. You can also try reapplying the crack or activation of Airy YouTube Downloader Crack Pro.
-
-Conclusion
-Airy YouTube Downloader Crack Pro is a program that lets you download YouTube videos to your computer with ease. You can download videos in various formats and resolutions, and also convert them to MP3. You can also share downloaded videos with your friends and organize them with the bookmark manager function. However, you should also be aware of the risks and limitations of using cracked software. It may not be safe, legal, or reliable to use. It may also violate the rights and policies of YouTube and other websites. It may also expose your personal information or data to hackers or third parties. Therefore, we recommend using legitimate, licensed software instead of Airy YouTube Downloader Crack Pro. We hope this article was helpful and informative for you. If you have any questions or feedback, please feel free to comment below.
-
-
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cambridge English Pronouncing Dictionary 17th Edition HOT Download.rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cambridge English Pronouncing Dictionary 17th Edition HOT Download.rar.md
deleted file mode 100644
index f21d142d440d3d2fcadc9413ba1f34cd14a54733..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cambridge English Pronouncing Dictionary 17th Edition HOT Download.rar.md
+++ /dev/null
@@ -1,89 +0,0 @@
-
-How to Download Cambridge English Pronouncing Dictionary 17th Edition in RAR Format
-
-If you are looking for a reliable and comprehensive source of English pronunciation, you might want to download Cambridge English Pronouncing Dictionary 17th Edition in RAR format. This dictionary is one of the most widely used and respected guides to English pronunciation, covering over 230,000 words and phrases in both British and American English. It also includes information on regional and social variations, as well as the pronunciation of proper names, foreign words, and abbreviations.
-Cambridge English Pronouncing Dictionary 17th Edition Download.rar Download … https://urlgoal.com/2uCJXR
-
-Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is easy and convenient, as it allows you to save space and time. RAR is a compressed file format that can reduce the size of large files and make them easier to transfer and store. However, you will need a special software to open and extract the files from the RAR archive.
-
-Steps to Download Cambridge English Pronouncing Dictionary 17th Edition in RAR Format
-
-Here are the steps you need to follow to download Cambridge English Pronouncing Dictionary 17th Edition in RAR format:
-
-
-Go to the Internet Archive website and search for "Cambridge English Pronouncing Dictionary 17th Edition". You will find several results that contain the dictionary in different formats.
-Select the result that has the RAR extension in the file name. For example, "cambridge-english-pronouncing-dictionary-17th-edition_202012.rar". This is the file you want to download.
-Click on the "DOWNLOAD OPTIONS" button and choose "RAR" from the list. This will start the download process.
-Wait for the download to finish. Depending on your internet speed and the file size, this may take a few minutes or longer.
-Once the download is complete, locate the file on your computer. It should be in your default download folder or wherever you chose to save it.
-Right-click on the file and select "Extract Here" or "Extract to" from the menu. You will need software that can handle RAR files, such as WinRAR or 7-Zip. If you don't have one, you can download and install it from their official websites. (If you prefer the command line, see the short extraction sketch just after this list.)
-After extracting the files, you will see a folder that contains the dictionary files. You can open them with any PDF reader or browser.
-
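-For readers who prefer scripting the extraction over right-clicking, here is a minimal sketch in Python that shells out to the 7-Zip command-line tool. It assumes 7-Zip is installed and available on your PATH as `7z`; the archive name is only the example file name mentioned in the steps above, so adjust it to whatever you actually downloaded.
-
-```python
-import subprocess
-from pathlib import Path
-
-# Example archive name from the steps above; change it to your actual download.
-archive = Path("cambridge-english-pronouncing-dictionary-17th-edition_202012.rar")
-output_dir = Path("cepd-17th-edition")
-
-# "x" extracts with full paths; "-o<dir>" sets the output folder (no space after -o).
-subprocess.run(["7z", "x", str(archive), f"-o{output_dir}"], check=True)
-
-# List what was unpacked so you can open the files with any PDF reader.
-for item in sorted(output_dir.iterdir()):
-    print(item.name)
-```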
-
-Benefits of Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR Format
-
-Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format has several benefits, such as:
-
-
-It saves space on your computer or device, as RAR files are smaller than other formats.
-It reduces the download time, as RAR files are faster to transfer and download.
-It preserves the quality and integrity of the original files, as RAR files are lossless and secure.
-It allows you to access the dictionary offline, without needing an internet connection.
-
-
-Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is a smart and convenient way to improve your English pronunciation skills. With this dictionary, you can learn how to pronounce any word or phrase correctly and confidently.
-
-Features of Cambridge English Pronouncing Dictionary 17th Edition
-
-Cambridge English Pronouncing Dictionary 17th Edition is not just a simple dictionary of pronunciation. It is also a rich source of information and guidance on various aspects of English language and culture. Some of the features of this dictionary are:
-
-
-It covers over 230,000 words and phrases in both British and American English, as well as other varieties of English spoken around the world.
-It includes the pronunciation of proper names, foreign words, abbreviations, acronyms, and symbols.
-It provides information on regional and social variations in pronunciation, such as Cockney, Scottish, Australian, Indian, or African American English.
-It explains the relationship between spelling and sound, and the rules and patterns of English pronunciation.
-It offers advice on common areas of difficulty, such as stress, intonation, rhythm, and weak forms.
-It contains lively study pages that illustrate and practice various topics related to pronunciation, such as homophones, word stress, or silent letters.
-
-
-Cambridge English Pronouncing Dictionary 17th Edition is an essential tool for anyone who wants to master the pronunciation of English. Whether you are a student, a teacher, a professional, or a casual learner, you will find this dictionary invaluable and enjoyable.
-
-Why You Should Download Cambridge English Pronouncing Dictionary 17th Edition in RAR Format
-
-Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is a smart choice for several reasons. First of all, you will get access to the most up-to-date and authoritative pronouncing dictionary in English. You will be able to learn how to pronounce any word or phrase correctly and confidently. You will also be able to improve your listening and speaking skills by following the clear and accurate audio recordings that accompany the dictionary entries.
-
-Secondly, you will save space and time by downloading the dictionary in RAR format. RAR is a compressed file format that can reduce the size of large files and make them easier to transfer and store. You will be able to download the dictionary faster and use less storage space on your computer or device. You will also be able to open and extract the files from the RAR archive with a simple software that you can download and install for free.
-
-Thirdly, you will be able to use the dictionary offline, without needing an internet connection. You will be able to access the dictionary anytime and anywhere you want. You will also be able to print or copy the dictionary pages if you need to. You will have more flexibility and convenience in using the dictionary as you wish.
-
-Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is a wise and convenient way to improve your English pronunciation skills. With this dictionary, you will be able to learn how to pronounce any word or phrase correctly and confidently.
-How to Use Cambridge English Pronouncing Dictionary 17th Edition
-
-Once you have downloaded and extracted Cambridge English Pronouncing Dictionary 17th Edition in RAR format, you can start using it right away. Here are some tips on how to use the dictionary effectively:
-
-
-To find the pronunciation of a word or phrase, type it in the search box or browse the alphabetical list of entries. You will see the word or phrase in bold, followed by its phonetic transcription and audio recording. You can click on the speaker icon to listen to the pronunciation. You can also see the word class, the origin, and the usage notes of the word or phrase.
-To learn more about a specific aspect of pronunciation, go to the study pages at the end of the dictionary. You will find various topics related to pronunciation, such as homophones, word stress, or silent letters. Each topic has an explanation, examples, and exercises to help you understand and practice.
-To compare the pronunciation of different varieties of English, go to the regional and social variations section at the beginning of the dictionary. You will find information on how English is pronounced in different regions and social groups, such as Cockney, Scottish, Australian, Indian, or African American English. You will also find audio recordings of speakers from different backgrounds.
-
-
-Cambridge English Pronouncing Dictionary 17th Edition is a user-friendly and comprehensive dictionary that will help you improve your pronunciation skills. You can use it as a reference, a guide, or a learning tool. You can also customize it according to your preferences and needs.
-
-Conclusion
-
-Pronunciation is an important part of learning and communicating in English. It can affect your confidence, your comprehension, and your impression on others. That's why you need a reliable and authoritative source of pronunciation, such as Cambridge English Pronouncing Dictionary 17th Edition.
-
-Downloading Cambridge English Pronouncing Dictionary 17th Edition in RAR format is a smart and convenient way to access this dictionary. You will be able to save space and time, as well as use the dictionary offline. You will also be able to enjoy the features and benefits of this dictionary, such as:
-
-
-It covers over 230,000 words and phrases in both British and American English, as well as other varieties of English spoken around the world.
-It includes the pronunciation of proper names, foreign words, abbreviations, acronyms, and symbols.
-It provides information on regional and social variations in pronunciation.
-It explains the relationship between spelling and sound, and the rules and patterns of English pronunciation.
-It offers advice on common areas of difficulty.
-It contains lively study pages that illustrate and practice various topics related to pronunciation.
-
-
-If you want to improve your pronunciation skills and learn how to pronounce any word or phrase correctly and confidently, you should download Cambridge English Pronouncing Dictionary 17th Edition in RAR format today. It is an essential tool for anyone who wants to master the pronunciation of English.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Keygen Product Design Manufacturing Collection 2019 Download.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Keygen Product Design Manufacturing Collection 2019 Download.md
deleted file mode 100644
index 2bacd289e3d99365ce4bf43e1b8309b93fbbc63a..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crack LINK Keygen Product Design Manufacturing Collection 2019 Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-crack Keygen Product Design Manufacturing Collection 2019 download DOWNLOAD ❤ https://urlgoal.com/2uCJbT
-
-Subscribe to Product Design & Manufacturing Collection to get the tools you ... Trade-in your perpetual license serial number now and save 20% on a 3-year ... 4d29de3e1b
-
-
-
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crypto Box Dongle Emulator 11.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crypto Box Dongle Emulator 11.md
deleted file mode 100644
index bde9b0ec410071cf67851655951e9df83b865509..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Crypto Box Dongle Emulator 11.md
+++ /dev/null
@@ -1,52 +0,0 @@
-crypto box dongle emulator 11 DOWNLOAD ★★★ https://urlgoal.com/2uCJC2
-
-.3
-
-Download crypto box dongle emulator free
-
-The recently discovered malware known as Crypto box dongle emulator is designed to steal personal data from infected computers and to spread to other machines through free downloads such as skidrow.com/download-bitcoin-client/ and similar malicious programs.
-
-Crypto box dongle emulator is a piece of sophisticated malware that does its work quietly and skilfully behind the scenes. At the same time, the virus creates plenty of annoyances: it adds a special web browser that its operators use to monitor the victims' activity.
-
-It is one of the most dangerous threats that a hacker can imagine. Once the malware has been activated on the computer, the virus can then:
-
-Activate the camera.
-
-Capture keystrokes from the victim’s keyboard.
-
-Steal the victim’s contacts, including names, phone numbers and emails.
-
-Encrypt passwords on your PC.
-
-Install itself on your Windows system as a Startup item.
-
-Create a shortcut on your desktop and start the browser Crypto box dongle emulator automatically.
-
-This virus is very hard to delete. The malware works in stealth mode; moreover, it often evades detection by anti-malware programs, which makes it quite difficult to remove.
-
-Crypto box dongle emulator: one of the most dangerous threats
-
-It is not difficult to track down the source of this virus, because it is quite easy to identify the file that contains the malware and its directory path on your hard disk. The file can be found in: C:\Documents and Settings\user\Application Data\Broker or C:\Users\user\AppData\Roaming\Broker
-
-The folder named Crypto box dongle emulator contains the following files:
-
-C:\Documents and Settings\user\Application Data\Broker\Macro,
-
-Crypto box dongle emulator.dll,
-
-Crypto box dongle emulator.exe,
-
-Crypto box dongle emulator.conf,
-
-Crypto box dongle emulator.xml,
-
-Crypto box dongle emulator.ini,
-
-Crypto box dongle emulator.dat,
-
-Crypto box dongle emulator.crt.
-
-The virus is very dangerous for your PC and a real reason to worry. It is not just a nuisance; it could be used by hackers to steal 4fefd39f24
-
-
-
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Torrent Crack Apowersoft Gestionnaire De 11.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Torrent Crack Apowersoft Gestionnaire De 11.md
deleted file mode 100644
index aa4468c3bc060367a7fc9a5a8e0f2518488dee33..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Torrent Crack Apowersoft Gestionnaire De 11.md
+++ /dev/null
@@ -1,6 +0,0 @@
-download torrent crack apowersoft gestionnaire de 11 Download ————— https://urlgoal.com/2uCKnC
-
- 899543212b
-
-
-
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Thedarkknightrises720ptamildubbed).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Thedarkknightrises720ptamildubbed).md
deleted file mode 100644
index bf887f06ca1f7f1e069630f5aed2c8f90bf67a77..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Thedarkknightrises720ptamildubbed).md
+++ /dev/null
@@ -1,14 +0,0 @@
-HD Online Player (Thedarkknightrises720ptamildubbed) Download ✸ https://urlgoal.com/2uCMeR
-
-by dark players.. 14-Nov-2019 - (HD-720p, 1080p) The Dark Knight Rises FULL MOVIE - 2012 Download FREE. House on Haunted Hill - HD-DVDRip.
-House by the lake (The Lake House).
-Film: The Lake House. Original title: The Lake House. Country: USA. Release year: 2006. Genre: Melodrama.
-The film is very emotional, light and positive.
-The Lake House (2006) online.
-Watch The Lake House movie online in high quality for free.
-The Lake House online movie in good quality, without registration and SMS.
-The Lake House 2006 HD - 720p: Download torrent.
-The Lake House online movie in good quality, without registration and SMS. 8a78ff9644
-
-
-
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (godzilla 1998 Download 720p Movie).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (godzilla 1998 Download 720p Movie).md
deleted file mode 100644
index 821bc891d9cdc573c85552e2fd3a613799da2355..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (godzilla 1998 Download 720p Movie).md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-If you want me to write another article, please provide a different query that does not involve the same keyword. You can also ask me to edit or improve the existing article, or to write a summary or a conclusion for it.
-
-Thank you for your cooperation.?
-HD Online Player (godzilla 1998 download 720p movie) Download File ★★★★★ https://urlgoal.com/2uCL1X
-
-
-Thank you for your cooperation and interest. Goodbye.? 3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/ririah13/Test/README.md b/spaces/ririah13/Test/README.md
deleted file mode 100644
index 2ace8e6f8db7834acb3c638f42edd92d6c6bf0e1..0000000000000000000000000000000000000000
--- a/spaces/ririah13/Test/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Test
-emoji: 😻
-colorFrom: indigo
-colorTo: green
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/robinhad/qirimtatar-tts/README.md b/spaces/robinhad/qirimtatar-tts/README.md
deleted file mode 100644
index 99ae061bd86821aaf04ffde16b217041d91e9387..0000000000000000000000000000000000000000
--- a/spaces/robinhad/qirimtatar-tts/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-title: Qirimtatar Tts
-emoji: 🦀
-colorFrom: gray
-colorTo: red
-sdk: gradio
-sdk_version: 3.40.1
-python_version: '3.10'
-app_file: app.py
-pinned: false
----
-
-# Crimean Tatar (Qirimtatar) TTS
-Text-to-Speech for Crimean Tatar language
-
-[Demo on Hugging Face Spaces](https://huggingface.co/spaces/robinhad/qirimtatar-tts)
-
-Source code: https://github.com/robinhad/qirimtatar-tts
-Online demo: https://huggingface.co/spaces/robinhad/qirimtatar-tts
-You're welcome to join UA Speech Recognition and Synthesis community: Telegram https://t.me/speech_recognition_uk
-Note: demo input is saved to improve Text-to-Speech engine and demo experience. By using this demo you give your consent to this.
-
-## Examples
-Test sentence:
-`Qırımtatarlar üç subetnik gruppasından er birisiniñ (tatlar, noğaylar ve yalıboylular) öz şivesi bar.`
-
-### Kemal
-
-https://user-images.githubusercontent.com/5759207/200072078-7ab22d95-73d3-4eb7-ab9f-6f0dadc950c1.mp4
-
-### Nuri
-
-https://user-images.githubusercontent.com/5759207/200072104-ab1c204a-fd16-43f4-94a9-bc8871a7c2e3.mp4
-
-### Arslan
-
-https://user-images.githubusercontent.com/5759207/200072123-e2816c40-9ecb-4a6f-9136-51fffc42f258.mp4
-
-# Attribution
-
-- Model training - [Yurii Paniv @robinhad](https://github.com/robinhad)
-- Crimean Tatar dataset - [Yehor Smoliakov @egorsmkv](https://github.com/egorsmkv)
-- Huge thanks for voice to: Nuri, Arslan, Kemal
-- Transliteration: [prosvita/crh.transliteration](https://github.com/prosvita/crh.transliteration)
diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/trident_resnet.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/trident_resnet.py
deleted file mode 100644
index 013ba64b59d81e5be3a3f00b65c6a76915247c9d..0000000000000000000000000000000000000000
--- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/trident_resnet.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as cp
-from mmcv.cnn import build_conv_layer, build_norm_layer
-from mmcv.runner import BaseModule
-from torch.nn.modules.utils import _pair
-
-from mmdet.models.backbones.resnet import Bottleneck, ResNet
-from mmdet.models.builder import BACKBONES
-
-
-class TridentConv(BaseModule):
- """Trident Convolution Module.
-
- Args:
- in_channels (int): Number of channels in input.
- out_channels (int): Number of channels in output.
- kernel_size (int): Size of convolution kernel.
- stride (int, optional): Convolution stride. Default: 1.
- trident_dilations (tuple[int, int, int], optional): Dilations of
- different trident branch. Default: (1, 2, 3).
- test_branch_idx (int, optional): In inference, all 3 branches will
- be used if `test_branch_idx==-1`, otherwise only branch with
- index `test_branch_idx` will be used. Default: 1.
- bias (bool, optional): Whether to use bias in convolution or not.
- Default: False.
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Default: None
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- trident_dilations=(1, 2, 3),
- test_branch_idx=1,
- bias=False,
- init_cfg=None):
- super(TridentConv, self).__init__(init_cfg)
- self.num_branch = len(trident_dilations)
- self.with_bias = bias
- self.test_branch_idx = test_branch_idx
- self.stride = _pair(stride)
- self.kernel_size = _pair(kernel_size)
- self.paddings = _pair(trident_dilations)
- self.dilations = trident_dilations
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.bias = bias
-
- self.weight = nn.Parameter(
- torch.Tensor(out_channels, in_channels, *self.kernel_size))
- if bias:
- self.bias = nn.Parameter(torch.Tensor(out_channels))
- else:
- self.bias = None
-
- def extra_repr(self):
- tmpstr = f'in_channels={self.in_channels}'
- tmpstr += f', out_channels={self.out_channels}'
- tmpstr += f', kernel_size={self.kernel_size}'
- tmpstr += f', num_branch={self.num_branch}'
- tmpstr += f', test_branch_idx={self.test_branch_idx}'
- tmpstr += f', stride={self.stride}'
- tmpstr += f', paddings={self.paddings}'
- tmpstr += f', dilations={self.dilations}'
- tmpstr += f', bias={self.bias}'
- return tmpstr
-
- def forward(self, inputs):
- if self.training or self.test_branch_idx == -1:
- outputs = [
- F.conv2d(input, self.weight, self.bias, self.stride, padding,
- dilation) for input, dilation, padding in zip(
- inputs, self.dilations, self.paddings)
- ]
- else:
- assert len(inputs) == 1
- outputs = [
- F.conv2d(inputs[0], self.weight, self.bias, self.stride,
- self.paddings[self.test_branch_idx],
- self.dilations[self.test_branch_idx])
- ]
-
- return outputs
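-
-# Illustrative usage sketch for TridentConv (not part of the original OpenMMLab
-# module; the tensor sizes are made up for the example). The three branch
-# inputs share one convolution weight but use different paddings/dilations,
-# so a 3x3 kernel keeps the input resolution on every branch:
-#
-#   conv = TridentConv(16, 32, kernel_size=3, trident_dilations=(1, 2, 3))
-#   branches = [torch.randn(2, 16, 56, 56) for _ in range(3)]
-#   outs = conv(branches)  # training mode: one output per branch
-#   assert all(o.shape == (2, 32, 56, 56) for o in outs)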
-
-
-# Since TridentNet is defined over ResNet50 and ResNet101, here we
-# only support TridentBottleneckBlock.
-class TridentBottleneck(Bottleneck):
- """BottleBlock for TridentResNet.
-
- Args:
- trident_dilations (tuple[int, int, int]): Dilations of different
- trident branch.
- test_branch_idx (int): In inference, all 3 branches will be used
- if `test_branch_idx==-1`, otherwise only branch with index
- `test_branch_idx` will be used.
- concat_output (bool): Whether to concat the output list to a Tensor.
- `True` only in the last Block.
- """
-
- def __init__(self, trident_dilations, test_branch_idx, concat_output,
- **kwargs):
-
- super(TridentBottleneck, self).__init__(**kwargs)
- self.trident_dilations = trident_dilations
- self.num_branch = len(trident_dilations)
- self.concat_output = concat_output
- self.test_branch_idx = test_branch_idx
- self.conv2 = TridentConv(
- self.planes,
- self.planes,
- kernel_size=3,
- stride=self.conv2_stride,
- bias=False,
- trident_dilations=self.trident_dilations,
- test_branch_idx=test_branch_idx,
- init_cfg=dict(
- type='Kaiming',
- distribution='uniform',
- mode='fan_in',
- override=dict(name='conv2')))
-
- def forward(self, x):
-
- def _inner_forward(x):
- num_branch = (
- self.num_branch
- if self.training or self.test_branch_idx == -1 else 1)
- identity = x
- if not isinstance(x, list):
- x = (x, ) * num_branch
- identity = x
- if self.downsample is not None:
- identity = [self.downsample(b) for b in x]
-
- out = [self.conv1(b) for b in x]
- out = [self.norm1(b) for b in out]
- out = [self.relu(b) for b in out]
-
- if self.with_plugins:
- for k in range(len(out)):
- out[k] = self.forward_plugin(out[k],
- self.after_conv1_plugin_names)
-
- out = self.conv2(out)
- out = [self.norm2(b) for b in out]
- out = [self.relu(b) for b in out]
- if self.with_plugins:
- for k in range(len(out)):
- out[k] = self.forward_plugin(out[k],
- self.after_conv2_plugin_names)
-
- out = [self.conv3(b) for b in out]
- out = [self.norm3(b) for b in out]
-
- if self.with_plugins:
- for k in range(len(out)):
- out[k] = self.forward_plugin(out[k],
- self.after_conv3_plugin_names)
-
- out = [
- out_b + identity_b for out_b, identity_b in zip(out, identity)
- ]
- return out
-
- if self.with_cp and x.requires_grad:
- out = cp.checkpoint(_inner_forward, x)
- else:
- out = _inner_forward(x)
-
- out = [self.relu(b) for b in out]
- if self.concat_output:
- out = torch.cat(out, dim=0)
- return out
-
-
-def make_trident_res_layer(block,
- inplanes,
- planes,
- num_blocks,
- stride=1,
- trident_dilations=(1, 2, 3),
- style='pytorch',
- with_cp=False,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- dcn=None,
- plugins=None,
- test_branch_idx=-1):
- """Build Trident Res Layers."""
-
- downsample = None
- if stride != 1 or inplanes != planes * block.expansion:
- downsample = []
- conv_stride = stride
- downsample.extend([
- build_conv_layer(
- conv_cfg,
- inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=conv_stride,
- bias=False),
- build_norm_layer(norm_cfg, planes * block.expansion)[1]
- ])
- downsample = nn.Sequential(*downsample)
-
- layers = []
- for i in range(num_blocks):
- layers.append(
- block(
- inplanes=inplanes,
- planes=planes,
- stride=stride if i == 0 else 1,
- trident_dilations=trident_dilations,
- downsample=downsample if i == 0 else None,
- style=style,
- with_cp=with_cp,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- dcn=dcn,
- plugins=plugins,
- test_branch_idx=test_branch_idx,
- concat_output=True if i == num_blocks - 1 else False))
- inplanes = planes * block.expansion
- return nn.Sequential(*layers)
-
-
-@BACKBONES.register_module()
-class TridentResNet(ResNet):
- """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
- ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
- normal BottleBlock to yield trident output. Different branch shares the
- convolution weight but uses different dilations to achieve multi-scale
- output.
-
- / stage3(b0) \
- x - stem - stage1 - stage2 - stage3(b1) - output
- \ stage3(b2) /
-
- Args:
- depth (int): Depth of resnet, from {50, 101, 152}.
- num_branch (int): Number of branches in TridentNet.
- test_branch_idx (int): In inference, all 3 branches will be used
- if `test_branch_idx==-1`, otherwise only branch with index
- `test_branch_idx` will be used.
- trident_dilations (tuple[int]): Dilations of different trident branch.
- len(trident_dilations) should be equal to num_branch.
- """ # noqa
-
- def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
- **kwargs):
-
- assert num_branch == len(trident_dilations)
- assert depth in (50, 101, 152)
- super(TridentResNet, self).__init__(depth, **kwargs)
- assert self.num_stages == 3
- self.test_branch_idx = test_branch_idx
- self.num_branch = num_branch
-
- last_stage_idx = self.num_stages - 1
- stride = self.strides[last_stage_idx]
- dilation = trident_dilations
- dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
- if self.plugins is not None:
- stage_plugins = self.make_stage_plugins(self.plugins,
- last_stage_idx)
- else:
- stage_plugins = None
- planes = self.base_channels * 2**last_stage_idx
- res_layer = make_trident_res_layer(
- TridentBottleneck,
- inplanes=(self.block.expansion * self.base_channels *
- 2**(last_stage_idx - 1)),
- planes=planes,
- num_blocks=self.stage_blocks[last_stage_idx],
- stride=stride,
- trident_dilations=dilation,
- style=self.style,
- with_cp=self.with_cp,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- dcn=dcn,
- plugins=stage_plugins,
- test_branch_idx=self.test_branch_idx)
-
- layer_name = f'layer{last_stage_idx + 1}'
-
- self.__setattr__(layer_name, res_layer)
- self.res_layers.pop(last_stage_idx)
- self.res_layers.insert(last_stage_idx, layer_name)
-
- self._freeze_stages()
diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/maskformer_head.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/maskformer_head.py
deleted file mode 100644
index 566dc074059ef770892d2916e7c44fa54b0f8758..0000000000000000000000000000000000000000
--- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/maskformer_head.py
+++ /dev/null
@@ -1,556 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init
-from mmcv.cnn.bricks.transformer import (build_positional_encoding,
- build_transformer_layer_sequence)
-from mmcv.runner import force_fp32
-
-from mmdet.core import build_assigner, build_sampler, multi_apply, reduce_mean
-from mmdet.models.utils import preprocess_panoptic_gt
-from ..builder import HEADS, build_loss
-from .anchor_free_head import AnchorFreeHead
-
-
-@HEADS.register_module()
-class MaskFormerHead(AnchorFreeHead):
- """Implements the MaskFormer head.
-
- See `Per-Pixel Classification is Not All You Need for Semantic
- Segmentation <https://arxiv.org/abs/2107.06278>`_ for details.
-
- Args:
- in_channels (list[int]): Number of channels in the input feature map.
- feat_channels (int): Number of channels for feature.
- out_channels (int): Number of channels for output.
- num_things_classes (int): Number of things.
- num_stuff_classes (int): Number of stuff.
- num_queries (int): Number of query in Transformer.
- pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel
- decoder. Defaults to None.
- enforce_decoder_input_project (bool, optional): Whether to add a layer
- to change the embed_dim of transformer encoder in pixel decoder to
- the embed_dim of transformer decoder. Defaults to False.
- transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for
- transformer decoder. Defaults to None.
- positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for
- transformer decoder position encoding. Defaults to None.
- loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification
- loss. Defaults to `CrossEntropyLoss`.
- loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss.
- Defaults to `FocalLoss`.
- loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss.
- Defaults to `DiceLoss`.
- train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of
- Maskformer head.
- test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of Maskformer
- head.
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Defaults to None.
- """
-
- def __init__(self,
- in_channels,
- feat_channels,
- out_channels,
- num_things_classes=80,
- num_stuff_classes=53,
- num_queries=100,
- pixel_decoder=None,
- enforce_decoder_input_project=False,
- transformer_decoder=None,
- positional_encoding=None,
- loss_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=1.0,
- class_weight=[1.0] * 133 + [0.1]),
- loss_mask=dict(
- type='FocalLoss',
- use_sigmoid=True,
- gamma=2.0,
- alpha=0.25,
- loss_weight=20.0),
- loss_dice=dict(
- type='DiceLoss',
- use_sigmoid=True,
- activate=True,
- naive_dice=True,
- loss_weight=1.0),
- train_cfg=None,
- test_cfg=None,
- init_cfg=None,
- **kwargs):
- super(AnchorFreeHead, self).__init__(init_cfg)
- self.num_things_classes = num_things_classes
- self.num_stuff_classes = num_stuff_classes
- self.num_classes = self.num_things_classes + self.num_stuff_classes
- self.num_queries = num_queries
-
- pixel_decoder.update(
- in_channels=in_channels,
- feat_channels=feat_channels,
- out_channels=out_channels)
- self.pixel_decoder = build_plugin_layer(pixel_decoder)[1]
- self.transformer_decoder = build_transformer_layer_sequence(
- transformer_decoder)
- self.decoder_embed_dims = self.transformer_decoder.embed_dims
- pixel_decoder_type = pixel_decoder.get('type')
- if pixel_decoder_type == 'PixelDecoder' and (
- self.decoder_embed_dims != in_channels[-1]
- or enforce_decoder_input_project):
- self.decoder_input_proj = Conv2d(
- in_channels[-1], self.decoder_embed_dims, kernel_size=1)
- else:
- self.decoder_input_proj = nn.Identity()
- self.decoder_pe = build_positional_encoding(positional_encoding)
- self.query_embed = nn.Embedding(self.num_queries, out_channels)
-
- self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1)
- self.mask_embed = nn.Sequential(
- nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),
- nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),
- nn.Linear(feat_channels, out_channels))
-
- self.test_cfg = test_cfg
- self.train_cfg = train_cfg
- if train_cfg:
- self.assigner = build_assigner(train_cfg.get('assigner', None))
- self.sampler = build_sampler(
- train_cfg.get('sampler', None), context=self)
-
- self.class_weight = loss_cls.get('class_weight', None)
- self.loss_cls = build_loss(loss_cls)
- self.loss_mask = build_loss(loss_mask)
- self.loss_dice = build_loss(loss_dice)
-
- def init_weights(self):
- if isinstance(self.decoder_input_proj, Conv2d):
- caffe2_xavier_init(self.decoder_input_proj, bias=0)
-
- self.pixel_decoder.init_weights()
-
- for p in self.transformer_decoder.parameters():
- if p.dim() > 1:
- nn.init.xavier_uniform_(p)
-
- def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs,
- img_metas):
- """Preprocess the ground truth for all images.
-
- Args:
- gt_labels_list (list[Tensor]): Each is ground truth
- labels of each bbox, with shape (num_gts, ).
- gt_masks_list (list[BitmapMasks]): Each is ground truth
- masks of each instance of an image, shape
- (num_gts, h, w).
- gt_semantic_seg (Tensor | None): Ground truth of semantic
- segmentation with the shape (batch_size, n, h, w).
- [0, num_thing_class - 1] means things,
- [num_thing_class, num_class-1] means stuff,
- 255 means VOID. It's None when training instance segmentation.
- img_metas (list[dict]): List of image meta information.
-
- Returns:
- tuple: a tuple containing the following targets.
- - labels (list[Tensor]): Ground truth class indices\
- for all images. Each with shape (n, ), n is the sum of\
- number of stuff type and number of instance in a image.
- - masks (list[Tensor]): Ground truth mask for each\
- image, each with shape (n, h, w).
- """
- num_things_list = [self.num_things_classes] * len(gt_labels_list)
- num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list)
- if gt_semantic_segs is None:
- gt_semantic_segs = [None] * len(gt_labels_list)
-
- targets = multi_apply(preprocess_panoptic_gt, gt_labels_list,
- gt_masks_list, gt_semantic_segs, num_things_list,
- num_stuff_list, img_metas)
- labels, masks = targets
- return labels, masks
-
- def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list,
- gt_masks_list, img_metas):
- """Compute classification and mask targets for all images for a decoder
- layer.
-
- Args:
- cls_scores_list (list[Tensor]): Mask score logits from a single
- decoder layer for all images. Each with shape (num_queries,
- cls_out_channels).
- mask_preds_list (list[Tensor]): Mask logits from a single decoder
- layer for all images. Each with shape (num_queries, h, w).
- gt_labels_list (list[Tensor]): Ground truth class indices for all
- images. Each with shape (n, ), n is the sum of number of stuff
- type and number of instance in a image.
- gt_masks_list (list[Tensor]): Ground truth mask for each image,
- each with shape (n, h, w).
- img_metas (list[dict]): List of image meta information.
-
- Returns:
- tuple[list[Tensor]]: a tuple containing the following targets.
- - labels_list (list[Tensor]): Labels of all images.\
- Each with shape (num_queries, ).
- - label_weights_list (list[Tensor]): Label weights\
- of all images. Each with shape (num_queries, ).
- - mask_targets_list (list[Tensor]): Mask targets of\
- all images. Each with shape (num_queries, h, w).
- - mask_weights_list (list[Tensor]): Mask weights of\
- all images. Each with shape (num_queries, ).
- - num_total_pos (int): Number of positive samples in\
- all images.
- - num_total_neg (int): Number of negative samples in\
- all images.
- """
- (labels_list, label_weights_list, mask_targets_list, mask_weights_list,
- pos_inds_list,
- neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list,
- mask_preds_list, gt_labels_list,
- gt_masks_list, img_metas)
-
- num_total_pos = sum((inds.numel() for inds in pos_inds_list))
- num_total_neg = sum((inds.numel() for inds in neg_inds_list))
- return (labels_list, label_weights_list, mask_targets_list,
- mask_weights_list, num_total_pos, num_total_neg)
-
- def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks,
- img_metas):
- """Compute classification and mask targets for one image.
-
- Args:
- cls_score (Tensor): Mask score logits from a single decoder layer
- for one image. Shape (num_queries, cls_out_channels).
- mask_pred (Tensor): Mask logits for a single decoder layer for one
- image. Shape (num_queries, h, w).
- gt_labels (Tensor): Ground truth class indices for one image with
- shape (n, ). n is the sum of number of stuff type and number
- of instance in a image.
- gt_masks (Tensor): Ground truth mask for each image, each with
- shape (n, h, w).
- img_metas (dict): Image information.
-
- Returns:
- tuple[Tensor]: a tuple containing the following for one image.
- - labels (Tensor): Labels of each image.
- shape (num_queries, ).
- - label_weights (Tensor): Label weights of each image.
- shape (num_queries, ).
- - mask_targets (Tensor): Mask targets of each image.
- shape (num_queries, h, w).
- - mask_weights (Tensor): Mask weights of each image.
- shape (num_queries, ).
- - pos_inds (Tensor): Sampled positive indices for each image.
- - neg_inds (Tensor): Sampled negative indices for each image.
- """
- target_shape = mask_pred.shape[-2:]
- if gt_masks.shape[0] > 0:
- gt_masks_downsampled = F.interpolate(
- gt_masks.unsqueeze(1).float(), target_shape,
- mode='nearest').squeeze(1).long()
- else:
- gt_masks_downsampled = gt_masks
-
- # assign and sample
- assign_result = self.assigner.assign(cls_score, mask_pred, gt_labels,
- gt_masks_downsampled, img_metas)
- sampling_result = self.sampler.sample(assign_result, mask_pred,
- gt_masks)
- pos_inds = sampling_result.pos_inds
- neg_inds = sampling_result.neg_inds
-
- # label target
- labels = gt_labels.new_full((self.num_queries, ),
- self.num_classes,
- dtype=torch.long)
- labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
- label_weights = gt_labels.new_ones(self.num_queries)
-
- # mask target
- mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds]
- mask_weights = mask_pred.new_zeros((self.num_queries, ))
- mask_weights[pos_inds] = 1.0
-
- return (labels, label_weights, mask_targets, mask_weights, pos_inds,
- neg_inds)
-
- @force_fp32(apply_to=('all_cls_scores', 'all_mask_preds'))
- def loss(self, all_cls_scores, all_mask_preds, gt_labels_list,
- gt_masks_list, img_metas):
- """Loss function.
-
- Args:
- all_cls_scores (Tensor): Classification scores for all decoder
- layers with shape (num_decoder, batch_size, num_queries,
- cls_out_channels). Note `cls_out_channels` should includes
- background.
- all_mask_preds (Tensor): Mask scores for all decoder layers with
- shape (num_decoder, batch_size, num_queries, h, w).
- gt_labels_list (list[Tensor]): Ground truth class indices for each
- image with shape (n, ). n is the sum of number of stuff type
- and number of instance in a image.
- gt_masks_list (list[Tensor]): Ground truth mask for each image with
- shape (n, h, w).
- img_metas (list[dict]): List of image meta information.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- num_dec_layers = len(all_cls_scores)
- all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
- all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)]
- img_metas_list = [img_metas for _ in range(num_dec_layers)]
- losses_cls, losses_mask, losses_dice = multi_apply(
- self.loss_single, all_cls_scores, all_mask_preds,
- all_gt_labels_list, all_gt_masks_list, img_metas_list)
-
- loss_dict = dict()
- # loss from the last decoder layer
- loss_dict['loss_cls'] = losses_cls[-1]
- loss_dict['loss_mask'] = losses_mask[-1]
- loss_dict['loss_dice'] = losses_dice[-1]
- # loss from other decoder layers
- num_dec_layer = 0
- for loss_cls_i, loss_mask_i, loss_dice_i in zip(
- losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]):
- loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
- loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i
- loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i
- num_dec_layer += 1
- return loss_dict
-
- def loss_single(self, cls_scores, mask_preds, gt_labels_list,
- gt_masks_list, img_metas):
- """Loss function for outputs from a single decoder layer.
-
- Args:
- cls_scores (Tensor): Mask score logits from a single decoder layer
- for all images. Shape (batch_size, num_queries,
- cls_out_channels). Note `cls_out_channels` should include
- background.
- mask_preds (Tensor): Mask logits for a pixel decoder for all
- images. Shape (batch_size, num_queries, h, w).
- gt_labels_list (list[Tensor]): Ground truth class indices for each
- image, each with shape (n, ). n is the sum of number of stuff
- types and number of instances in a image.
- gt_masks_list (list[Tensor]): Ground truth mask for each image,
- each with shape (n, h, w).
- img_metas (list[dict]): List of image meta information.
-
- Returns:
- tuple[Tensor]: Loss components for outputs from a single decoder\
- layer.
- """
- num_imgs = cls_scores.size(0)
- cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
- mask_preds_list = [mask_preds[i] for i in range(num_imgs)]
-
- (labels_list, label_weights_list, mask_targets_list, mask_weights_list,
- num_total_pos,
- num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list,
- gt_labels_list, gt_masks_list,
- img_metas)
- # shape (batch_size, num_queries)
- labels = torch.stack(labels_list, dim=0)
- # shape (batch_size, num_queries)
- label_weights = torch.stack(label_weights_list, dim=0)
- # shape (num_total_gts, h, w)
- mask_targets = torch.cat(mask_targets_list, dim=0)
- # shape (batch_size, num_queries)
- mask_weights = torch.stack(mask_weights_list, dim=0)
-
- # classification loss
- # shape (batch_size * num_queries, )
- cls_scores = cls_scores.flatten(0, 1)
- labels = labels.flatten(0, 1)
- label_weights = label_weights.flatten(0, 1)
-
- class_weight = cls_scores.new_tensor(self.class_weight)
- loss_cls = self.loss_cls(
- cls_scores,
- labels,
- label_weights,
- avg_factor=class_weight[labels].sum())
-
- num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos]))
- num_total_masks = max(num_total_masks, 1)
-
- # extract positive ones
- # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w)
- mask_preds = mask_preds[mask_weights > 0]
- target_shape = mask_targets.shape[-2:]
-
- if mask_targets.shape[0] == 0:
- # zero match
- loss_dice = mask_preds.sum()
- loss_mask = mask_preds.sum()
- return loss_cls, loss_mask, loss_dice
-
- # upsample to shape of target
- # shape (num_total_gts, h, w)
- mask_preds = F.interpolate(
- mask_preds.unsqueeze(1),
- target_shape,
- mode='bilinear',
- align_corners=False).squeeze(1)
-
- # dice loss
- loss_dice = self.loss_dice(
- mask_preds, mask_targets, avg_factor=num_total_masks)
-
- # mask loss
- # FocalLoss support input of shape (n, num_class)
- h, w = mask_preds.shape[-2:]
- # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1)
- mask_preds = mask_preds.reshape(-1, 1)
- # shape (num_total_gts, h, w) -> (num_total_gts * h * w)
- mask_targets = mask_targets.reshape(-1)
- # target is (1 - mask_targets) !!!
- loss_mask = self.loss_mask(
- mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w)
-
- return loss_cls, loss_mask, loss_dice
-
- def forward(self, feats, img_metas):
- """Forward function.
-
- Args:
- feats (list[Tensor]): Features from the upstream network, each
- is a 4D-tensor.
- img_metas (list[dict]): List of image information.
-
- Returns:
- tuple: a tuple contains two elements.
- - all_cls_scores (Tensor): Classification scores for each\
- scale level. Each is a 4D-tensor with shape\
- (num_decoder, batch_size, num_queries, cls_out_channels).\
- Note `cls_out_channels` should include background.
- - all_mask_preds (Tensor): Mask scores for each decoder\
- layer. Each with shape (num_decoder, batch_size,\
- num_queries, h, w).
- """
- batch_size = len(img_metas)
- input_img_h, input_img_w = img_metas[0]['batch_input_shape']
- padding_mask = feats[-1].new_ones(
- (batch_size, input_img_h, input_img_w), dtype=torch.float32)
- for i in range(batch_size):
- img_h, img_w, _ = img_metas[i]['img_shape']
- padding_mask[i, :img_h, :img_w] = 0
- padding_mask = F.interpolate(
- padding_mask.unsqueeze(1),
- size=feats[-1].shape[-2:],
- mode='nearest').to(torch.bool).squeeze(1)
- # when backbone is swin, memory is output of last stage of swin.
- # when backbone is r50, memory is output of transformer encoder.
- mask_features, memory = self.pixel_decoder(feats, img_metas)
- pos_embed = self.decoder_pe(padding_mask)
- memory = self.decoder_input_proj(memory)
- # shape (batch_size, c, h, w) -> (h*w, batch_size, c)
- memory = memory.flatten(2).permute(2, 0, 1)
- pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
- # shape (batch_size, h * w)
- padding_mask = padding_mask.flatten(1)
- # shape = (num_queries, embed_dims)
- query_embed = self.query_embed.weight
- # shape = (num_queries, batch_size, embed_dims)
- query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1)
- target = torch.zeros_like(query_embed)
- # shape (num_decoder, num_queries, batch_size, embed_dims)
- out_dec = self.transformer_decoder(
- query=target,
- key=memory,
- value=memory,
- key_pos=pos_embed,
- query_pos=query_embed,
- key_padding_mask=padding_mask)
- # shape (num_decoder, batch_size, num_queries, embed_dims)
- out_dec = out_dec.transpose(1, 2)
-
- # cls_scores
- all_cls_scores = self.cls_embed(out_dec)
-
- # mask_preds
- mask_embed = self.mask_embed(out_dec)
- all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed,
- mask_features)
-
- return all_cls_scores, all_mask_preds
-
- def forward_train(self,
- feats,
- img_metas,
- gt_bboxes,
- gt_labels,
- gt_masks,
- gt_semantic_seg,
- gt_bboxes_ignore=None):
- """Forward function for training mode.
-
- Args:
- feats (list[Tensor]): Multi-level features from the upstream
- network, each is a 4D-tensor.
- img_metas (list[Dict]): List of image information.
- gt_bboxes (list[Tensor]): Each element is ground truth bboxes of
- the image, shape (num_gts, 4). Not used here.
- gt_labels (list[Tensor]): Each element is ground truth labels of
- each box, shape (num_gts,).
- gt_masks (list[BitmapMasks]): Each element is masks of instances
- of a image, shape (num_gts, h, w).
- gt_semantic_seg (list[tensor] | None): Each element is the ground
- truth of semantic segmentation with the shape (N, H, W).
- [0, num_thing_class - 1] means things,
- [num_thing_class, num_class-1] means stuff,
- 255 means VOID. It's None when training instance segmentation.
- gt_bboxes_ignore (list[Tensor]): Ground truth bboxes to be
- ignored. Defaults to None.
-
- Returns:
- dict[str, Tensor]: a dictionary of loss components
- """
- # not consider ignoring bboxes
- assert gt_bboxes_ignore is None
-
- # forward
- all_cls_scores, all_mask_preds = self(feats, img_metas)
-
- # preprocess ground truth
- gt_labels, gt_masks = self.preprocess_gt(gt_labels, gt_masks,
- gt_semantic_seg, img_metas)
-
- # loss
- losses = self.loss(all_cls_scores, all_mask_preds, gt_labels, gt_masks,
- img_metas)
-
- return losses
-
- def simple_test(self, feats, img_metas, **kwargs):
- """Test without augmentaton.
-
- Args:
- feats (list[Tensor]): Multi-level features from the
- upstream network, each is a 4D-tensor.
- img_metas (list[dict]): List of image information.
-
- Returns:
- tuple: A tuple contains two tensors.
-
- - mask_cls_results (Tensor): Mask classification logits,\
- shape (batch_size, num_queries, cls_out_channels).
- Note `cls_out_channels` should include background.
- - mask_pred_results (Tensor): Mask logits, shape \
- (batch_size, num_queries, h, w).
- """
- all_cls_scores, all_mask_preds = self(feats, img_metas)
- mask_cls_results = all_cls_scores[-1]
- mask_pred_results = all_mask_preds[-1]
-
- # upsample masks
- img_shape = img_metas[0]['batch_input_shape']
- mask_pred_results = F.interpolate(
- mask_pred_results,
- size=(img_shape[0], img_shape[1]),
- mode='bilinear',
- align_corners=False)
-
- return mask_cls_results, mask_pred_results
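-
-# Shape sketch for the mask-prediction einsum used in forward() above
-# (illustrative only; the sizes below are arbitrary). With L decoder layers,
-# B images, Q queries and C feature channels:
-#   mask_embed:    (L, B, Q, C)
-#   mask_features: (B, C, H, W)
-#   'lbqc,bchw->lbqhw' therefore yields per-layer, per-query masks (L, B, Q, H, W):
-#
-#   m = torch.einsum('lbqc,bchw->lbqhw',
-#                    torch.randn(6, 2, 100, 256), torch.randn(2, 256, 32, 32))
-#   assert m.shape == (6, 2, 100, 32, 32)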
diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/__init__.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/__init__.py
deleted file mode 100644
index 6f2fa823fb35fdd90c07065cc93238d08385ce8b..0000000000000000000000000000000000000000
--- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/necks/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .bfp import BFP
-from .channel_mapper import ChannelMapper
-from .ct_resnet_neck import CTResNetNeck
-from .dilated_encoder import DilatedEncoder
-from .dyhead import DyHead
-from .fpg import FPG
-from .fpn import FPN
-from .fpn_carafe import FPN_CARAFE
-from .hrfpn import HRFPN
-from .nas_fpn import NASFPN
-from .nasfcos_fpn import NASFCOS_FPN
-from .pafpn import PAFPN
-from .rfp import RFP
-from .ssd_neck import SSDNeck
-from .yolo_neck import YOLOV3Neck
-from .yolox_pafpn import YOLOXPAFPN
-
-__all__ = [
- 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
- 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
- 'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead'
-]
diff --git a/spaces/ronvolutional/sk-node/app/README.md b/spaces/ronvolutional/sk-node/app/README.md
deleted file mode 100644
index 5c91169b0ca6508bb24301c957a9edea5abf2b01..0000000000000000000000000000000000000000
--- a/spaces/ronvolutional/sk-node/app/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# create-svelte
-
-Everything you need to build a Svelte project, powered by [`create-svelte`](https://github.com/sveltejs/kit/tree/master/packages/create-svelte).
-
-## Creating a project
-
-If you're seeing this, you've probably already done this step. Congrats!
-
-```bash
-# create a new project in the current directory
-npm create svelte@latest
-
-# create a new project in my-app
-npm create svelte@latest my-app
-```
-
-## Developing
-
-Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:
-
-```bash
-npm run dev
-
-# or start the server and open the app in a new browser tab
-npm run dev -- --open
-```
-
-## Building
-
-To create a production version of your app:
-
-```bash
-npm run build
-```
-
-You can preview the production build with `npm run preview`.
-
-> To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment.
diff --git a/spaces/ronvolutional/sk-node/app/src/app.html b/spaces/ronvolutional/sk-node/app/src/app.html
deleted file mode 100644
index b555f2469a3d6e76d93e70e96ed3fbcf9c470480..0000000000000000000000000000000000000000
--- a/spaces/ronvolutional/sk-node/app/src/app.html
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-
-
-
-
- %sveltekit.head%
-
-
- %sveltekit.body%
-
-
diff --git a/spaces/ruslanmv/Clone-Your-Voice/vocoder/hparams.py b/spaces/ruslanmv/Clone-Your-Voice/vocoder/hparams.py
deleted file mode 100644
index c1de9f7dcc2926735b80a28ed1226ff1b5824753..0000000000000000000000000000000000000000
--- a/spaces/ruslanmv/Clone-Your-Voice/vocoder/hparams.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from synthesizer.hparams import hparams as _syn_hp
-
-
-# Audio settings------------------------------------------------------------------------
-# Match the values of the synthesizer
-sample_rate = _syn_hp.sample_rate
-n_fft = _syn_hp.n_fft
-num_mels = _syn_hp.num_mels
-hop_length = _syn_hp.hop_size
-win_length = _syn_hp.win_size
-fmin = _syn_hp.fmin
-min_level_db = _syn_hp.min_level_db
-ref_level_db = _syn_hp.ref_level_db
-mel_max_abs_value = _syn_hp.max_abs_value
-preemphasis = _syn_hp.preemphasis
-apply_preemphasis = _syn_hp.preemphasize
-
-bits = 9 # bit depth of signal
-mu_law = True # Recommended to suppress noise if using raw bits in hp.voc_mode
- # below
-
-
-# WAVERNN / VOCODER --------------------------------------------------------------------------------
-voc_mode = 'RAW' # either 'RAW' (softmax on raw bits) or 'MOL' (sample from
-# mixture of logistics)
-voc_upsample_factors = (5, 5, 8) # NB - this needs to correctly factorise hop_length
-voc_rnn_dims = 512
-voc_fc_dims = 512
-voc_compute_dims = 128
-voc_res_out_dims = 128
-voc_res_blocks = 10
-
-# Training
-voc_batch_size = 100
-voc_lr = 1e-4
-voc_gen_at_checkpoint = 5 # number of samples to generate at each checkpoint
-voc_pad = 2 # this will pad the input so that the resnet can 'see' wider
- # than input length
-voc_seq_len = hop_length * 5 # must be a multiple of hop_length
-
-# Generating / Synthesizing
-voc_gen_batched = True # very fast (realtime+) single utterance batched generation
-voc_target = 8000 # target number of samples to be generated in each batch entry
-voc_overlap = 400 # number of samples for crossfading between batches
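-
-# Optional sanity checks (illustrative, left commented out): as noted above,
-# the upsample factors must multiply to hop_length and voc_seq_len must be a
-# multiple of hop_length. Uncomment to verify against the synthesizer hparams
-# this file imports from.
-# assert voc_upsample_factors[0] * voc_upsample_factors[1] * voc_upsample_factors[2] == hop_length
-# assert voc_seq_len % hop_length == 0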
diff --git a/spaces/sahshd/ChuanhuChatGPT/Dockerfile b/spaces/sahshd/ChuanhuChatGPT/Dockerfile
deleted file mode 100644
index 335c2dba28ba8c365de9306858462a59dea25f28..0000000000000000000000000000000000000000
--- a/spaces/sahshd/ChuanhuChatGPT/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM python:3.9 as builder
-RUN apt-get update && apt-get install -y build-essential
-COPY requirements.txt .
-COPY requirements_advanced.txt .
-RUN pip install --user -r requirements.txt
-# RUN pip install --user -r requirements_advanced.txt
-
-FROM python:3.9
-LABEL maintainer="iskoldt"
-COPY --from=builder /root/.local /root/.local
-ENV PATH=/root/.local/bin:$PATH
-COPY . /app
-WORKDIR /app
-ENV dockerrun yes
-CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"]
diff --git a/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/app.py b/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/app.py
deleted file mode 100644
index 7163191e31b990134dcf6c397ddb71a88d30cdfc..0000000000000000000000000000000000000000
--- a/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/app.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import gradio as gr
-import os
-import numpy as np
-os.system("pip install pdfminer.six rank_bm25 torch transformers")
-
-from gradio.mix import Series
-#import re
-from rank_bm25 import BM25Okapi
-import string
-import torch
-from transformers import pipeline
-import pdfminer
-from pdfminer.high_level import extract_text
-
-len_doc = 500
-overlap = 15
-param_top_k_retriver = 15
-param_top_k_ranker = 3
-
-def read_pdf(file):
- text = extract_text(file.name)
- # Split text into smaller docs
- docs = []
-
- i = 0
- while i < len(text):
- docs.append(text[i:i+len_doc])
- i = i + len_doc - overlap
- return docs
-
-# We use BM25 as the retriever, which does a first round of candidate filtering based on word matching
-
-def bm25_tokenizer(text):
- stop_w = ['a', 'the', 'am', 'is' , 'are', 'who', 'how', 'where', 'when', 'why', 'what']
- tokenized_doc = []
- for token in text.lower().split():
- token = token.strip(string.punctuation)
-
- if len(token) > 0 and token not in stop_w:
- tokenized_doc.append(token)
- return tokenized_doc
-
-def retrieval(query, top_k_retriver, docs, bm25_):
-
- bm25_scores = bm25_.get_scores(bm25_tokenizer(query))
- top_n = np.argsort(bm25_scores)[::-1][:top_k_retriver]
- bm25_hits = [{'corpus_id': idx,
- 'score': bm25_scores[idx],
- 'docs':docs[idx]} for idx in top_n if bm25_scores[idx] > 0]
- bm25_hits = sorted(bm25_hits, key=lambda x: x['score'], reverse=True)
-
- return bm25_hits
-
-def qa_ranker(query, docs_, top_k_ranker, qa_model):
- ans = []
- for doc in docs_:
- answer = qa_model(question = query,
- context = doc)
- answer['doc'] = doc
- ans.append(answer)
- return sorted(ans, key=lambda x: x['score'], reverse=True)[:top_k_ranker]
-
-# HTML helpers used to colour and format the answer span shown in the Gradio HTML outputs
-def cstr(s, color='black'):
- return "<text style=color:{}>{}</text>".format(color, s)
-def cstr_bold(s, color='black'):
- return "<text style=color:{}><b>{}</b></text>".format(color, s)
-def cstr_break(s, color='black'):
- return "<br><text style=color:{}>{}</text>".format(color, s)
-
-def print_colored(text, start_idx, end_idx, confidence):
- conf_str = '- Confidence: ' + confidence
- a = cstr(' '.join([text[:start_idx], \
- cstr_bold(text[start_idx:end_idx], color='blue'), \
- text[end_idx:], \
- cstr_break(conf_str, color='grey')]), color='black')
- return a
-
-def final_qa_pipeline(file, query, model_nm):
- docs = read_pdf(file)
- tokenized_corpus = []
- for doc in docs:
- tokenized_corpus.append(bm25_tokenizer(doc))
-
- bm25 = BM25Okapi(tokenized_corpus)
-
- top_k_retriver, top_k_ranker = param_top_k_retriver, param_top_k_ranker
- lvl1 = retrieval(query, top_k_retriver, docs, bm25)
-
- qa_model = pipeline("question-answering",
- #model = "deepset/minilm-uncased-squad2")
- model = "deepset/"+ str(model_nm))
-
- if len(lvl1) > 0:
- fnl_rank = qa_ranker(query, [l["docs"] for l in lvl1], top_k_ranker,qa_model)
- top1 = print_colored(fnl_rank[0]['doc'], fnl_rank[0]['start'], fnl_rank[0]['end'], str(np.round(100*fnl_rank[0]["score"],1))+"%")
- if len(lvl1)>1:
- top2 = print_colored(fnl_rank[1]['doc'], fnl_rank[1]['start'], fnl_rank[1]['end'], str(np.round(100*fnl_rank[1]["score"],1))+"%")
- else:
- top2 = "None"
- return (top1, top2)
- else:
- return ("No match","No match")
-
-examples = [
- [os.path.abspath("dbs-annual-report-2020.pdf"), "how many times has DBS won Best bank in the world ?","minilm-uncased-squad2"],
- [os.path.abspath("dbs-annual-report-2020.pdf"), "how much dividend was paid to shareholders ?","minilm-uncased-squad2"],
- [os.path.abspath("dbs-annual-report-2020.pdf"), "what is the sustainability focus ?","minilm-uncased-squad2"],
- [os.path.abspath("NASDAQ_AAPL_2020.pdf"), "how much are the outstanding shares ?","minilm-uncased-squad2"],
- [os.path.abspath("NASDAQ_AAPL_2020.pdf"), "what is competitors strategy ?","minilm-uncased-squad2"],
- [os.path.abspath("NASDAQ_AAPL_2020.pdf"), "who is the chief executive officer ?","minilm-uncased-squad2"],
- [os.path.abspath("NASDAQ_MSFT_2020.pdf"), "How much is the guided revenue for next quarter?","minilm-uncased-squad2"],
-]
-
-iface = gr.Interface(
- fn = final_qa_pipeline,
- inputs = [gr.inputs.File(label="input pdf file"), gr.inputs.Textbox(label="Question:"), gr.inputs.Dropdown(choices=["minilm-uncased-squad2","roberta-base-squad2"],label="Model")],
- outputs = [gr.outputs.HTML(label="Top 1 answer"), gr.outputs.HTML(label="Top 2 answer")],
- examples=examples,
- theme = "grass",
- title = "Question Answering on annual reports",
- description = "Navigate long annual reports by using Machine learning to answer your questions. \nSimply upload any annual report pdf you are interested in and ask model a question OR load an example from below."
- )
-iface.launch(enable_queue = True)
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/example/21 Jump Street 720p Yify 208 UPD.md b/spaces/scedlatioru/img-to-music/example/21 Jump Street 720p Yify 208 UPD.md
deleted file mode 100644
index 37ee8e323ecd7aafa81a3bc87a49cc0ca3db8faf..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/21 Jump Street 720p Yify 208 UPD.md
+++ /dev/null
@@ -1,6 +0,0 @@
-21 jump street 720p yify 208 Download ☆☆☆☆☆ https://gohhs.com/2uEAtr
-
-Iron Man 3[720p]1.33GB, 8 years, Movie, 5, 1.33 GB, 0, 0. Magnet Link · Iron Man Trilogy(2008-2013) 720p BRRiP X264 AAC 5.1 [Team Nanban, 8 years, Movie ... 1fdad05405
-
-
-
diff --git a/spaces/scedlatioru/img-to-music/example/IntroduccionALaPsicologiaRobertFeldmanPdf.md b/spaces/scedlatioru/img-to-music/example/IntroduccionALaPsicologiaRobertFeldmanPdf.md
deleted file mode 100644
index 3c2fc7b4b7b1333b6e926a10848140744793729f..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/IntroduccionALaPsicologiaRobertFeldmanPdf.md
+++ /dev/null
@@ -1,6 +0,0 @@
-IntroduccionALaPsicologiaRobertFeldmanPdf Download Zip 🆗 https://gohhs.com/2uEABa
-
-INTRODUCCIÓN. A LA PSICOLOGÍA. Un enfoque ecosistémico. Rogelio Díaz-Guerrero. Rolando Díaz-Loving. ISBN 978-968-24-5406-6. CONCEPTOS. 4d29de3e1b
-
-
-
diff --git a/spaces/scedlatioru/img-to-music/example/Microsoft Flight Simulator X CPY Crack High Quality Torrent Free Download [2020].md b/spaces/scedlatioru/img-to-music/example/Microsoft Flight Simulator X CPY Crack High Quality Torrent Free Download [2020].md
deleted file mode 100644
index ae180abcd2452bdcf410e74231dd882097790dad..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Microsoft Flight Simulator X CPY Crack High Quality Torrent Free Download [2020].md
+++ /dev/null
@@ -1,178 +0,0 @@
-
-Microsoft Flight Simulator X CPY Crack Torrent Free Download [2020]
-
-If you are a fan of flight simulation games, you might have heard of Microsoft Flight Simulator X, one of the most realistic and immersive flight simulators ever created. Microsoft Flight Simulator X is the tenth and final installment of the Microsoft Flight Simulator series, which was first launched in 1982. It was released in 2006 for Windows and is still popular among flight enthusiasts and gamers.
-
-Microsoft Flight Simulator X offers a wide range of features and options that make it a unique and enjoyable experience. You can choose from over 80 different aircraft, ranging from light planes to wide-body jets, and fly them in various weather conditions and scenarios. You can also create your own flight plan and fly anywhere on the planet, with over 24,000 airports and realistic scenery based on satellite imagery. You can also take on various missions and challenges, such as rescue operations, air races, combat scenarios, and more.
-Microsoft Flight Simulator X CPY Crack Torrent Free Download [2020] Download File ✸ https://gohhs.com/2uEzTe
-
-However, if you want to play Microsoft Flight Simulator X on your PC, you might face some difficulties. The game is not compatible with newer versions of Windows, such as Windows 10, and requires a lot of system resources to run smoothly. Moreover, the game is not available for free, and you need to purchase it from online stores or physical copies.
-
-How to Download Microsoft Flight Simulator X CPY Crack Torrent for Free?
-
-If you want to download Microsoft Flight Simulator X CPY Crack Torrent for free, you can follow these simple steps:
-
-
-Visit a torrent website that offers Microsoft Flight Simulator X CPY Crack Torrent, such as SkidrowCPY.Games or CPYGames.Site.
-Search for Microsoft Flight Simulator X CPY Crack Torrent using the search bar or browse the categories.
-Select the torrent file that has the most seeders and leechers and click on the download link.
-You will need a torrent client software to download the torrent file, such as uTorrent or BitTorrent.
-Open the torrent file with your torrent client and choose a location to save the game files.
-Wait for the download to complete. It might take some time depending on your internet speed and the size of the game files.
-Once the download is finished, you will have a folder with the game files and a crack folder.
-Copy the contents of the crack folder and paste them into the game folder, replacing the original files.
-Run the game as administrator and enjoy!
-
-
-What are the Benefits of Downloading Microsoft Flight Simulator X CPY Crack Torrent?
-
-By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can enjoy many benefits such as:
-
-
-You can play Microsoft Flight Simulator X on your PC without any compatibility issues or errors.
-You can play Microsoft Flight Simulator X for free without spending any money or registering any account.
-You can play Microsoft Flight Simulator X offline without any internet connection or online activation.
-You can play Microsoft Flight Simulator X with all the features and options unlocked and available.
-You can play Microsoft Flight Simulator X with high graphics quality and performance.
-
-
-Conclusion
-
-Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can play this amazing game on your PC for free and without any hassle. So what are you waiting for? Download Microsoft Flight Simulator X CPY Crack Torrent today and start your flight adventure!
-What is CPY and How Does It Work?
-
-CPY is a group of hackers and crackers who are known for cracking and releasing various games for free. CPY is one of the most popular and respected groups in the scene, and they have cracked many games that were protected by strong anti-piracy measures, such as Denuvo.
-
-CPY works by reverse-engineering the game files and finding the encryption keys and algorithms that are used to protect the game from unauthorized copying and modification. They then create a crack file that bypasses or removes these protections and allows the game to run without any restrictions or limitations.
-
-
-CPY releases their cracks and games on various torrent websites, where users can download them for free. However, CPY does not provide any support or updates for their cracks and games, and they are not responsible for any damages or issues that may arise from using them.
-
-What are the Risks of Downloading Microsoft Flight Simulator X CPY Crack Torrent?
-
-While downloading Microsoft Flight Simulator X CPY Crack Torrent may seem tempting and convenient, it also comes with some risks and disadvantages that you should be aware of. Some of the risks are:
-
-
-You may download a fake or malicious file that may harm your computer or steal your personal information.
-You may violate the intellectual property rights of the game developers and publishers, and face legal consequences or penalties.
-You may not be able to access the online features and updates of the game, such as multiplayer mode, patches, DLCs, etc.
-You may encounter bugs, errors, crashes, or performance issues that may ruin your gaming experience.
-You may miss out on the satisfaction and enjoyment of buying and playing a legitimate copy of the game.
-
-
-How to Buy and Play Microsoft Flight Simulator X Legally?
-
-If you want to buy and play Microsoft Flight Simulator X legally, you can follow these simple steps:
-
-
-Visit an online store that sells Microsoft Flight Simulator X, such as Steam or Amazon.
-Search for Microsoft Flight Simulator X using the search bar or browse the categories.
-Select the edition and version of the game that you want to buy.
-Add the game to your cart and proceed to checkout.
-Enter your payment details and confirm your order.
-You will receive an email with a download link or a product key for the game.
-Download and install the game on your PC using the link or the key.
-Launch the game and enjoy!
-
-
-Conclusion
-
-Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By buying and playing Microsoft Flight Simulator X legally, you can support the game developers and publishers, and enjoy all the features and benefits of the game. So what are you waiting for? Buy Microsoft Flight Simulator X today and start your flight adventure!
-What are the System Requirements for Microsoft Flight Simulator X?
-
-Microsoft Flight Simulator X is a demanding game that requires a powerful PC to run smoothly. Here are the minimum and recommended system requirements for Microsoft Flight Simulator X:
-
-
-
-Minimum
-Recommended
-
-
-CPU: Intel Core 2 Duo E8400 or AMD Athlon X2 6000+
-CPU: Intel Core i5-3570K or AMD FX-8320
-
-
-RAM: 2 GB
-RAM: 8 GB
-
-
-OS: Windows XP SP2 or later
-OS: Windows 10 64-bit
-
-
-GPU: Nvidia GeForce 8800 GT or AMD Radeon HD 5670
-GPU: Nvidia GeForce GTX 1050 Ti or AMD Radeon RX 560
-
-
-VRAM: 256 MB
-VRAM: 4 GB
-
-
-DirectX: Version 9.0c
-DirectX: Version 11
-
-
-HDD: 30 GB
-HDD: 30 GB
-
-
-Sound Card: DirectX Compatible Sound Card with latest drivers
-Sound Card: DirectX Compatible Sound Card with latest drivers
-
-
-
-You can check your PC specifications and compare them with the system requirements using various online tools, such as Can You Run It or PCGameBenchmark.
-
-How to Optimize Microsoft Flight Simulator X for Better Performance?
-
-If you are facing any issues or problems while playing Microsoft Flight Simulator X, such as low FPS, stuttering, lagging, crashing, etc., you can try some of these tips and tricks to optimize Microsoft Flight Simulator X for better performance:
-
-
-Update your graphics card drivers and DirectX to the latest versions.
-Close any unnecessary background programs and processes that may consume your CPU, RAM, or bandwidth.
-Adjust your graphics settings in the game options menu according to your PC specifications and preferences. You can lower some settings, such as resolution, anti-aliasing, texture quality, shadows, etc., to improve your FPS and reduce lag.
-Clean your PC from any junk files, viruses, malware, etc., that may affect your system performance and stability.
-Defragment your hard drive to improve your loading times and disk speed.
-Use a game booster software, such as Razer Cortex or Wise Game Booster, to optimize your PC settings and resources for gaming.
-Use a VPN service, such as NordVPN or ExpressVPN, to improve your online connection and reduce ping.
-Contact Microsoft support or visit their official forums if you encounter any bugs, errors, or glitches that may prevent you from playing the game properly.
-
-
-Conclusion
-
-Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can play this amazing game on your PC for free and without any hassle. So what are you waiting for? Download Microsoft Flight Simulator X CPY Crack Torrent today and start your flight adventure!
-How to Install Microsoft Flight Simulator X CPY Crack Torrent?
-
-After downloading Microsoft Flight Simulator X CPY Crack Torrent, you need to install it on your PC to play the game. Here are the steps to install Microsoft Flight Simulator X CPY Crack Torrent:
-
-
-Extract the downloaded torrent file using a software such as WinRAR or 7-Zip.
-Open the extracted folder and run the setup.exe file as administrator.
-Follow the instructions on the screen and choose a location to install the game.
-Wait for the installation to complete. It may take some time depending on your PC specifications and disk speed.
-Copy the contents of the crack folder and paste them into the game installation folder, replacing the original files.
-Run the game as administrator and enjoy!
-
-
-How to Troubleshoot Microsoft Flight Simulator X CPY Crack Torrent?
-
-If you encounter any problems or errors while playing Microsoft Flight Simulator X CPY Crack Torrent, such as black screen, missing DLL files, activation error, etc., you can try some of these solutions to troubleshoot Microsoft Flight Simulator X CPY Crack Torrent:
-
-
-Make sure your PC meets the minimum or recommended system requirements for Microsoft Flight Simulator X.
-Make sure your graphics card drivers and DirectX are updated to the latest versions.
-Make sure you have installed Microsoft Flight Simulator X CPY Crack Torrent correctly and copied the crack files properly.
-Make sure you have disabled your antivirus or firewall software before running the game, as they may interfere with the crack or block the game files.
-Make sure you have run the game as administrator and given it full permissions.
-Make sure you have installed any required software or components for the game, such as Microsoft Visual C++, .NET Framework, etc.
-Contact Microsoft support or visit their official forums if you encounter any bugs, glitches, or issues that may prevent you from playing the game properly.
-
-
-Conclusion
-
-Microsoft Flight Simulator X is one of the best flight simulation games ever made. It offers a realistic and immersive experience that will make you feel like a real pilot. By downloading Microsoft Flight Simulator X CPY Crack Torrent, you can play this amazing game on your PC for free and without any hassle. So what are you waiting for? Download Microsoft Flight Simulator X CPY Crack Torrent today and start your flight adventure!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/example/Punto De Ventas Plus 5.95 ((INSTALL)) Keygen.md b/spaces/scedlatioru/img-to-music/example/Punto De Ventas Plus 5.95 ((INSTALL)) Keygen.md
deleted file mode 100644
index faca091eeca3aec8e21bbce22e70ee629b562423..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Punto De Ventas Plus 5.95 ((INSTALL)) Keygen.md
+++ /dev/null
@@ -1,14 +0,0 @@
-punto de ventas plus 5.95 keygen Download Zip ⚡ https://gohhs.com/2uEzk7
-
-pintalibro.es/project-waste-management/paint-factory/genvex-hardwares-2015-premier-series-of-collecting-impression-cret-creative-stop-by-bristol-2015-by-bristol-plaza/ It is apparent that you're excited by some sort of website. Its look is excellent and the content material on the website appears to be very useful. Is perhaps the information on this site that is a beginner?
-
-pintalibro.es/project-waste-management/paint-factory/genvex-hardwares-2015-premier-series-of-collecting-impression-cret-creative-stop-by-bristol-2015-by-bristol-plaza/ The very first thing you want to do is have a niche that you would like to participate in. Are you actually seeking to create a site about Grommets? Do you have any special objectives in mind? What is your mission for the site? You must answer these questions in your mind before you begin.
-
-pintalibro.es/project-waste-management/paint-factory/genvex-hardwares-2015-premier-series-of-collecting-impression-cret-creative-stop-by-bristol-2015-by-bristol-plaza/ There is not any doubt that you intend to do a website about Grommets. Your blog is for those who really would love to obtain more information about the subject.
-
-pintalibro.es/project-waste-management/paint-factory/genvex-hardwares-2015-premier-series-of-collecting-impression-cret-creative-stop-by-bristol-2015-by-bristol-plaza/ Do you want to really get the visitor traffic that you may benefit from? If yes, then a very good approach to go about this is to include a few popular topics on your site. If you can really keep up with the latest news, it is a superb means to provide your blog the interest that it needs and need to be known.
-
-pintalibro.es/project-waste-management/paint 4fefd39f24
-
-
-
diff --git a/spaces/scedlatioru/img-to-music/example/Zooskool Carmen Nubian Petlove.md b/spaces/scedlatioru/img-to-music/example/Zooskool Carmen Nubian Petlove.md
deleted file mode 100644
index 573e421bc432f55cbfde3890f8c4b92b0507b9cf..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Zooskool Carmen Nubian Petlove.md
+++ /dev/null
@@ -1,28 +0,0 @@
-Zooskool Carmen Nubian Petlove Download ✦ https://gohhs.com/2uEzoQ
-
-Monster truck porn - disney girl porn
-
-A very young boy wants to know if his father will be home soon. Carmen contida abriendose los pechos - Video de grupos de hombres enojados por complacer a su pareja.
-
-A young boy is wondering what his father is doing today. New York Lesbian – Lesbian Party · Lesbians are going to the gym.
-
-A very young boy wants to know if his father will be home soon. A young boy and a very young girl are busy talking. A young boy wants to know if his father will be home soon. A very young boy wants to know if his father will be home soon. A young boy and a very young girl are busy talking. A very young boy wants to know if his father will be home soon. A young boy and a very young girl are busy talking. A very young boy wants to know if his father will be home soon. A very young boy wants to know if his father will be home soon. A young boy and a very young girl are busy talking. A very young boy wants to know if his father will be home soon.Evaluation of an education and research alliance at a university cancer center.
-
-Developments in oncology practice have led to a change in expectations regarding the educational requirements of providers in clinical and research settings. In response, the authors developed and implemented a college-based training program for a newly established cancer center. The authors describe the development of the program, the reactions of participants, and the results of the program. The most significant effect of the program was the involvement of first-year medical students in basic science research. The program was also found to have positive effects on interprofessional education in the cancer center and on the development of junior faculty.We are Proud to be your local professional plumbing and water
-
-supply specialists
-
-We can take care of your needs!
-
-Whether it be a plumbing installation, repair or replacement, our technicians will provide you with friendly and professional service at a reasonable rate.
-
-Vital Services of Illinois
-
-When it comes to hiring a licensed plumber, you want to find someone who you can trust, as well as someone who you can rely on to get the job done right.
-
-Our company is proud to be an Illinois plumbing and water supply specialist.
-
-Our team of well trained technicians are well versed in both residential and commercial plumbing, as well as water systems. 4fefd39f24
-
-
-
diff --git a/spaces/sdeeas/ChuanhuChatGPT/modules/base_model.py b/spaces/sdeeas/ChuanhuChatGPT/modules/base_model.py
deleted file mode 100644
index 2b55623f6b0989f60d818be6e0e77f5948484b82..0000000000000000000000000000000000000000
--- a/spaces/sdeeas/ChuanhuChatGPT/modules/base_model.py
+++ /dev/null
@@ -1,561 +0,0 @@
-from __future__ import annotations
-from typing import TYPE_CHECKING, List
-
-import logging
-import json
-import commentjson as cjson
-import os
-import sys
-import requests
-import urllib3
-import traceback
-
-from tqdm import tqdm
-import colorama
-from duckduckgo_search import ddg
-import asyncio
-import aiohttp
-from enum import Enum
-
-from .presets import *
-from .llama_func import *
-from .utils import *
-from . import shared
-from .config import retrieve_proxy
-
-
-class ModelType(Enum):
- Unknown = -1
- OpenAI = 0
- ChatGLM = 1
- LLaMA = 2
- XMChat = 3
-
- @classmethod
- def get_type(cls, model_name: str):
- model_type = None
- model_name_lower = model_name.lower()
- if "gpt" in model_name_lower:
- model_type = ModelType.OpenAI
- elif "chatglm" in model_name_lower:
- model_type = ModelType.ChatGLM
- elif "llama" in model_name_lower or "alpaca" in model_name_lower:
- model_type = ModelType.LLaMA
- elif "xmchat" in model_name_lower:
- model_type = ModelType.XMChat
- else:
- model_type = ModelType.Unknown
- return model_type
-
-
-class BaseLLMModel:
- def __init__(
- self,
- model_name,
- system_prompt="",
- temperature=1.0,
- top_p=1.0,
- n_choices=1,
- stop=None,
- max_generation_token=None,
- presence_penalty=0,
- frequency_penalty=0,
- logit_bias=None,
- user="",
- ) -> None:
- self.history = []
- self.all_token_counts = []
- self.model_name = model_name
- self.model_type = ModelType.get_type(model_name)
- try:
- self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
- except KeyError:
- self.token_upper_limit = DEFAULT_TOKEN_LIMIT
- self.interrupted = False
- self.system_prompt = system_prompt
- self.api_key = None
- self.need_api_key = False
- self.single_turn = False
-
- self.temperature = temperature
- self.top_p = top_p
- self.n_choices = n_choices
- self.stop_sequence = stop
- self.max_generation_token = None
- self.presence_penalty = presence_penalty
- self.frequency_penalty = frequency_penalty
- self.logit_bias = logit_bias
- self.user_identifier = user
-
- def get_answer_stream_iter(self):
- """stream predict, need to be implemented
- conversations are stored in self.history, with the most recent question, in OpenAI format
- should return a generator, each time give the next word (str) in the answer
- """
- logging.warning("stream predict not implemented, using at once predict instead")
- response, _ = self.get_answer_at_once()
- yield response
-
- def get_answer_at_once(self):
- """predict at once, need to be implemented
- conversations are stored in self.history, with the most recent question, in OpenAI format
- Should return:
- the answer (str)
- total token count (int)
- """
- logging.warning("at once predict not implemented, using stream predict instead")
- response_iter = self.get_answer_stream_iter()
- count = 0
- for response in response_iter:
- count += 1
- return response, sum(self.all_token_counts) + count
-
- def billing_info(self):
- """get billing infomation, inplement if needed"""
- logging.warning("billing info not implemented, using default")
- return BILLING_NOT_APPLICABLE_MSG
-
- def count_token(self, user_input):
- """get token count from input, implement if needed"""
- logging.warning("token count not implemented, using default")
- return len(user_input)
-
- def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
- def get_return_value():
- return chatbot, status_text
-
- status_text = i18n("开始实时传输回答……")
- if fake_input:
- chatbot.append((fake_input, ""))
- else:
- chatbot.append((inputs, ""))
-
- user_token_count = self.count_token(inputs)
- self.all_token_counts.append(user_token_count)
- logging.debug(f"输入token计数: {user_token_count}")
-
- stream_iter = self.get_answer_stream_iter()
-
- for partial_text in stream_iter:
- chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
- self.all_token_counts[-1] += 1
- status_text = self.token_message()
- yield get_return_value()
- if self.interrupted:
- self.recover()
- break
- self.history.append(construct_assistant(partial_text))
-
- def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
- if fake_input:
- chatbot.append((fake_input, ""))
- else:
- chatbot.append((inputs, ""))
- if fake_input is not None:
- user_token_count = self.count_token(fake_input)
- else:
- user_token_count = self.count_token(inputs)
- self.all_token_counts.append(user_token_count)
- ai_reply, total_token_count = self.get_answer_at_once()
- self.history.append(construct_assistant(ai_reply))
- if fake_input is not None:
- self.history[-2] = construct_user(fake_input)
- chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
- if fake_input is not None:
- self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
- else:
- self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
- status_text = self.token_message()
- return chatbot, status_text
-
- def handle_file_upload(self, files, chatbot):
- """if the model accepts multi modal input, implement this function"""
- status = gr.Markdown.update()
- if files:
- construct_index(self.api_key, file_src=files)
- status = "索引构建完成"
- return gr.Files.update(), chatbot, status
-
- def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
- fake_inputs = None
- display_append = []
- limited_context = False
- fake_inputs = real_inputs
- if files:
- from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
- from llama_index.indices.query.schema import QueryBundle
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
- from langchain.chat_models import ChatOpenAI
- from llama_index import (
- GPTSimpleVectorIndex,
- ServiceContext,
- LangchainEmbedding,
- OpenAIEmbedding,
- )
- limited_context = True
- msg = "加载索引中……"
- logging.info(msg)
- # yield chatbot + [(inputs, "")], msg
- index = construct_index(self.api_key, file_src=files)
- assert index is not None, "获取索引失败"
- msg = "索引获取成功,生成回答中……"
- logging.info(msg)
- if local_embedding or self.model_type != ModelType.OpenAI:
- embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2"))
- else:
- embed_model = OpenAIEmbedding()
- # yield chatbot + [(inputs, "")], msg
- with retrieve_proxy():
- prompt_helper = PromptHelper(
- max_input_size=4096,
- num_output=5,
- max_chunk_overlap=20,
- chunk_size_limit=600,
- )
- from llama_index import ServiceContext
-
- service_context = ServiceContext.from_defaults(
- prompt_helper=prompt_helper, embed_model=embed_model
- )
- query_object = GPTVectorStoreIndexQuery(
- index.index_struct,
- service_context=service_context,
- similarity_top_k=5,
- vector_store=index._vector_store,
- docstore=index._docstore,
- )
- query_bundle = QueryBundle(real_inputs)
- nodes = query_object.retrieve(query_bundle)
- reference_results = [n.node.text for n in nodes]
- reference_results = add_source_numbers(reference_results, use_source=False)
- display_append = add_details(reference_results)
- display_append = "\n\n" + "".join(display_append)
- real_inputs = (
- replace_today(PROMPT_TEMPLATE)
- .replace("{query_str}", real_inputs)
- .replace("{context_str}", "\n\n".join(reference_results))
- .replace("{reply_language}", reply_language)
- )
- elif use_websearch:
- limited_context = True
- search_results = ddg(real_inputs, max_results=5)
- reference_results = []
- for idx, result in enumerate(search_results):
- logging.debug(f"搜索结果{idx + 1}:{result}")
- domain_name = urllib3.util.parse_url(result["href"]).host
- reference_results.append([result["body"], result["href"]])
- display_append.append(
- # f"{idx+1}. [{domain_name}]({result['href']})\n"
- f"{domain_name} \n"
- )
- reference_results = add_source_numbers(reference_results)
- display_append = "\n\n" + "".join(display_append) + " "
- real_inputs = (
- replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
- .replace("{query}", real_inputs)
- .replace("{web_results}", "\n\n".join(reference_results))
- .replace("{reply_language}", reply_language)
- )
- else:
- display_append = ""
- return limited_context, fake_inputs, display_append, real_inputs, chatbot
-
- def predict(
- self,
- inputs,
- chatbot,
- stream=False,
- use_websearch=False,
- files=None,
- reply_language="中文",
- should_check_token_count=True,
- ): # repetition_penalty, top_k
-
- status_text = "开始生成回答……"
- logging.info(
- "输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
- )
- if should_check_token_count:
- yield chatbot + [(inputs, "")], status_text
- if reply_language == "跟随问题语言(不稳定)":
- reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
-
- limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
- yield chatbot + [(fake_inputs, "")], status_text
-
- if (
- self.need_api_key and
- self.api_key is None
- and not shared.state.multi_api_key
- ):
- status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
- logging.info(status_text)
- chatbot.append((inputs, ""))
- if len(self.history) == 0:
- self.history.append(construct_user(inputs))
- self.history.append("")
- self.all_token_counts.append(0)
- else:
- self.history[-2] = construct_user(inputs)
- yield chatbot + [(inputs, "")], status_text
- return
- elif len(inputs.strip()) == 0:
- status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
- logging.info(status_text)
- yield chatbot + [(inputs, "")], status_text
- return
-
- if self.single_turn:
- self.history = []
- self.all_token_counts = []
- self.history.append(construct_user(inputs))
-
- try:
- if stream:
- logging.debug("使用流式传输")
- iter = self.stream_next_chatbot(
- inputs,
- chatbot,
- fake_input=fake_inputs,
- display_append=display_append,
- )
- for chatbot, status_text in iter:
- yield chatbot, status_text
- else:
- logging.debug("不使用流式传输")
- chatbot, status_text = self.next_chatbot_at_once(
- inputs,
- chatbot,
- fake_input=fake_inputs,
- display_append=display_append,
- )
- yield chatbot, status_text
- except Exception as e:
- traceback.print_exc()
- status_text = STANDARD_ERROR_MSG + str(e)
- yield chatbot, status_text
-
- if len(self.history) > 1 and self.history[-1]["content"] != inputs:
- logging.info(
- "回答为:"
- + colorama.Fore.BLUE
- + f"{self.history[-1]['content']}"
- + colorama.Style.RESET_ALL
- )
-
- if limited_context:
- # self.history = self.history[-4:]
- # self.all_token_counts = self.all_token_counts[-2:]
- self.history = []
- self.all_token_counts = []
-
- max_token = self.token_upper_limit - TOKEN_OFFSET
-
- if sum(self.all_token_counts) > max_token and should_check_token_count:
- count = 0
- while (
- sum(self.all_token_counts)
- > self.token_upper_limit * REDUCE_TOKEN_FACTOR
- and sum(self.all_token_counts) > 0
- ):
- count += 1
- del self.all_token_counts[0]
- del self.history[:2]
- logging.info(status_text)
- status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
- yield chatbot, status_text
-
- def retry(
- self,
- chatbot,
- stream=False,
- use_websearch=False,
- files=None,
- reply_language="中文",
- ):
- logging.debug("重试中……")
- if len(self.history) > 0:
- inputs = self.history[-2]["content"]
- del self.history[-2:]
- self.all_token_counts.pop()
- elif len(chatbot) > 0:
- inputs = chatbot[-1][0]
- else:
- yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
- return
-
- iter = self.predict(
- inputs,
- chatbot,
- stream=stream,
- use_websearch=use_websearch,
- files=files,
- reply_language=reply_language,
- )
- for x in iter:
- yield x
- logging.debug("重试完毕")
-
- # def reduce_token_size(self, chatbot):
- # logging.info("开始减少token数量……")
- # chatbot, status_text = self.next_chatbot_at_once(
- # summarize_prompt,
- # chatbot
- # )
- # max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
- # num_chat = find_n(self.all_token_counts, max_token_count)
- # logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
- # chatbot = chatbot[:-1]
- # self.history = self.history[-2*num_chat:] if num_chat > 0 else []
- # self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
- # msg = f"保留了最近{num_chat}轮对话"
- # logging.info(msg)
- # logging.info("减少token数量完毕")
- # return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
-
- def interrupt(self):
- self.interrupted = True
-
- def recover(self):
- self.interrupted = False
-
- def set_token_upper_limit(self, new_upper_limit):
- self.token_upper_limit = new_upper_limit
- print(f"token上限设置为{new_upper_limit}")
-
- def set_temperature(self, new_temperature):
- self.temperature = new_temperature
-
- def set_top_p(self, new_top_p):
- self.top_p = new_top_p
-
- def set_n_choices(self, new_n_choices):
- self.n_choices = new_n_choices
-
- def set_stop_sequence(self, new_stop_sequence: str):
- new_stop_sequence = new_stop_sequence.split(",")
- self.stop_sequence = new_stop_sequence
-
- def set_max_tokens(self, new_max_tokens):
- self.max_generation_token = new_max_tokens
-
- def set_presence_penalty(self, new_presence_penalty):
- self.presence_penalty = new_presence_penalty
-
- def set_frequency_penalty(self, new_frequency_penalty):
- self.frequency_penalty = new_frequency_penalty
-
- def set_logit_bias(self, logit_bias):
- logit_bias = logit_bias.split()
- bias_map = {}
- encoding = tiktoken.get_encoding("cl100k_base")
- for line in logit_bias:
- word, bias_amount = line.split(":")
- if word:
- for token in encoding.encode(word):
- bias_map[token] = float(bias_amount)
- self.logit_bias = bias_map
-
- def set_user_identifier(self, new_user_identifier):
- self.user_identifier = new_user_identifier
-
- def set_system_prompt(self, new_system_prompt):
- self.system_prompt = new_system_prompt
-
- def set_key(self, new_access_key):
- self.api_key = new_access_key.strip()
- msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
- logging.info(msg)
- return self.api_key, msg
-
- def set_single_turn(self, new_single_turn):
- self.single_turn = new_single_turn
-
- def reset(self):
- self.history = []
- self.all_token_counts = []
- self.interrupted = False
- return [], self.token_message([0])
-
- def delete_first_conversation(self):
- if self.history:
- del self.history[:2]
- del self.all_token_counts[0]
- return self.token_message()
-
- def delete_last_conversation(self, chatbot):
- if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
- msg = "由于包含报错信息,只删除chatbot记录"
- chatbot.pop()
- return chatbot, self.history
- if len(self.history) > 0:
- self.history.pop()
- self.history.pop()
- if len(chatbot) > 0:
- msg = "删除了一组chatbot对话"
- chatbot.pop()
- if len(self.all_token_counts) > 0:
- msg = "删除了一组对话的token计数记录"
- self.all_token_counts.pop()
- msg = "删除了一组对话"
- return chatbot, msg
-
- def token_message(self, token_lst=None):
- if token_lst is None:
- token_lst = self.all_token_counts
- token_sum = 0
- for i in range(len(token_lst)):
- token_sum += sum(token_lst[: i + 1])
- return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
-
- def save_chat_history(self, filename, chatbot, user_name):
- if filename == "":
- return
- if not filename.endswith(".json"):
- filename += ".json"
- return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
-
- def export_markdown(self, filename, chatbot, user_name):
- if filename == "":
- return
- if not filename.endswith(".md"):
- filename += ".md"
- return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
-
- def load_chat_history(self, filename, chatbot, user_name):
- logging.debug(f"{user_name} 加载对话历史中……")
- if type(filename) != str:
- filename = filename.name
- try:
- with open(os.path.join(HISTORY_DIR, user_name, filename), "r") as f:
- json_s = json.load(f)
- try:
- if type(json_s["history"][0]) == str:
- logging.info("历史记录格式为旧版,正在转换……")
- new_history = []
- for index, item in enumerate(json_s["history"]):
- if index % 2 == 0:
- new_history.append(construct_user(item))
- else:
- new_history.append(construct_assistant(item))
- json_s["history"] = new_history
- logging.info(new_history)
- except:
- # 没有对话历史
- pass
- logging.debug(f"{user_name} 加载对话历史完毕")
- self.history = json_s["history"]
- return filename, json_s["system"], json_s["chatbot"]
- except FileNotFoundError:
- logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作")
- return filename, self.system_prompt, chatbot
-
- def like(self):
- """like the last response, implement if needed
- """
- return gr.update()
-
- def dislike(self):
- """dislike the last response, implement if needed
- """
- return gr.update()
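The deleted base_model.py above is a template class: concrete backends are expected to override get_answer_at_once and/or get_answer_stream_iter while inheriting the history, token-count, and prompt plumbing. The sketch below is illustrative only — EchoModel is a made-up subclass, and the import assumes the ChuanhuChatGPT Space layout shown in the diff header with its dependencies (presets, utils, gradio, etc.) installed.

# Hypothetical subclass, not part of the original repo.
from modules.base_model import BaseLLMModel  # assumes the Space's package layout


class EchoModel(BaseLLMModel):
    """Toy backend that just echoes the last user message."""

    def get_answer_at_once(self):
        # self.history holds OpenAI-style {"role": ..., "content": ...} dicts;
        # the most recent entry is the user's question.
        question = self.history[-1]["content"]
        answer = f"You said: {question}"
        # Return the answer plus a rough running token total, as the base class expects.
        return answer, sum(self.all_token_counts) + self.count_token(answer)


model = EchoModel("echo-model")
model.history.append({"role": "user", "content": "hello"})
print(model.get_answer_at_once())

Because get_answer_stream_iter is not overridden here, the base class falls back to wrapping this one-shot answer in a single-element generator.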
diff --git a/spaces/segments-tobias/conex/espnet/asr/__init__.py b/spaces/segments-tobias/conex/espnet/asr/__init__.py
deleted file mode 100644
index b7f177368e62a5578b8706300e101f831a3972ac..0000000000000000000000000000000000000000
--- a/spaces/segments-tobias/conex/espnet/asr/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Initialize sub package."""
diff --git a/spaces/shgao/MDT/diffusion/timestep_sampler.py b/spaces/shgao/MDT/diffusion/timestep_sampler.py
deleted file mode 100644
index a3f369847677d8dbaaadb8297691b1be92cf189f..0000000000000000000000000000000000000000
--- a/spaces/shgao/MDT/diffusion/timestep_sampler.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Modified from OpenAI's diffusion repos
-# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
-# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
-# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
-
-from abc import ABC, abstractmethod
-
-import numpy as np
-import torch as th
-import torch.distributed as dist
-
-
-def create_named_schedule_sampler(name, diffusion):
- """
- Create a ScheduleSampler from a library of pre-defined samplers.
- :param name: the name of the sampler.
- :param diffusion: the diffusion object to sample for.
- """
- if name == "uniform":
- return UniformSampler(diffusion)
- elif name == "loss-second-moment":
- return LossSecondMomentResampler(diffusion)
- else:
- raise NotImplementedError(f"unknown schedule sampler: {name}")
-
-
-class ScheduleSampler(ABC):
- """
- A distribution over timesteps in the diffusion process, intended to reduce
- variance of the objective.
- By default, samplers perform unbiased importance sampling, in which the
- objective's mean is unchanged.
- However, subclasses may override sample() to change how the resampled
- terms are reweighted, allowing for actual changes in the objective.
- """
-
- @abstractmethod
- def weights(self):
- """
- Get a numpy array of weights, one per diffusion step.
- The weights needn't be normalized, but must be positive.
- """
-
- def sample(self, batch_size, device):
- """
- Importance-sample timesteps for a batch.
- :param batch_size: the number of timesteps.
- :param device: the torch device to save to.
- :return: a tuple (timesteps, weights):
- - timesteps: a tensor of timestep indices.
- - weights: a tensor of weights to scale the resulting losses.
- """
- w = self.weights()
- p = w / np.sum(w)
- indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
- indices = th.from_numpy(indices_np).long().to(device)
- weights_np = 1 / (len(p) * p[indices_np])
- weights = th.from_numpy(weights_np).float().to(device)
- return indices, weights
-
-
-class UniformSampler(ScheduleSampler):
- def __init__(self, diffusion):
- self.diffusion = diffusion
- self._weights = np.ones([diffusion.num_timesteps])
-
- def weights(self):
- return self._weights
-
-
-class LossAwareSampler(ScheduleSampler):
- def update_with_local_losses(self, local_ts, local_losses):
- """
- Update the reweighting using losses from a model.
- Call this method from each rank with a batch of timesteps and the
- corresponding losses for each of those timesteps.
- This method will perform synchronization to make sure all of the ranks
- maintain the exact same reweighting.
- :param local_ts: an integer Tensor of timesteps.
- :param local_losses: a 1D Tensor of losses.
- """
- batch_sizes = [
- th.tensor([0], dtype=th.int32, device=local_ts.device)
- for _ in range(dist.get_world_size())
- ]
- dist.all_gather(
- batch_sizes,
- th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
- )
-
- # Pad all_gather batches to be the maximum batch size.
- batch_sizes = [x.item() for x in batch_sizes]
- max_bs = max(batch_sizes)
-
- timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
- loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
- dist.all_gather(timestep_batches, local_ts)
- dist.all_gather(loss_batches, local_losses)
- timesteps = [
- x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
- ]
- losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
- self.update_with_all_losses(timesteps, losses)
-
- @abstractmethod
- def update_with_all_losses(self, ts, losses):
- """
- Update the reweighting using losses from a model.
- Sub-classes should override this method to update the reweighting
- using losses from the model.
- This method directly updates the reweighting without synchronizing
- between workers. It is called by update_with_local_losses from all
- ranks with identical arguments. Thus, it should have deterministic
- behavior to maintain state across workers.
- :param ts: a list of int timesteps.
- :param losses: a list of float losses, one per timestep.
- """
-
-
-class LossSecondMomentResampler(LossAwareSampler):
- def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
- self.diffusion = diffusion
- self.history_per_term = history_per_term
- self.uniform_prob = uniform_prob
- self._loss_history = np.zeros(
- [diffusion.num_timesteps, history_per_term], dtype=np.float64
- )
- self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)  # np.int is a removed NumPy alias
-
- def weights(self):
- if not self._warmed_up():
- return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
- weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
- weights /= np.sum(weights)
- weights *= 1 - self.uniform_prob
- weights += self.uniform_prob / len(weights)
- return weights
-
- def update_with_all_losses(self, ts, losses):
- for t, loss in zip(ts, losses):
- if self._loss_counts[t] == self.history_per_term:
- # Shift out the oldest loss term.
- self._loss_history[t, :-1] = self._loss_history[t, 1:]
- self._loss_history[t, -1] = loss
- else:
- self._loss_history[t, self._loss_counts[t]] = loss
- self._loss_counts[t] += 1
-
- def _warmed_up(self):
- return (self._loss_counts == self.history_per_term).all()
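For context, the schedule samplers above are consumed roughly as follows in a diffusion training step. This is a sketch rather than code from the MDT repo: DummyDiffusion is a stand-in exposing the only attribute the samplers read (num_timesteps), and the import assumes the script runs from the Space root so that diffusion.timestep_sampler resolves.

import torch as th

from diffusion.timestep_sampler import create_named_schedule_sampler  # path from the diff header


class DummyDiffusion:
    # Stand-in for the real diffusion object; only num_timesteps is read here.
    num_timesteps = 1000


sampler = create_named_schedule_sampler("uniform", DummyDiffusion())

# Draw importance-sampled timesteps plus the weights that keep the objective unbiased.
t, weights = sampler.sample(batch_size=8, device=th.device("cpu"))
per_sample_loss = th.rand(8)               # placeholder for the model's loss at timesteps t
loss = (per_sample_loss * weights).mean()  # reweighting undoes the sampling bias

With the loss-second-moment sampler, update_with_local_losses(t, per_sample_loss.detach()) would additionally be called each step so the weights track the loss history; that path assumes torch.distributed is initialized, since it all-gathers losses across ranks.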
diff --git a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/dataloader.py b/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/dataloader.py
deleted file mode 100644
index 039b9ec3645b2a4626ff47c221e372f32a6ad339..0000000000000000000000000000000000000000
--- a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/dataloader.py
+++ /dev/null
@@ -1,425 +0,0 @@
-import torch
-import torch.multiprocessing as multiprocessing
-from torch._C import _set_worker_signal_handlers, \
- _remove_worker_pids, _error_if_any_worker_fails
-try:
- from torch._C import _set_worker_pids
-except:
- from torch._C import _update_worker_pids as _set_worker_pids
-from .sampler import SequentialSampler, RandomSampler, BatchSampler
-import signal
-import collections
-import re
-import sys
-import threading
-import traceback
-from torch._six import string_classes, int_classes
-import numpy as np
-
-if sys.version_info[0] == 2:
- import Queue as queue
-else:
- import queue
-
-
-class ExceptionWrapper(object):
- r"Wraps an exception plus traceback to communicate across threads"
-
- def __init__(self, exc_info):
- self.exc_type = exc_info[0]
- self.exc_msg = "".join(traceback.format_exception(*exc_info))
-
-
-_use_shared_memory = False
-"""Whether to use shared memory in default_collate"""
-
-
-def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
- global _use_shared_memory
- _use_shared_memory = True
-
- # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
- # module's handlers are executed after Python returns from C low-level
- # handlers, likely when the same fatal signal happened again already.
- # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
- _set_worker_signal_handlers()
-
- torch.set_num_threads(1)
- torch.manual_seed(seed)
- np.random.seed(seed)
-
- if init_fn is not None:
- init_fn(worker_id)
-
- while True:
- r = index_queue.get()
- if r is None:
- break
- idx, batch_indices = r
- try:
- samples = collate_fn([dataset[i] for i in batch_indices])
- except Exception:
- data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
- else:
- data_queue.put((idx, samples))
-
-
-def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
- if pin_memory:
- torch.cuda.set_device(device_id)
-
- while True:
- try:
- r = in_queue.get()
- except Exception:
- if done_event.is_set():
- return
- raise
- if r is None:
- break
- if isinstance(r[1], ExceptionWrapper):
- out_queue.put(r)
- continue
- idx, batch = r
- try:
- if pin_memory:
- batch = pin_memory_batch(batch)
- except Exception:
- out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
- else:
- out_queue.put((idx, batch))
-
-numpy_type_map = {
- 'float64': torch.DoubleTensor,
- 'float32': torch.FloatTensor,
- 'float16': torch.HalfTensor,
- 'int64': torch.LongTensor,
- 'int32': torch.IntTensor,
- 'int16': torch.ShortTensor,
- 'int8': torch.CharTensor,
- 'uint8': torch.ByteTensor,
-}
-
-
-def default_collate(batch):
- "Puts each data field into a tensor with outer dimension batch size"
-
- error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
- elem_type = type(batch[0])
- if torch.is_tensor(batch[0]):
- out = None
- if _use_shared_memory:
- # If we're in a background process, concatenate directly into a
- # shared memory tensor to avoid an extra copy
- numel = sum([x.numel() for x in batch])
- storage = batch[0].storage()._new_shared(numel)
- out = batch[0].new(storage)
- return torch.stack(batch, 0, out=out)
- elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
- and elem_type.__name__ != 'string_':
- elem = batch[0]
- if elem_type.__name__ == 'ndarray':
- # array of string classes and object
- if re.search('[SaUO]', elem.dtype.str) is not None:
- raise TypeError(error_msg.format(elem.dtype))
-
- return torch.stack([torch.from_numpy(b) for b in batch], 0)
- if elem.shape == (): # scalars
- py_type = float if elem.dtype.name.startswith('float') else int
- return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
- elif isinstance(batch[0], int_classes):
- return torch.LongTensor(batch)
- elif isinstance(batch[0], float):
- return torch.DoubleTensor(batch)
- elif isinstance(batch[0], string_classes):
- return batch
- elif isinstance(batch[0], collections.Mapping):
- return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
- elif isinstance(batch[0], collections.Sequence):
- transposed = zip(*batch)
- return [default_collate(samples) for samples in transposed]
-
- raise TypeError((error_msg.format(type(batch[0]))))
-
-
-def pin_memory_batch(batch):
- if torch.is_tensor(batch):
- return batch.pin_memory()
- elif isinstance(batch, string_classes):
- return batch
- elif isinstance(batch, collections.Mapping):
- return {k: pin_memory_batch(sample) for k, sample in batch.items()}
- elif isinstance(batch, collections.Sequence):
- return [pin_memory_batch(sample) for sample in batch]
- else:
- return batch
-
-
-_SIGCHLD_handler_set = False
-"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
-handler needs to be set for all DataLoaders in a process."""
-
-
-def _set_SIGCHLD_handler():
- # Windows doesn't support SIGCHLD handler
- if sys.platform == 'win32':
- return
- # can't set signal in child threads
- if not isinstance(threading.current_thread(), threading._MainThread):
- return
- global _SIGCHLD_handler_set
- if _SIGCHLD_handler_set:
- return
- previous_handler = signal.getsignal(signal.SIGCHLD)
- if not callable(previous_handler):
- previous_handler = None
-
- def handler(signum, frame):
- # This following call uses `waitid` with WNOHANG from C side. Therefore,
- # Python can still get and update the process status successfully.
- _error_if_any_worker_fails()
- if previous_handler is not None:
- previous_handler(signum, frame)
-
- signal.signal(signal.SIGCHLD, handler)
- _SIGCHLD_handler_set = True
-
-
-class DataLoaderIter(object):
- "Iterates once over the DataLoader's dataset, as specified by the sampler"
-
- def __init__(self, loader):
- self.dataset = loader.dataset
- self.collate_fn = loader.collate_fn
- self.batch_sampler = loader.batch_sampler
- self.num_workers = loader.num_workers
- self.pin_memory = loader.pin_memory and torch.cuda.is_available()
- self.timeout = loader.timeout
- self.done_event = threading.Event()
-
- self.sample_iter = iter(self.batch_sampler)
-
- if self.num_workers > 0:
- self.worker_init_fn = loader.worker_init_fn
- self.index_queue = multiprocessing.SimpleQueue()
- self.worker_result_queue = multiprocessing.SimpleQueue()
- self.batches_outstanding = 0
- self.worker_pids_set = False
- self.shutdown = False
- self.send_idx = 0
- self.rcvd_idx = 0
- self.reorder_dict = {}
-
- base_seed = torch.LongTensor(1).random_(0, 2**31-1)[0]
- self.workers = [
- multiprocessing.Process(
- target=_worker_loop,
- args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn,
- base_seed + i, self.worker_init_fn, i))
- for i in range(self.num_workers)]
-
- if self.pin_memory or self.timeout > 0:
- self.data_queue = queue.Queue()
- if self.pin_memory:
- maybe_device_id = torch.cuda.current_device()
- else:
- # do not initialize cuda context if not necessary
- maybe_device_id = None
- self.worker_manager_thread = threading.Thread(
- target=_worker_manager_loop,
- args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory,
- maybe_device_id))
- self.worker_manager_thread.daemon = True
- self.worker_manager_thread.start()
- else:
- self.data_queue = self.worker_result_queue
-
- for w in self.workers:
- w.daemon = True # ensure that the worker exits on process exit
- w.start()
-
- _set_worker_pids(id(self), tuple(w.pid for w in self.workers))
- _set_SIGCHLD_handler()
- self.worker_pids_set = True
-
- # prime the prefetch loop
- for _ in range(2 * self.num_workers):
- self._put_indices()
-
- def __len__(self):
- return len(self.batch_sampler)
-
- def _get_batch(self):
- if self.timeout > 0:
- try:
- return self.data_queue.get(timeout=self.timeout)
- except queue.Empty:
- raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
- else:
- return self.data_queue.get()
-
- def __next__(self):
- if self.num_workers == 0: # same-process loading
- indices = next(self.sample_iter) # may raise StopIteration
- batch = self.collate_fn([self.dataset[i] for i in indices])
- if self.pin_memory:
- batch = pin_memory_batch(batch)
- return batch
-
- # check if the next sample has already been generated
- if self.rcvd_idx in self.reorder_dict:
- batch = self.reorder_dict.pop(self.rcvd_idx)
- return self._process_next_batch(batch)
-
- if self.batches_outstanding == 0:
- self._shutdown_workers()
- raise StopIteration
-
- while True:
- assert (not self.shutdown and self.batches_outstanding > 0)
- idx, batch = self._get_batch()
- self.batches_outstanding -= 1
- if idx != self.rcvd_idx:
- # store out-of-order samples
- self.reorder_dict[idx] = batch
- continue
- return self._process_next_batch(batch)
-
- next = __next__ # Python 2 compatibility
-
- def __iter__(self):
- return self
-
- def _put_indices(self):
- assert self.batches_outstanding < 2 * self.num_workers
- indices = next(self.sample_iter, None)
- if indices is None:
- return
- self.index_queue.put((self.send_idx, indices))
- self.batches_outstanding += 1
- self.send_idx += 1
-
- def _process_next_batch(self, batch):
- self.rcvd_idx += 1
- self._put_indices()
- if isinstance(batch, ExceptionWrapper):
- raise batch.exc_type(batch.exc_msg)
- return batch
-
- def __getstate__(self):
- # TODO: add limited pickling support for sharing an iterator
- # across multiple threads for HOGWILD.
- # Probably the best way to do this is by moving the sample pushing
- # to a separate thread and then just sharing the data queue
- # but signalling the end is tricky without a non-blocking API
- raise NotImplementedError("DataLoaderIterator cannot be pickled")
-
- def _shutdown_workers(self):
- try:
- if not self.shutdown:
- self.shutdown = True
- self.done_event.set()
- # if worker_manager_thread is waiting to put
- while not self.data_queue.empty():
- self.data_queue.get()
- for _ in self.workers:
- self.index_queue.put(None)
- # done_event should be sufficient to exit worker_manager_thread,
- # but be safe here and put another None
- self.worker_result_queue.put(None)
- finally:
- # removes pids no matter what
- if self.worker_pids_set:
- _remove_worker_pids(id(self))
- self.worker_pids_set = False
-
- def __del__(self):
- if self.num_workers > 0:
- self._shutdown_workers()
-
-
-class DataLoader(object):
- """
- Data loader. Combines a dataset and a sampler, and provides
- single- or multi-process iterators over the dataset.
-
- Arguments:
- dataset (Dataset): dataset from which to load the data.
- batch_size (int, optional): how many samples per batch to load
- (default: 1).
- shuffle (bool, optional): set to ``True`` to have the data reshuffled
- at every epoch (default: False).
- sampler (Sampler, optional): defines the strategy to draw samples from
- the dataset. If specified, ``shuffle`` must be False.
- batch_sampler (Sampler, optional): like sampler, but returns a batch of
- indices at a time. Mutually exclusive with batch_size, shuffle,
- sampler, and drop_last.
- num_workers (int, optional): how many subprocesses to use for data
- loading. 0 means that the data will be loaded in the main process.
- (default: 0)
- collate_fn (callable, optional): merges a list of samples to form a mini-batch.
- pin_memory (bool, optional): If ``True``, the data loader will copy tensors
- into CUDA pinned memory before returning them.
- drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
- if the dataset size is not divisible by the batch size. If ``False`` and
- the size of dataset is not divisible by the batch size, then the last batch
- will be smaller. (default: False)
- timeout (numeric, optional): if positive, the timeout value for collecting a batch
- from workers. Should always be non-negative. (default: 0)
- worker_init_fn (callable, optional): If not None, this will be called on each
- worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
- input, after seeding and before data loading. (default: None)
-
- .. note:: By default, each worker will have its PyTorch seed set to
- ``base_seed + worker_id``, where ``base_seed`` is a long generated
- by main process using its RNG. You may use ``torch.initial_seed()`` to access
- this value in :attr:`worker_init_fn`, which can be used to set other seeds
- (e.g. NumPy) before data loading.
-
- .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an
- unpicklable object, e.g., a lambda function.
- """
-
- def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
- num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False,
- timeout=0, worker_init_fn=None):
- self.dataset = dataset
- self.batch_size = batch_size
- self.num_workers = num_workers
- self.collate_fn = collate_fn
- self.pin_memory = pin_memory
- self.drop_last = drop_last
- self.timeout = timeout
- self.worker_init_fn = worker_init_fn
-
- if timeout < 0:
- raise ValueError('timeout option should be non-negative')
-
- if batch_sampler is not None:
- if batch_size > 1 or shuffle or sampler is not None or drop_last:
- raise ValueError('batch_sampler is mutually exclusive with '
- 'batch_size, shuffle, sampler, and drop_last')
-
- if sampler is not None and shuffle:
- raise ValueError('sampler is mutually exclusive with shuffle')
-
- if self.num_workers < 0:
- raise ValueError('num_workers cannot be negative; '
- 'use num_workers=0 to disable multiprocessing.')
-
- if batch_sampler is None:
- if sampler is None:
- if shuffle:
- sampler = RandomSampler(dataset)
- else:
- sampler = SequentialSampler(dataset)
- batch_sampler = BatchSampler(sampler, batch_size, drop_last)
-
- self.sampler = sampler
- self.batch_sampler = batch_sampler
-
- def __iter__(self):
- return DataLoaderIter(self)
-
- def __len__(self):
- return len(self.batch_sampler)
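To show how this vendored loader is meant to be driven, here is a minimal usage sketch. It is not taken from the FcF-Inpainting repo: SquaresDataset is invented, the import path merely mirrors the directory shown in the diff header, and the module targets an old PyTorch (it imports torch._six), so a correspondingly old environment is assumed.

import torch

# Hypothetical import; assumes the segm_lib tree is on sys.path and an old PyTorch.
from segm_lib.utils.data.dataloader import DataLoader


class SquaresDataset:
    """Minimal map-style dataset returning (x, x**2) pairs as tensors."""

    def __len__(self):
        return 10

    def __getitem__(self, i):
        x = torch.tensor(float(i))
        return x, x * x


loader = DataLoader(SquaresDataset(), batch_size=4, shuffle=True, num_workers=0)
for xs, ys in loader:             # default_collate stacks each field along a new batch dim
    print(xs.shape, ys.shape)     # torch.Size([4]) ... (final batch is smaller unless drop_last=True)

Setting num_workers above zero switches to the multiprocessing path, where batches are prefetched by worker processes and, with pin_memory=True, staged into pinned memory by the manager thread.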
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Animal Revolt Battle Simulator The Ultimate Animal Combat Game for PC.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Animal Revolt Battle Simulator The Ultimate Animal Combat Game for PC.md
deleted file mode 100644
index 4b347817c8449c26331b90bf7649629370c39b5e..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Animal Revolt Battle Simulator The Ultimate Animal Combat Game for PC.md
+++ /dev/null
@@ -1,208 +0,0 @@
-
-Animal Revolt Battle Simulator: A Physics-Based Sandbox Game
-Do you love watching animals fight each other in a realistic and hilarious way? Do you want to create your own epic battles between different types of creatures, from dinosaurs to dragons, from sharks to goats, and even from godzilla to t-rex? Do you want to join the action yourself and blast away the enemy with some powerful guns? If you answered yes to any of these questions, then you should definitely check out Animal Revolt Battle Simulator, a physics-based sandbox game that lets you do all of these things and more!
-animal revolt battle simulator free download 2022 pc DOWNLOAD ⚹⚹⚹ https://ssurll.com/2uNSAs
-What is Animal Revolt Battle Simulator?
-Animal Revolt Battle Simulator is a game developed by VDimension and published by VDimension and Yodo1 Ltd. It was released on Steam on April 15, 2022, and has received overwhelmingly positive reviews from players and critics alike. It is also available on Google Play and Nintendo Switch.
-The main features of the game
-Animal Revolt Battle Simulator is a game that offers you ultimate freedom and creativity in creating funny and chaotic battles between all sorts of ragdoll creatures. You can:
-
-Build your own maps or pick from a selection of ready-made ones.
-Place up to seven opposing armies made of different types of beasts and watch them tear each other apart in an epic battle!
-Create your own custom monsters by combining different body parts and weapons. You can attach as many body parts and weapons as you want, anywhere you want!
-Download a vast selection of custom monsters, maps, and buildings created by other players from the Steam Workshop. You can also upload your own creations for other people to try out.
-Test your tactical and strategic expertise in the campaign mode. Pick the right beasts, place them in the right place, and command them to defeat the enemy.
-Join the fight yourself in the first-person mode and blow the enemy away with some powerful guns!
-
-How to play Animal Revolt Battle Simulator?
-The game is very easy to play and has a simple interface. You can use your mouse to drag and drop units on the map, rotate them, scale them, clone them, delete them, etc. You can also use your keyboard to move around the map, zoom in and out, change the camera angle, etc. You can also use hotkeys to access different menus and options.
-animal revolt battle simulator pc download free full version
-animal revolt battle simulator 2022 free download for windows 10
-how to download animal revolt battle simulator on pc for free
-animal revolt battle simulator official game free download pc
-animal revolt battle simulator pc game free download highly compressed
-animal revolt battle simulator free download latest version pc
-animal revolt battle simulator pc game crack free download
-animal revolt battle simulator free download steamunlocked
-animal revolt battle simulator pc game system requirements
-animal revolt battle simulator gameplay free download pc
-animal revolt battle simulator mod apk free download for pc
-animal revolt battle simulator online multiplayer free download pc
-animal revolt battle simulator cheats codes free download pc
-animal revolt battle simulator sandbox mode free download pc
-animal revolt battle simulator best battles free download pc
-animal revolt battle simulator dinosaurs free download pc
-animal revolt battle simulator godzilla free download pc
-animal revolt battle simulator custom creatures free download pc
-animal revolt battle simulator realistic physics free download pc
-animal revolt battle simulator ragdoll effects free download pc
-animal revolt battle simulator campaign mode free download pc
-animal revolt battle simulator tips and tricks free download pc
-animal revolt battle simulator review free download pc
-animal revolt battle simulator android emulator free download pc
-animal revolt battle simulator bluestacks app player free download pc
-animal revolt battle simulator mumu player free download pc
-animal revolt battle simulator nox player free download pc
-animal revolt battle simulator ldplayer free download pc
-animal revolt battle simulator memu play free download pc
-animal revolt battle simulator gameloop free download pc
-animal revolt battle simulator smartgaga free download pc
-animal revolt battle simulator koplayer free download pc
-animal revolt battle simulator droid4x free download pc
-animal revolt battle simulator genymotion free download pc
-animal revolt battle simulator andy emulator free download pc
-animal revolt battle simulator remix os player free download pc
-animal revolt battle simulator phoenix os player free download pc
-animal revolt battle simulator prime os player free download pc
-animal revolt battle simulator tencent gaming buddy free download pc
-animal revolt battle simulator mebox emulator free download pc
-animal revolt battle simulator windroye emulator free download pc
-animal revolt battle simulator amiduos emulator free download pc
-animal revolt battle simulator leapdroid emulator free download pc
-animal revolt battle simulator youwave emulator free download pc
-animal revolt battle simulator jar of beans emulator free download pc
-animal revolt battle simulator console os emulator free download pc
-Once you have placed your units on the map, you can press the start button to begin the battle. You can watch the battle unfold from different perspectives, such as top-down, side-view, or free camera. You can also pause, resume, slow down, or speed up the battle at any time. You can also switch to the first-person mode and join the battle yourself.
-The game has a physics-based engine that makes the battles realistic and hilarious. You can see the limbs bending, necks twisting, bodies flying around, blood splattering, etc. The game also has ragdoll effects that make the creatures flop around when they die or get hit. The game also has sound effects that add to the immersion and humor of the game.
-How to download Animal Revolt Battle Simulator for free on PC?
-If you want to download Animal Revolt Battle Simulator for free on your PC, you need to follow some steps. However, before you do that, you need to make sure that your PC meets the minimum system requirements for the game. Here are the system requirements for Animal Revolt Battle Simulator:
-The system requirements for the game
-
-| Requirement | Minimum | Recommended |
-| --- | --- | --- |
-| OS | Windows 7 or newer | Windows 10 |
-| Processor | Intel Core i5-2400 @ 3.1 GHz or AMD FX-6300 @ 3.5 GHz or equivalent | Intel Core i7-4770 @ 3.4 GHz or AMD Ryzen 5 1600 @ 3.2 GHz or equivalent |
-| Memory | 8 GB RAM | 16 GB RAM |
-| Graphics | NVIDIA GeForce GTX 670 or AMD R9 270 (2GB VRAM with Shader Model 5.0 or better) | NVIDIA GeForce GTX 970 or AMD R9 290X (4GB VRAM with Shader Model 5.0 or better) |
-| DirectX | Version 11 | Version 11 |
-| Storage | 4 GB available space | 4 GB available space |
-| Sound Card | DirectX compatible sound card with latest drivers | DirectX compatible sound card with latest drivers |
-
- The steps to download and install the game
- If your PC meets the system requirements, you can follow these steps to download and install Animal Revolt Battle Simulator for free on your PC:
-
-Go to a trusted and reliable website that offers free downloads of PC games, such as [Ocean of Games] or [Steam Unlocked].
-Search for Animal Revolt Battle Simulator in the search bar and click on the game link.
-Read the description and instructions carefully and make sure you have enough space on your hard drive.
-Click on the download button and wait for the download to finish.
-Extract the zip file using WinRAR or 7-Zip and open the extracted folder.
-Run the setup.exe file as administrator and follow the installation steps.
-Copy the crack files from the crack folder and paste them into the game installation folder.
-Launch the game from the desktop shortcut or the game folder and enjoy!
- The benefits of downloading the game from a trusted source
- There are many benefits of downloading Animal Revolt Battle Simulator from a trusted source, such as:
-
- You can get the game for free without paying any money.
- You can get the latest version of the game with all the updates and bug fixes.
- You can get a safe and virus-free download without any malware or spyware.
- You can get a fast and smooth download without any interruptions or errors.
- You can get a full and complete game without any missing files or features.
- You can get a user-friendly and easy-to-use interface with clear instructions and support.
- You can get access to a large community of gamers who share their creations, feedback, and tips.
-
- How to create your own custom creatures and maps in Animal Revolt Battle Simulator?
- One of the most fun and creative aspects of Animal Revolt Battle Simulator is that you can create your own custom creatures and maps using the in-game tools. You can also download and use other players' creations from the Steam Workshop. Here is how you can do that:
- The unit creator tool
- The unit creator tool allows you to create your own custom monsters by combining different body parts and weapons. You can access this tool by clicking on the unit creator button on the main menu. You can then:
-
- Select a base body from a variety of animals, such as lions, bears, crocodiles, elephants, etc.
- Add different body parts, such as heads, legs, wings, tails, horns, etc., from different animals. You can also add human body parts, such as arms, hands, feet, etc.
- Add different weapons, such as swords, axes, guns, rockets, lasers, etc., to any body part. You can also add shields, armor, helmets, etc., for extra protection.
- Adjust the size, position, rotation, color, texture, and transparency of each body part and weapon. You can also use the sliders to change the mass, health, damage, speed, and range of each unit.
- Save your custom unit and give it a name and a description. You can also choose a category and a faction for your unit.
- Use your custom unit in the sandbox mode or the campaign mode. You can also upload your custom unit to the Steam Workshop for other players to download and use.
-
- The map editor tool
- The map editor tool allows you to create your own custom maps by placing different objects and terrain on a flat surface. You can access this tool by clicking on the map editor button on the main menu. You can then:
-
- Select a map size from small, medium, large, or huge.
- Select a terrain type from grass, sand, snow, water, lava, etc.
- Select a skybox from day, night, sunset, etc.
- Place different objects on the map, such as buildings, trees, rocks, bridges, fences, etc. You can also place animals and humans as static or dynamic objects.
- Adjust the size, position, rotation, color, texture, and transparency of each object. You can also use the sliders to change the gravity, wind, fog, etc., of the map.
- Save your custom map and give it a name and a description. You can also upload your custom map to the Steam Workshop for other players to download and use.
-
- The workshop and community creations
- The workshop is a feature that allows you to browse, download, and use other players' custom units and maps. You can access this feature by clicking on the workshop button on the main menu. You can then:
-
- Search for custom units and maps by name, category, rating, popularity, etc.
- View the details and screenshots of each custom unit and map. You can also read the comments and reviews from other players.
- Download the custom units and maps that you like and add them to your library.
- Use the custom units and maps in the sandbox mode or the campaign mode. You can also rate and review them after using them.
-
- How to join the battles yourself in Animal Revolt Battle Simulator?
- If you want to join the battles yourself and have some fun shooting at the enemy creatures, you can do that by switching to the first-person mode. Here is how you can do that:
- The first-person mode
- The first-person mode is a feature that allows you to control one of your units in the battle and see the action from their perspective. You can access this feature by pressing the F key on your keyboard during a battle. You can then:
-
-Move around using the WASD keys or the arrow keys.
-Aim using your mouse cursor.
-Shoot using the left mouse button.
-Reload using the R key.
-Switch weapons using the Q key or the mouse wheel.
-Crouch using the C key.
-Jump using the spacebar.
-
-The weapons and guns available
-In the first-person mode, you can use different weapons and guns to shoot at the enemy creatures. Some of these weapons are:
-
-Pistol: A basic handgun that has low damage but high accuracy and fire rate.
-Shotgun: A powerful shotgun that has high damage but low accuracy and fire rate.
-Rifle: A semi-automatic rifle that has medium damage and accuracy but high fire rate.
-Sniper: A long-range sniper rifle that has high damage and accuracy but low fire rate.
-Rocket Launcher: A devastating rocket launcher that has very high damage but low accuracy and fire rate.
-Laser Gun: A futuristic laser gun that has medium damage but high accuracy and fire rate.
-
-The tips and tricks for surviving the battles
-If you want to survive the battles in the first-person mode, you need to follow some tips and tricks. Here are some of them:
-
-Pick a weapon that suits your playstyle and situation. For example, use a shotgun for close-range combat or a sniper for long-range combat.
-Aim for the head or other weak spots of the enemy creatures to deal more damage and kill them faster.
-Reload your weapon before you run out of ammo or when you are in cover.
-Use cover to avoid getting hit by enemy fire or attacks. You can hide behind buildings, trees, rocks, fences, etc., and peek out to shoot.
-Keep moving and don't stay in one place for too long. The enemy creatures will chase you and attack you from different directions.
-Use the environment to your advantage. You can shoot explosive barrels, gas tanks, or vehicles to cause explosions and damage the enemy creatures.
-Watch your health and stamina bars. You can heal yourself by picking up health packs or eating food. You can also restore your stamina by resting or drinking water.
-Have fun and experiment with different combinations of units, weapons, and maps. You can create some hilarious and epic scenarios in the game!
-
- Conclusion
- Animal Revolt Battle Simulator is a physics-based sandbox game that lets you create and watch funny, chaotic battles between all sorts of ragdoll creatures. You can build your own custom monsters and maps with the in-game tools, download other players' creations from the Steam Workshop, or join the battles yourself in first-person mode and shoot at the enemy creatures with some powerful guns. The game is easy to play and has a simple interface, yet it offers plenty of freedom and creativity, and its physics-based engine, ragdoll effects, and sound effects make it both realistic and hilarious. It is available for free on PC, as well as on Google Play and Nintendo Switch. If you are looking for a fun and entertaining game that will make you laugh and keep you hooked for hours, you should definitely download Animal Revolt Battle Simulator today!
- FAQs
- Here are some frequently asked questions about Animal Revolt Battle Simulator:
-
-Q: How many units can I place on the map?
-A: You can place up to 1000 units on the map, depending on the map size and your PC performance.
-Q: How many custom units can I create?
-A: You can create up to 100 custom units using the unit creator tool.
-Q: How many custom maps can I create?
-A: You can create up to 100 custom maps using the map editor tool.
-Q: How can I share my custom units and maps with other players?
-A: You can share your custom units and maps with other players by uploading them to the Steam Workshop. You can also download other players' creations from there.
-Q: How can I contact the developers of the game?
-A: You can contact the developers of the game by sending them an email at vdimensiongames@gmail.com or by visiting their website at www.vdimensiongames.com.
-
-
\ No newline at end of file
diff --git a/spaces/snjyor/ChatGPT_demo/README.md b/spaces/snjyor/ChatGPT_demo/README.md
deleted file mode 100644
index 6c426d5e206d23fcb9ee71737368ae031a64baf8..0000000000000000000000000000000000000000
--- a/spaces/snjyor/ChatGPT_demo/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ChatGPT_demo
-emoji: 💻
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.20.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-large-patch14.md b/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-large-patch14.md
deleted file mode 100644
index 11384803a049e29bf6f260fb0399a3bb808f4224..0000000000000000000000000000000000000000
--- a/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-large-patch14.md
+++ /dev/null
@@ -1,136 +0,0 @@
-# Model Card: CLIP
-
-Disclaimer: The model card is taken and modified from the official CLIP repository; it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md).
-
-## Model Details
-
-The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within.
-
-### Model Date
-
-January 2021
-
-### Model Type
-
-The base model uses a ViT-L/14 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss.
-
-The original implementation had two variants: one using a ResNet image encoder and the other using a Vision Transformer. This repository has the variant with the Vision Transformer.
-
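-As a rough illustration of that objective, here is a minimal, hypothetical sketch (not OpenAI's training code) of a symmetric contrastive loss over a batch of pre-computed image and text embeddings; the function name and the fixed temperature value are illustrative assumptions:
-
-```python
-import torch
-import torch.nn.functional as F
-
-def contrastive_loss(image_embeds, text_embeds, temperature=0.07):
-    # normalize so that dot products become cosine similarities
-    image_embeds = F.normalize(image_embeds, dim=-1)
-    text_embeds = F.normalize(text_embeds, dim=-1)
-    # batch_size x batch_size similarity matrix, scaled by the temperature
-    logits = image_embeds @ text_embeds.t() / temperature
-    # the matching (image, text) pair of each row sits on the diagonal
-    targets = torch.arange(logits.size(0), device=logits.device)
-    loss_images = F.cross_entropy(logits, targets)      # image -> text direction
-    loss_texts = F.cross_entropy(logits.t(), targets)   # text -> image direction
-    return (loss_images + loss_texts) / 2
-```
-
-In the released CLIP models the temperature is a learned logit scale rather than a fixed constant.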
-
-### Documents
-
-- [Blog Post](https://openai.com/blog/clip/)
-- [CLIP Paper](https://arxiv.org/abs/2103.00020)
-
-
-### Use with Transformers
-
-```python
-from PIL import Image
-import requests
-
-from transformers import CLIPProcessor, CLIPModel
-
-model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
-processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
-
-url = "http://images.cocodataset.org/val2017/000000039769.jpg"
-image = Image.open(requests.get(url, stream=True).raw)
-
-inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
-
-outputs = model(**inputs)
-logits_per_image = outputs.logits_per_image # this is the image-text similarity score
-probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
-```
-
-
-## Model Use
-
-### Intended Use
-
-The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis.
-
-#### Primary intended uses
-
-The primary intended users of these models are AI researchers.
-
-We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models.
-
-### Out-of-Scope Use Cases
-
-**Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases, such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task-specific testing, especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful.
-
-Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use.
-
-Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases.
-
-
-
-## Data
-
-The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet, which tend to skew towards more developed nations, and younger, male users.
-
-### Data Mission Statement
-
-Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset.
-
-
-
-## Performance and Limitations
-
-### Performance
-
-We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision tasks, from OCR to texture recognition to fine-grained classification. The paper describes model performance on the following datasets:
-
-- Food101
-- CIFAR10
-- CIFAR100
-- Birdsnap
-- SUN397
-- Stanford Cars
-- FGVC Aircraft
-- VOC2007
-- DTD
-- Oxford-IIIT Pet dataset
-- Caltech101
-- Flowers102
-- MNIST
-- SVHN
-- IIIT5K
-- Hateful Memes
-- SST-2
-- UCF101
-- Kinetics700
-- Country211
-- CLEVR Counting
-- KITTI Distance
-- STL-10
-- RareAct
-- Flickr30
-- MSCOCO
-- ImageNet
-- ImageNet-A
-- ImageNet-R
-- ImageNet Sketch
-- ObjectNet (ImageNet Overlap)
-- Youtube-BB
-- ImageNet-Vid
-
-## Limitations
-
-CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine-grained classification and counting objects. CLIP also poses issues with regard to fairness and bias, which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation: in many cases we have used linear probes to evaluate the performance of CLIP, and there is evidence suggesting that linear probes can underestimate model performance.
-
-### Bias and Fairness
-
-We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper).
-
-We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks.
-
-
-
-## Feedback
-
-### Where to send questions or comments about the model
-
-Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9)
\ No newline at end of file
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py
deleted file mode 100644
index 3279dae89a8bca95178bbe1285d3cb334890b12f..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py
+++ /dev/null
@@ -1,287 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import mmap
-import os
-import shutil
-import struct
-import typing as tp
-from functools import lru_cache
-
-import numpy as np
-import torch
-from fairseq.data import indexed_dataset
-from fairseq.data.huffman import HuffmanCoder
-from fairseq.file_io import PathManager
-
-
-class HuffmanMMapIndex:
- """
- keep an index of the offsets in the huffman binary file.
- First a header, then the list of sizes (num tokens) for each instance and finally
- the addresses of each instance.
- """
-
- _HDR_MAGIC = b"HUFFIDX\x00\x00"
- _VERSION = 1
-
- @classmethod
- def writer(cls, path: str, data_len: int):
- class _Writer:
- def __enter__(self):
- self._file = open(path, "wb")
-
- # write header (magic + version)
- self._file.write(cls._HDR_MAGIC)
- self._file.write(struct.pack(" None:
- self._path_prefix = path_prefix
- self._coder = coder
- self._sizes = []
- self._ptrs = []
- self._data_len = 0
-
- def open(self):
- self._coder.to_file(vocab_file_path(self._path_prefix))
- self._data_file = open(indexed_dataset.data_file_path(self._path_prefix), "wb")
-
- def __enter__(self) -> "HuffmanMMapIndexedDatasetBuilder":
- self.open()
- return self
-
- def add_item(self, tokens: tp.List[str]) -> None:
- """
- add a list of tokens to the dataset; they will be compressed with the
- provided coder before being written to file.
- """
- encoded = self._coder.encode(tokens)
- code_len = len(encoded)
- last_ptr = 0
- if len(self._ptrs) > 0:
- last_ptr = self._ptrs[-1]
- self._sizes.append(len(tokens))
- self._ptrs.append(last_ptr + code_len)
- self._data_len += code_len
- self._data_file.write(encoded)
-
- def append(self, other_dataset_path_prefix: str) -> None:
- """
- append an existing dataset.
- Beware, if it wasn't built with the same coder, you are in trouble.
- """
- other_index = HuffmanMMapIndex(
- indexed_dataset.index_file_path(other_dataset_path_prefix)
- )
- for (ptr, size) in other_index:
- self._ptrs.append(ptr + self._data_len)
- self._sizes.append(size)
-
- # Concatenate data
- with open(indexed_dataset.data_file_path(other_dataset_path_prefix), "rb") as f:
- shutil.copyfileobj(f, self._data_file)
-
- self._data_len += other_index.data_len
-
- def close(self):
- self._data_file.close()
- with HuffmanMMapIndex.writer(
- indexed_dataset.index_file_path(self._path_prefix), self._data_len
- ) as index:
- index.write(self._sizes, self._ptrs)
-
- def __exit__(self, exc_type, exc_val, exc_tb) -> None:
- self.close()
diff --git a/spaces/stable-diffusion-ai/upscaling/README.md b/spaces/stable-diffusion-ai/upscaling/README.md
deleted file mode 100644
index 230b13526e18ad81e52a1b27331ddbce7a2b7918..0000000000000000000000000000000000000000
--- a/spaces/stable-diffusion-ai/upscaling/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Stable Diffusion - Image Upscaling
-emoji: 🚀
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.4.1
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: ai-art/upscaling
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Astrological Charts Pro 9.0.8 APK [Paid] [Full] EXCLUSIVE.md b/spaces/stomexserde/gpt4-ui/Examples/Astrological Charts Pro 9.0.8 APK [Paid] [Full] EXCLUSIVE.md
deleted file mode 100644
index 60d8eb6571e617732a4de054265d610b649b4ce9..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Astrological Charts Pro 9.0.8 APK [Paid] [Full] EXCLUSIVE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-Astrological Charts Pro 9.0.8 APK [Paid] [Full]: A Professional Astrology App for Android
-If you are looking for a professional astrological program for Android, you might want to check out Astrological Charts Pro 9.0.8 APK [Paid] [Full]. This app is designed to provide you with 12 types of astrological charts, including planets, asteroids, fictitious points, lots, aspects, houses, and more. You can also access interpretations of natal and transit charts, synastry and composite charts, progressions and directions, returns and lunar phases, and harmonics.
-Astrological Charts Pro 9.0.8 APK [Paid] [Full] DOWNLOAD — https://urlgoal.com/2uI8lE
-Astrological Charts Pro 9.0.8 APK [Paid] [Full] has a database of about 100000 places with specified time zones, so you don't have to worry about calculating the difference with GMT. You can also add new places if you want. The app calculates exact dates of triggering aspects, periods of aspects by orb, moments of sign changes, lunar phases, eclipses, void of course Moon, midpoints and planetary hours in the menu of the main page. You can choose between Tropical and Sidereal zodiac in the app.
-Astrological Charts Pro 9.0.8 APK [Paid] [Full] is not only a longitude calculator, but also provides data such as latitude, declination and parallels aspects for 10 planets. You can customize the orbs and the house systems according to your preference. There are 11 house systems and 22 types of aspect available in the app.
-If you want to download Astrological Charts Pro 9.0.8 APK [Paid] [Full], you can find it on Google Play Store[^1^], APKCombo[^2^], or Aptoide[^3^]. The app costs $19.99 and requires Android 4.4 or higher to run. It has a rating of 4.7 out of 5 stars on Google Play Store based on 566 reviews.
-
-Astrological Charts Pro 9.0.8 APK [Paid] [Full] is a must-have app for anyone who is interested in astrology and wants to have a professional tool at their fingertips. Whether you want to analyze your own chart, compare it with others, or explore different astrological techniques, this app will help you do it with ease and accuracy.
Here are some of the features and benefits of Astrological Charts Pro 9.0.8 APK [Paid] [Full] that you can enjoy:
-
-12 types of astrological charts : You can generate and view different types of charts, such as natal, transit, synastry, composite, progressions, directions, profections, returns, lunar phases, and harmonics. You can also switch between one radix chart and dual radix chart modes.
-13 asteroids and 23 fictitious points : You can include more factors in your analysis, such as Chiron, Ceres, Pallas, Juno, Vesta, Lilith, Vertex, Part of Fortune, and more. You can also add or remove any asteroid or point from the chart.
-Interpretations : You can read the meanings of natal planets in zodiac signs, in houses and in retrograde state, transit planets in natal houses, natal aspects, from transit to natal aspects, synastry aspects, natal Ascendent and houses in signs. The app also provides a Google search option for any interpretation that is not available.
-Customizable orbs and house systems : You can adjust the orbs for each aspect and planet according to your preference. You can also choose from 11 house systems, such as Placidus, Koch, Equal, Whole Sign, Campanus, Regiomontanus, Porphyry, Morinus, Alcabitius, Meridian and Axial Rotation System.
-Data accuracy and convenience : You can rely on the app's database of about 100000 places with specified time zones to calculate the charts correctly. You can also add new places if you need to. The app also shows you the exact dates of triggering aspects, periods of aspects by orb, moments of sign changes, lunar phases, eclipses, void of course Moon, midpoints and planetary hours in the menu of the main page.
-Tropical and Sidereal zodiac : You can switch between the two zodiac systems in the app. You can also choose from different ayanamsas for the Sidereal zodiac.
-Data export and import : You can save your charts as PDF files or images and share them with others. You can also import data from other astrological programs or websites using CSV files.
-
-Astrological Charts Pro 9.0.8 APK [Paid] [Full] is a comprehensive and professional astrological program for Android that will satisfy your astrological needs. Download it today and discover the secrets of the stars!
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Chick Corea A Work In Progress Pdf 24 [PATCHED].md b/spaces/stomexserde/gpt4-ui/Examples/Chick Corea A Work In Progress Pdf 24 [PATCHED].md
deleted file mode 100644
index e3276d180274d57bda1b1ae32f39646f53ec872f..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Chick Corea A Work In Progress Pdf 24 [PATCHED].md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-Chick Corea's A Work in Progress: A Treasure of Musical Knowledge
-Chick Corea was one of the most influential and prolific jazz pianists and composers of the 20th and 21st centuries. He left behind a legacy of musical innovation and creativity that spans genres and generations. He also left behind a treasure of musical knowledge: his book A Work in Progress ... On Being a Musician.
-A Work in Progress is a collection of Chick Corea's notes and essays on various aspects of music and musicianship, such as creativity, practice, performance, communication, and aesthetics. He shares his insights and experiences from his long and successful career, as well as his personal philosophy and approach to music. He answers common questions that musicians face, such as:
-chick corea a work in progress pdf 24 Download ⚹⚹⚹ https://urlgoal.com/2uI89L
-
-What is the single most important element in making good music?
-How can one gain the ability to completely originate one's own music?
-How much time and effort should go into getting a single musical product?
-What's the best way to evaluate one's own live performance?
-What can one do about a "difficult" audience?
-Can others' opinions on your music serve some useful purpose?
-
-The book is not a typical instructional manual or textbook. It is more like a conversation with a master musician who generously shares his wisdom and advice. It is also a work in progress, as Chick Corea intended to add more chapters and answer more questions from his fans and students. The book is available in English and Spanish-language editions, exclusively at Chick's official store.
-One of the chapters that illustrates Chick Corea's musical mind is chapter 24, titled "Pulse and Time Flow". In this chapter, he explains how he imagines a pulse while laying phrases over it, how he creates tempo intentionally, how he uses different subdivisions of the beat to create rhythmic variety and interest, and how he relates to other musicians in a group setting. He also gives some practical exercises to improve one's sense of pulse and time flow.
-A Work in Progress by Chick Corea is a valuable resource for any musician who wants to learn from one of the greatest artists of our time. It is also a testament to Chick Corea's love for music and his dedication to sharing it with others. As he writes in the introduction:
-"I hope you can use some of it to your benefit and success in making music and being a musician."
-
-If you want to read A Work in Progress by Chick Corea, you can download it as a PDF file from his official website. You can also find some excerpts and samples of the book on Scribd and PDFSLIDE.NET. These websites allow you to preview some of the pages and chapters of the book before you buy it. You can also read some reviews and comments from other readers who have enjoyed the book.
-One of the benefits of reading A Work in Progress by Chick Corea as a PDF file is that you can easily access it on your computer, tablet, or smartphone. You can also print it out if you prefer to read it on paper. You can also highlight, bookmark, and annotate the PDF file as you read it. This way, you can make notes of the parts that interest you or that you want to practice later.
-A Work in Progress by Chick Corea is not only a book for jazz musicians or pianists. It is a book for anyone who loves music and wants to improve their musical skills and knowledge. It is a book that will inspire you, challenge you, and entertain you. It is a book that will make you appreciate Chick Corea's music even more.
-
-
\ No newline at end of file
diff --git a/spaces/stratussox/yolov5_inference/app.py b/spaces/stratussox/yolov5_inference/app.py
deleted file mode 100644
index 320dbdf58d22accfca6a5ffc2faa02814212ad91..0000000000000000000000000000000000000000
--- a/spaces/stratussox/yolov5_inference/app.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import json
-from io import BytesIO
-from PIL import Image
-import os
-
-import streamlit as st
-import pandas as pd
-import numpy as np
-import torch
-import cv2
-
-#from simple_detection import detect
-from one_image_detection import detect
-
-if 'img_list' not in st.session_state:
- st.session_state.img_list = []
-
-st.title('Direct YoloV5 Inference')
-instructions = """
- Upload your images and run the model to
- test the basic YoloV5 model.
-
- Check the original YoloV5 repository in: https://github.com/ultralytics/yolov5
- """
-st.write(instructions)
-
-if st.button('Run Model'):
- result = detect(st.session_state['img_list'])
- st.session_state['img_list'] = []
-
- for image in result:
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
- st.image(image, caption='Result')
-
-file = st.file_uploader('Upload An Image')
-
-if file: # if user uploaded file
- file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)
- img = cv2.imdecode(file_bytes, 1)
-
- # print(len(img_list))
- # img_list.append(img)
- # print("Loaded images = ")
- # print(len(img_list))
-
- st.session_state['img_list'].append(img)
-
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- st.title("Uploaded Image")
- resized_image = cv2.resize(img, (256,256))
- st.image(resized_image)
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/sub314xxl/DualStyleGAN/app.py b/spaces/sub314xxl/DualStyleGAN/app.py
deleted file mode 100644
index 13c0b5fd70358c15919c1c8ed9267af3cf0cc3db..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/DualStyleGAN/app.py
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import argparse
-import pathlib
-
-import gradio as gr
-
-from dualstylegan import Model
-
-DESCRIPTION = '''# Portrait Style Transfer with DualStyleGAN
-
-
-'''
-
-
-def get_style_image_url(style_name: str) -> str:
- base_url = 'https://raw.githubusercontent.com/williamyang1991/DualStyleGAN/main/doc_images'
- filenames = {
- 'cartoon': 'cartoon_overview.jpg',
- 'caricature': 'caricature_overview.jpg',
- 'anime': 'anime_overview.jpg',
- 'arcane': 'Reconstruction_arcane_overview.jpg',
- 'comic': 'Reconstruction_comic_overview.jpg',
- 'pixar': 'Reconstruction_pixar_overview.jpg',
- 'slamdunk': 'Reconstruction_slamdunk_overview.jpg',
- }
- return f'{base_url}/{filenames[style_name]}'
-
-
-def get_style_image_markdown_text(style_name: str) -> str:
- url = get_style_image_url(style_name)
- return f'![style image]({url})'
-
-
-def update_slider(choice: str) -> dict:
- max_vals = {
- 'cartoon': 316,
- 'caricature': 198,
- 'anime': 173,
- 'arcane': 99,
- 'comic': 100,
- 'pixar': 121,
- 'slamdunk': 119,
- }
- return gr.Slider.update(maximum=max_vals[choice])
-
-
-def update_style_image(style_name: str) -> dict:
- text = get_style_image_markdown_text(style_name)
- return gr.Markdown.update(value=text)
-
-
-def set_example_image(example: list) -> dict:
- return gr.Image.update(value=example[0])
-
-
-def set_example_styles(example: list) -> list[dict]:
- return [
- gr.Radio.update(value=example[0]),
- gr.Slider.update(value=example[1]),
- ]
-
-
-def set_example_weights(example: list) -> list[dict]:
- return [
- gr.Slider.update(value=example[0]),
- gr.Slider.update(value=example[1]),
- ]
-
-
-model = Model()
-
-with gr.Blocks(css='style.css') as demo:
- gr.Markdown(DESCRIPTION)
-
- with gr.Box():
- gr.Markdown('''## Step 1 (Preprocess Input Image)
-
-- Drop an image containing a near-frontal face to the **Input Image**.
-- If there are multiple faces in the image, hit the Edit button in the upper right corner and crop the input image beforehand.
-- Hit the **Detect & Align Face** button.
-- Hit the **Reconstruct Face** button.
-- The final result will be based on this **Reconstructed Face**. So, if the reconstructed image is not satisfactory, you may want to change the input image.
-''')
- with gr.Row():
- with gr.Column():
- with gr.Row():
- input_image = gr.Image(label='Input Image',
- type='filepath')
- with gr.Row():
- detect_button = gr.Button('Detect & Align Face')
- with gr.Column():
- with gr.Row():
- aligned_face = gr.Image(label='Aligned Face',
- type='numpy',
- interactive=False)
- with gr.Row():
- reconstruct_button = gr.Button('Reconstruct Face')
- with gr.Column():
- reconstructed_face = gr.Image(label='Reconstructed Face',
- type='numpy')
- instyle = gr.Variable()
-
- with gr.Row():
- paths = sorted(pathlib.Path('images').glob('*.jpg'))
- gr.Examples(examples=[[path.as_posix()] for path in paths],
- inputs=input_image)
-
- with gr.Box():
- gr.Markdown('''## Step 2 (Select Style Image)
-
-- Select **Style Type**.
-- Select **Style Image Index** from the image table below.
-''')
- with gr.Row():
- with gr.Column():
- style_type = gr.Radio(label='Style Type',
- choices=model.style_types)
- text = get_style_image_markdown_text('cartoon')
- style_image = gr.Markdown(value=text)
- style_index = gr.Slider(label='Style Image Index',
- minimum=0,
- maximum=316,
- step=1,
- value=26)
-
- with gr.Row():
- gr.Examples(examples=[
- ['cartoon', 26],
- ['caricature', 65],
- ['arcane', 63],
- ['pixar', 80],
- ],
- inputs=[style_type, style_index])
-
- with gr.Box():
- gr.Markdown('''## Step 3 (Generate Style Transferred Image)
-
-- Adjust **Structure Weight** and **Color Weight**.
-- These are weights for the style image, so the larger the value, the closer the resulting image will be to the style image.
-- Hit the **Generate** button.
-''')
- with gr.Row():
- with gr.Column():
- with gr.Row():
- structure_weight = gr.Slider(label='Structure Weight',
- minimum=0,
- maximum=1,
- step=0.1,
- value=0.6)
- with gr.Row():
- color_weight = gr.Slider(label='Color Weight',
- minimum=0,
- maximum=1,
- step=0.1,
- value=1)
- with gr.Row():
- structure_only = gr.Checkbox(label='Structure Only')
- with gr.Row():
- generate_button = gr.Button('Generate')
-
- with gr.Column():
- result = gr.Image(label='Result')
-
- with gr.Row():
- gr.Examples(examples=[
- [0.6, 1.0],
- [0.3, 1.0],
- [0.0, 1.0],
- [1.0, 0.0],
- ],
- inputs=[structure_weight, color_weight])
-
- detect_button.click(fn=model.detect_and_align_face,
- inputs=input_image,
- outputs=aligned_face)
- reconstruct_button.click(fn=model.reconstruct_face,
- inputs=aligned_face,
- outputs=[reconstructed_face, instyle])
- style_type.change(fn=update_slider, inputs=style_type, outputs=style_index)
- style_type.change(fn=update_style_image,
- inputs=style_type,
- outputs=style_image)
- generate_button.click(fn=model.generate,
- inputs=[
- style_type,
- style_index,
- structure_weight,
- color_weight,
- structure_only,
- instyle,
- ],
- outputs=result)
-demo.queue(max_size=10).launch()
diff --git a/spaces/sub314xxl/MetaGPT/metagpt/actions/skill_action.py b/spaces/sub314xxl/MetaGPT/metagpt/actions/skill_action.py
deleted file mode 100644
index 758591fdd7838ba3d45efbe9dd40c0ce8508c93f..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MetaGPT/metagpt/actions/skill_action.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/8/28
-@Author : mashenquan
-@File : skill_action.py
-@Desc : Call learned skill
-"""
-from __future__ import annotations
-
-import ast
-import importlib
-import traceback
-from copy import deepcopy
-
-from metagpt.actions import Action, ActionOutput
-from metagpt.learn.skill_loader import Skill
-from metagpt.logs import logger
-
-
-class ArgumentsParingAction(Action):
- def __init__(self, last_talk: str, skill: Skill, context=None, llm=None, **kwargs):
- super(ArgumentsParingAction, self).__init__(name="", context=context, llm=llm)
- self.skill = skill
- self.ask = last_talk
- self.rsp = None
- self.args = None
-
- @property
- def prompt(self):
- prompt = f"{self.skill.name} function parameters description:\n"
- for k, v in self.skill.arguments.items():
- prompt += f"parameter `{k}`: {v}\n"
- prompt += "\n"
- prompt += "Examples:\n"
- for e in self.skill.examples:
- prompt += f"If want you to do `{e.ask}`, return `{e.answer}` brief and clear.\n"
- prompt += f"\nNow I want you to do `{self.ask}`, return in examples format above, brief and clear."
- return prompt
-
- async def run(self, *args, **kwargs) -> ActionOutput:
- prompt = self.prompt
- logger.info(prompt)
- rsp = await self.llm.aask(msg=prompt, system_msgs=[])
- logger.info(rsp)
- self.args = ArgumentsParingAction.parse_arguments(skill_name=self.skill.name, txt=rsp)
- self.rsp = ActionOutput(content=rsp)
- return self.rsp
-
- @staticmethod
- def parse_arguments(skill_name, txt) -> dict:
- prefix = skill_name + "("
- if prefix not in txt:
- logger.error(f"{skill_name} not in {txt}")
- return None
- if ")" not in txt:
- logger.error(f"')' not in {txt}")
- return None
- begin_ix = txt.find(prefix)
- end_ix = txt.rfind(")")
- args_txt = txt[begin_ix + len(prefix) : end_ix]
- logger.info(args_txt)
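- # Wrap the keyword arguments in "dict(...)" so ast can parse them as a single
- # call expression; each value is then recovered with literal_eval below.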
- fake_expression = f"dict({args_txt})"
- parsed_expression = ast.parse(fake_expression, mode="eval")
- args = {}
- for keyword in parsed_expression.body.keywords:
- key = keyword.arg
- value = ast.literal_eval(keyword.value)
- args[key] = value
- return args
-
-
-class SkillAction(Action):
- def __init__(self, skill: Skill, args: dict, context=None, llm=None, **kwargs):
- super(SkillAction, self).__init__(name="", context=context, llm=llm)
- self._skill = skill
- self._args = args
- self.rsp = None
-
- async def run(self, *args, **kwargs) -> str | ActionOutput | None:
- """Run action"""
- options = deepcopy(kwargs)
- if self._args:
- for k in self._args.keys():
- if k in options:
- options.pop(k)
- try:
- self.rsp = await self.find_and_call_function(self._skill.name, args=self._args, **options)
- except Exception as e:
- logger.exception(f"{e}, traceback:{traceback.format_exc()}")
- self.rsp = f"Error: {e}"
- return ActionOutput(content=self.rsp, instruct_content=self._skill.json())
-
- @staticmethod
- async def find_and_call_function(function_name, args, **kwargs):
- try:
- module = importlib.import_module("metagpt.learn")
- function = getattr(module, function_name)
- # Call the function and return the result
- result = await function(**args, **kwargs)
- return result
- except (ModuleNotFoundError, AttributeError):
- logger.error(f"{function_name} not found")
- return None
-
-
-if __name__ == "__main__":
- ArgumentsParingAction.parse_arguments(
- skill_name="text_to_image", txt='`text_to_image(text="Draw an apple", size_type="512x512")`'
- )
diff --git a/spaces/sub314xxl/MetaGPT/metagpt/const.py b/spaces/sub314xxl/MetaGPT/metagpt/const.py
deleted file mode 100644
index fbc2c928a14b0b5770b266dd79b02b2c7814880a..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MetaGPT/metagpt/const.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/1 11:59
-@Author : alexanderwu
-@File : const.py
-@Modified By: mashenquan, 2023/8/28. Add 'OPTIONS', 'DEFAULT_LANGUAGE', 'DEFAULT_MAX_TOKENS'...
-"""
-import contextvars
-from pathlib import Path
-
-
-def get_project_root():
- """逐级向上寻找项目根目录"""
- current_path = Path.cwd()
- while True:
- if (
- (current_path / ".git").exists()
- or (current_path / ".project_root").exists()
- or (current_path / ".gitignore").exists()
- ):
- return current_path
- parent_path = current_path.parent
- if parent_path == current_path:
- raise Exception("Project root not found.")
- current_path = parent_path
-
-
-PROJECT_ROOT = get_project_root()
-DATA_PATH = PROJECT_ROOT / "data"
-WORKSPACE_ROOT = PROJECT_ROOT / "workspace"
-PROMPT_PATH = PROJECT_ROOT / "metagpt/prompts"
-UT_PATH = PROJECT_ROOT / "data/ut"
-SWAGGER_PATH = UT_PATH / "files/api/"
-UT_PY_PATH = UT_PATH / "files/ut/"
-API_QUESTIONS_PATH = UT_PATH / "files/question/"
-YAPI_URL = "http://yapi.deepwisdomai.com/"
-TMP = PROJECT_ROOT / "tmp"
-RESEARCH_PATH = DATA_PATH / "research"
-
-MEM_TTL = 24 * 30 * 3600
-
-OPTIONS = contextvars.ContextVar("OPTIONS")
-DEFAULT_LANGUAGE = "English"
-DEFAULT_MAX_TOKENS = 1500
-COMMAND_TOKENS = 500
-BRAIN_MEMORY = "BRAIN_MEMORY"
-SKILL_PATH = "SKILL_PATH"
-SERPER_API_KEY = "SERPER_API_KEY"
-
-# Key Definitions for MetaGPT LLM
-METAGPT_API_MODEL = "METAGPT_API_MODEL"
-METAGPT_API_KEY = "METAGPT_API_KEY"
-METAGPT_API_BASE = "METAGPT_API_BASE"
-METAGPT_API_TYPE = "METAGPT_API_TYPE"
-METAGPT_API_VERSION = "METAGPT_API_VERSION"
-
-# format
-BASE64_FORMAT = "base64"
diff --git a/spaces/sub314xxl/MusicGen-Continuation/Makefile b/spaces/sub314xxl/MusicGen-Continuation/Makefile
deleted file mode 100644
index 5bfd89dd833d7448b21073eb6ee7cfac1d5157dd..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MusicGen-Continuation/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-default: linter tests
-
-install:
- pip install -U pip
- pip install -U -e '.[dev]'
-
-linter:
- flake8 audiocraft && mypy audiocraft
- flake8 tests && mypy tests
-
-tests:
- coverage run -m pytest tests
- coverage report --include 'audiocraft/*'
-
-docs:
- pdoc3 --html -o docs -f audiocraft
-
-dist:
- python setup.py sdist
-
-.PHONY: linter tests docs dist
diff --git a/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/position_encoding.py b/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/position_encoding.py
deleted file mode 100644
index f60587a41d5f3b26c247ef569523ec4a595bd4b8..0000000000000000000000000000000000000000
--- a/spaces/subhc/Guess-What-Moves/mask_former/modeling/transformer/position_encoding.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# # Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
-"""
-Various positional encodings for the transformer.
-"""
-import math
-
-import torch
-from torch import nn
-
-
-class PositionEmbeddingSine(nn.Module):
- """
- This is a more standard version of the position embedding, very similar to the one
- used by the Attention is all you need paper, generalized to work on images.
- """
-
- def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
- super().__init__()
- self.num_pos_feats = num_pos_feats
- self.temperature = temperature
- self.normalize = normalize
- if scale is not None and normalize is False:
- raise ValueError("normalize should be True if scale is passed")
- if scale is None:
- scale = 2 * math.pi
- self.scale = scale
-
- def forward(self, x, mask=None):
- if mask is None:
- mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
- not_mask = ~mask
- y_embed = not_mask.cumsum(1, dtype=torch.float32)
- x_embed = not_mask.cumsum(2, dtype=torch.float32)
- if self.normalize:
- eps = 1e-6
- y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
- x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
-
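- # Channel frequencies: temperature ** (2 * floor(i / 2) / num_pos_feats);
- # consecutive channels share a frequency and become the sin/cos pairs below.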
- dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
- dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.num_pos_feats)
-
- pos_x = x_embed[:, :, :, None] / dim_t
- pos_y = y_embed[:, :, :, None] / dim_t
- pos_x = torch.stack(
- (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
- ).flatten(3)
- pos_y = torch.stack(
- (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
- ).flatten(3)
- pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
- return pos
diff --git a/spaces/sujitpal/clip-rsicd-demo/dashboard_featurefinder.py b/spaces/sujitpal/clip-rsicd-demo/dashboard_featurefinder.py
deleted file mode 100644
index cc58012f6f173293732c6318a1b4fa6c6d82859a..0000000000000000000000000000000000000000
--- a/spaces/sujitpal/clip-rsicd-demo/dashboard_featurefinder.py
+++ /dev/null
@@ -1,160 +0,0 @@
-import jax
-import flax
-import matplotlib.pyplot as plt
-import nmslib
-import numpy as np
-import os
-import requests
-import streamlit as st
-
-from tempfile import NamedTemporaryFile
-from torchvision.transforms import Compose, Resize, ToPILImage
-from transformers import CLIPProcessor, FlaxCLIPModel
-from PIL import Image
-
-import utils
-
-BASELINE_MODEL = "openai/clip-vit-base-patch32"
-MODEL_PATH = "flax-community/clip-rsicd-v2"
-
-IMAGE_VECTOR_FILE = "./vectors/test-bs128x8-lr5e-6-adam-ckpt-1.tsv"
-
-IMAGES_DIR = "./images"
-DEMO_IMAGES_DIR = "./demo-images"
-
-
-def split_image(X):
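- # Crop the image down to a multiple of 224 in each dimension, then cut it into row-major 224x224 patches.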
- num_rows = X.shape[0] // 224
- num_cols = X.shape[1] // 224
- Xc = X[0 : num_rows * 224, 0 : num_cols * 224, :]
- patches = []
- for j in range(num_rows):
- for i in range(num_cols):
- patches.append(Xc[j * 224 : (j + 1) * 224,
- i * 224 : (i + 1) * 224,
- :])
- return num_rows, num_cols, patches
-
-
-def get_patch_probabilities(patches, searched_feature,
- image_preprocessor,
- model, processor):
- images = [image_preprocessor(patch) for patch in patches]
- text = "An aerial image of {:s}".format(searched_feature)
- inputs = processor(images=images,
- text=text,
- return_tensors="jax",
- padding=True)
- outputs = model(**inputs)
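- # Softmax over the patch axis: each tile's score says how strongly it matches
- # the text query relative to the other tiles of the same image.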
- probs = jax.nn.softmax(outputs.logits_per_text, axis=-1)
- probs_np = np.asarray(probs)[0]
- return probs_np
-
-
-def get_image_ranks(probs):
- temp = np.argsort(-probs)
- ranks = np.empty_like(temp)
- ranks[temp] = np.arange(len(probs))
- return ranks
-
-
-def download_and_prepare_image(image_url):
- """
- Take input image and resize it to 672x896
- """
- try:
- image_raw = requests.get(image_url, stream=True,).raw
- image = Image.open(image_raw).convert("RGB")
- width, height = image.size
- # print("WID,HGT:", width, height)
- if width < 224 or height < 224:
- return None
- # take the short edge and reduce to 672
- if width < height:
- resize_factor = 672 / width
- image = image.resize((672, int(height * resize_factor)))
- image = image.crop((0, 0, 672, 896))
- else:
- resize_factor = 672 / height
- image = image.resize((int(width * resize_factor), 896))
- image = image.crop((0, 0, 896, 672))
- return np.asarray(image)
- except Exception as e:
- # print(e)
- return None
-
-
-
-def app():
- model, processor = utils.load_model(MODEL_PATH, BASELINE_MODEL)
-
- st.title("Find Features in Images")
- st.markdown("""
- This demo shows the ability of the model to find specific features
- (specified as text queries) in the image. As an example, say you wish to
- find the parts of the following image that contain a `beach`, `houses`,
- or `ships`. We partition the image into tiles of (224, 224) and report
- how likely each of them is to contain each of the text features.
- """)
- st.image("demo-images/st_tropez_1.png")
- st.image("demo-images/st_tropez_2.png")
- st.markdown("""
- For this image and the queries listed above, our model reports that the
- two left tiles are most likely to contain a `beach`, the two top right
- tiles are most likely to contain `houses`, and the two bottom right tiles
- are likely to contain `boats`.
-
- We have provided a few representative images from [Unsplash](https://unsplash.com/s/photos/aerial-view)
- that you can experiment with. Use the image name to put in an initial feature
- to look for; this will show the original image, and you will get more ideas
- for features that you can ask the model to identify.
- """)
- image_file = st.selectbox(
- "Sample Image File",
- options=[
- "-- select one --",
- "St-Tropez-Port.jpg",
- "Acopulco-Bay.jpg",
- "Highway-through-Forest.jpg",
- "Forest-with-River.jpg",
- "Eagle-Bay-Coastline.jpg",
- "Multistoreyed-Buildings.jpg",
- "Street-View-Malayasia.jpg",
- ])
- image_url = st.text_input(
- "OR provide an image URL",
- value="https://static.eos.com/wp-content/uploads/2019/04/Main.jpg")
- searched_feature = st.text_input("Feature to find", value="beach")
-
- if st.button("Find"):
- if image_file.startswith("--"):
- image = download_and_prepare_image(image_url)
- else:
- image = plt.imread(os.path.join("demo-images", image_file))
-
- if image is None:
- st.error("Image could not be downloaded, please try another one")
- else:
- st.image(image, caption="Input Image")
- st.markdown("---")
- num_rows, num_cols, patches = split_image(image)
- image_preprocessor = Compose([
- ToPILImage(),
- Resize(224)
- ])
- patch_probs = get_patch_probabilities(
- patches,
- searched_feature,
- image_preprocessor,
- model,
- processor)
- patch_ranks = get_image_ranks(patch_probs)
- pid = 0
- for i in range(num_rows):
- cols = st.columns(num_cols)
- for col in cols:
- caption = "#{:d} p({:s})={:.3f}".format(
- patch_ranks[pid] + 1, searched_feature, patch_probs[pid])
- col.image(patches[pid], caption=caption)
- pid += 1
diff --git a/spaces/sunilbhatia/hackathon1/app/Hackathon_setup/exp_recognition.py b/spaces/sunilbhatia/hackathon1/app/Hackathon_setup/exp_recognition.py
deleted file mode 100644
index ae4a578972eb2345c1ed8207946db8152dbe8c21..0000000000000000000000000000000000000000
--- a/spaces/sunilbhatia/hackathon1/app/Hackathon_setup/exp_recognition.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import numpy as np
-import cv2
-from matplotlib import pyplot as plt
-import torch
-# In the below line,remove '.' while working on your local system.However Make sure that '.' is present before face_recognition_model while uploading to the server, Do not remove it.
-from .exp_recognition_model import *
-from PIL import Image
-import base64
-import io
-import os
-## Add more imports if required
-
-#############################################################################################################################
-# Caution: Don't change any of the filenames, function names and definitions #
-# Always use the current_path + file_name for refering any files, without it we cannot access files on the server #
-#############################################################################################################################
-
-# Current_path stores absolute path of the file from where it runs.
-current_path = os.path.dirname(os.path.abspath(__file__))
-
-
-#1) The below function is used to detect faces in the given image.
-#2) It returns only one image which has maximum area out of all the detected faces in the photo.
-#3) If no face is detected,then it returns zero(0).
-
-def detected_face(image):
- eye_haar = current_path + '/haarcascade_eye.xml'
- face_haar = current_path + '/haarcascade_frontalface_default.xml'
- face_cascade = cv2.CascadeClassifier(face_haar)
- eye_cascade = cv2.CascadeClassifier(eye_haar)
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
- faces = face_cascade.detectMultiScale(gray, 1.3, 5)
- face_areas=[]
- images = []
- required_image=0
- for i, (x,y,w,h) in enumerate(faces):
- face_cropped = gray[y:y+h, x:x+w]
- face_areas.append(w*h)
- images.append(face_cropped)
- required_image = images[np.argmax(face_areas)]
- required_image = Image.fromarray(required_image)
- return required_image
-
-
-#1) Images captured from mobile is passed as parameter to the below function in the API call, It returns the Expression detected by your network.
-#2) The image is passed to the function in base64 encoding, Code for decoding the image is provided within the function.
-#3) Define an object to your network here in the function and load the weight from the trained network, set it in evaluation mode.
-#4) Perform necessary transformations to the input(detected face using the above function), this should return the Expression in string form ex: "Anger"
-#5) For loading your model use the current_path+'your model file name', anyhow detailed example is given in comments to the function
-##Caution: Don't change the definition or function name; for loading the model use the current_path for path example is given in comments to the function
-def get_expression(img):
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
- ##########################################################################################
- ##Example for loading a model using weight state dictionary: ##
- ## face_det_net = facExpRec() #Example Network ##
- ## model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device) ##
- ## face_det_net.load_state_dict(model['net_dict']) ##
- ## ##
- ##current_path + '/' is path of the saved model if present in ##
- ##the same path as this file, we recommend to put in the same directory ##
- ##########################################################################################
- ##########################################################################################
-
- face = detected_face(img)
- if face==0:
- face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
- face = trnscm(face).unsqueeze(0)
-
-
- # YOUR CODE HERE, load the model
- expression_model = ExpressionCNN()
- model = torch.load(current_path + '/expression_model_gpu.t7', map_location=device)
- expression_model.load_state_dict(model['net_dict'])
-
- outputs = expression_model(face)
- _, predicted = torch.max(outputs.data, 1)
-
- return classes[predicted.item()]
diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/images.py b/spaces/supertori/files/stable-diffusion-webui/modules/images.py
deleted file mode 100644
index 04b2727bd5c3cb6faeb6aa38c24d61f8f2f6ea4b..0000000000000000000000000000000000000000
--- a/spaces/supertori/files/stable-diffusion-webui/modules/images.py
+++ /dev/null
@@ -1,669 +0,0 @@
-import datetime
-import sys
-import traceback
-
-import pytz
-import io
-import math
-import os
-from collections import namedtuple
-import re
-
-import numpy as np
-import piexif
-import piexif.helper
-from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
-from fonts.ttf import Roboto
-import string
-import json
-import hashlib
-
-from modules import sd_samplers, shared, script_callbacks, errors
-from modules.shared import opts, cmd_opts
-
-LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
-
-
-def image_grid(imgs, batch_size=1, rows=None):
- if rows is None:
- if opts.n_rows > 0:
- rows = opts.n_rows
- elif opts.n_rows == 0:
- rows = batch_size
- elif opts.grid_prevent_empty_spots:
- rows = math.floor(math.sqrt(len(imgs)))
- while len(imgs) % rows != 0:
- rows -= 1
- else:
- rows = math.sqrt(len(imgs))
- rows = round(rows)
- if rows > len(imgs):
- rows = len(imgs)
-
- cols = math.ceil(len(imgs) / rows)
-
- params = script_callbacks.ImageGridLoopParams(imgs, cols, rows)
- script_callbacks.image_grid_callback(params)
-
- w, h = imgs[0].size
- grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black')
-
- for i, img in enumerate(params.imgs):
- grid.paste(img, box=(i % params.cols * w, i // params.cols * h))
-
- return grid
-
-
-Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
-
-
-def split_grid(image, tile_w=512, tile_h=512, overlap=64):
- w = image.width
- h = image.height
-
- non_overlap_width = tile_w - overlap
- non_overlap_height = tile_h - overlap
-
- cols = math.ceil((w - overlap) / non_overlap_width)
- rows = math.ceil((h - overlap) / non_overlap_height)
-
- dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
- dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
-
- grid = Grid([], tile_w, tile_h, w, h, overlap)
- for row in range(rows):
- row_images = []
-
- y = int(row * dy)
-
- if y + tile_h >= h:
- y = h - tile_h
-
- for col in range(cols):
- x = int(col * dx)
-
- if x + tile_w >= w:
- x = w - tile_w
-
- tile = image.crop((x, y, x + tile_w, y + tile_h))
-
- row_images.append([x, tile_w, tile])
-
- grid.tiles.append([y, tile_h, row_images])
-
- return grid
-
-
-def combine_grid(grid):
- def make_mask_image(r):
- r = r * 255 / grid.overlap
- r = r.astype(np.uint8)
- return Image.fromarray(r, 'L')
-
- mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
- mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
-
- combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
- for y, h, row in grid.tiles:
- combined_row = Image.new("RGB", (grid.image_w, h))
- for x, w, tile in row:
- if x == 0:
- combined_row.paste(tile, (0, 0))
- continue
-
- combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
- combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
-
- if y == 0:
- combined_image.paste(combined_row, (0, 0))
- continue
-
- combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
- combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
-
- return combined_image
-
-
-class GridAnnotation:
- def __init__(self, text='', is_active=True):
- self.text = text
- self.is_active = is_active
- self.size = None
-
-
-def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
- def wrap(drawing, text, font, line_length):
- lines = ['']
- for word in text.split():
- line = f'{lines[-1]} {word}'.strip()
- if drawing.textlength(line, font=font) <= line_length:
- lines[-1] = line
- else:
- lines.append(word)
- return lines
-
- def get_font(fontsize):
- try:
- return ImageFont.truetype(opts.font or Roboto, fontsize)
- except Exception:
- return ImageFont.truetype(Roboto, fontsize)
-
- def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
- for i, line in enumerate(lines):
- fnt = initial_fnt
- fontsize = initial_fontsize
- while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:
- fontsize -= 1
- fnt = get_font(fontsize)
- drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
-
- if not line.is_active:
- drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4)
-
- draw_y += line.size[1] + line_spacing
-
- fontsize = (width + height) // 25
- line_spacing = fontsize // 2
-
- fnt = get_font(fontsize)
-
- color_active = (0, 0, 0)
- color_inactive = (153, 153, 153)
-
- pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
-
- cols = im.width // width
- rows = im.height // height
-
- assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
- assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
-
- calc_img = Image.new("RGB", (1, 1), "white")
- calc_d = ImageDraw.Draw(calc_img)
-
- for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
- items = [] + texts
- texts.clear()
-
- for line in items:
- wrapped = wrap(calc_d, line.text, fnt, allowed_width)
- texts += [GridAnnotation(x, line.is_active) for x in wrapped]
-
- for line in texts:
- bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
- line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
- line.allowed_width = allowed_width
-
- hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
- ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
-
- pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2
-
- result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), "white")
-
- for row in range(rows):
- for col in range(cols):
- cell = im.crop((width * col, height * row, width * (col+1), height * (row+1)))
- result.paste(cell, (pad_left + (width + margin) * col, pad_top + (height + margin) * row))
-
- d = ImageDraw.Draw(result)
-
- for col in range(cols):
- x = pad_left + (width + margin) * col + width / 2
- y = pad_top / 2 - hor_text_heights[col] / 2
-
- draw_texts(d, x, y, hor_texts[col], fnt, fontsize)
-
- for row in range(rows):
- x = pad_left / 2
- y = pad_top + (height + margin) * row + height / 2 - ver_text_heights[row] / 2
-
- draw_texts(d, x, y, ver_texts[row], fnt, fontsize)
-
- return result
-
-
-def draw_prompt_matrix(im, width, height, all_prompts, margin=0):
- prompts = all_prompts[1:]
- boundary = math.ceil(len(prompts) / 2)
-
- prompts_horiz = prompts[:boundary]
- prompts_vert = prompts[boundary:]
-
- hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
- ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
-
- return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin)
-
-
-def resize_image(resize_mode, im, width, height, upscaler_name=None):
- """
- Resizes an image with the specified resize_mode, width, and height.
-
- Args:
- resize_mode: The mode to use when resizing the image.
- 0: Resize the image to the specified width and height.
- 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
- 2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling the empty space with data from the image.
- im: The image to resize.
- width: The width to resize the image to.
- height: The height to resize the image to.
- upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img.
- """
-
- upscaler_name = upscaler_name or opts.upscaler_for_img2img
-
- def resize(im, w, h):
- if upscaler_name is None or upscaler_name == "None" or im.mode == 'L':
- return im.resize((w, h), resample=LANCZOS)
-
- scale = max(w / im.width, h / im.height)
-
- if scale > 1.0:
- upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name]
- assert len(upscalers) > 0, f"could not find upscaler named {upscaler_name}"
-
- upscaler = upscalers[0]
- im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
-
- if im.width != w or im.height != h:
- im = im.resize((w, h), resample=LANCZOS)
-
- return im
-
- if resize_mode == 0:
- res = resize(im, width, height)
-
- elif resize_mode == 1:
- ratio = width / height
- src_ratio = im.width / im.height
-
- src_w = width if ratio > src_ratio else im.width * height // im.height
- src_h = height if ratio <= src_ratio else im.height * width // im.width
-
- resized = resize(im, src_w, src_h)
- res = Image.new("RGB", (width, height))
- res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
-
- else:
- ratio = width / height
- src_ratio = im.width / im.height
-
- src_w = width if ratio < src_ratio else im.width * height // im.height
- src_h = height if ratio >= src_ratio else im.height * width // im.width
-
- resized = resize(im, src_w, src_h)
- res = Image.new("RGB", (width, height))
- res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
-
- if ratio < src_ratio:
- fill_height = height // 2 - src_h // 2
- res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
- res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
- elif ratio > src_ratio:
- fill_width = width // 2 - src_w // 2
- res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
- res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
-
- return res
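A short usage sketch of the three resize modes documented above, assuming the webui environment so this module imports; passing upscaler_name="None" keeps the example on the plain LANCZOS path so no upscaler model is needed:

```python
from PIL import Image

src = Image.new("RGB", (640, 480), "gray")                       # stand-in for a generated image

stretched = resize_image(0, src, 512, 512, upscaler_name="None")  # mode 0: plain resize, aspect ratio ignored
cropped   = resize_image(1, src, 512, 512, upscaler_name="None")  # mode 1: fill the target and crop the excess
padded    = resize_image(2, src, 512, 512, upscaler_name="None")  # mode 2: fit inside and fill the empty borders
```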
-
-
-invalid_filename_chars = '<>:"/\\|?*\n'
-invalid_filename_prefix = ' '
-invalid_filename_postfix = ' .'
-re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
-re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
-re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
-max_filename_part_length = 128
-
-
-def sanitize_filename_part(text, replace_spaces=True):
- if text is None:
- return None
-
- if replace_spaces:
- text = text.replace(' ', '_')
-
- text = text.translate({ord(x): '_' for x in invalid_filename_chars})
- text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
- text = text.rstrip(invalid_filename_postfix)
- return text
-
-
-class FilenameGenerator:
- replacements = {
- 'seed': lambda self: self.seed if self.seed is not None else '',
- 'steps': lambda self: self.p and self.p.steps,
- 'cfg': lambda self: self.p and self.p.cfg_scale,
- 'width': lambda self: self.image.width,
- 'height': lambda self: self.image.height,
- 'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
- 'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
- 'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
- 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.model_name, replace_spaces=False),
- 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
- 'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
- 'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
- 'prompt_hash': lambda self: hashlib.sha256(self.prompt.encode()).hexdigest()[0:8],
- 'prompt': lambda self: sanitize_filename_part(self.prompt),
- 'prompt_no_styles': lambda self: self.prompt_no_style(),
- 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
- 'prompt_words': lambda self: self.prompt_words(),
- }
- default_time_format = '%Y%m%d%H%M%S'
-
- def __init__(self, p, seed, prompt, image):
- self.p = p
- self.seed = seed
- self.prompt = prompt
- self.image = image
-
- def prompt_no_style(self):
- if self.p is None or self.prompt is None:
- return None
-
- prompt_no_style = self.prompt
- for style in shared.prompt_styles.get_style_prompts(self.p.styles):
- if len(style) > 0:
- for part in style.split("{prompt}"):
- prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
-
- prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
-
- return sanitize_filename_part(prompt_no_style, replace_spaces=False)
-
- def prompt_words(self):
- words = [x for x in re_nonletters.split(self.prompt or "") if len(x) > 0]
- if len(words) == 0:
- words = ["empty"]
- return sanitize_filename_part(" ".join(words[0:opts.directories_max_prompt_words]), replace_spaces=False)
-
- def datetime(self, *args):
- time_datetime = datetime.datetime.now()
-
- time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
- try:
- time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
- except pytz.exceptions.UnknownTimeZoneError as _:
- time_zone = None
-
- time_zone_time = time_datetime.astimezone(time_zone)
- try:
- formatted_time = time_zone_time.strftime(time_format)
- except (ValueError, TypeError) as _:
- formatted_time = time_zone_time.strftime(self.default_time_format)
-
- return sanitize_filename_part(formatted_time, replace_spaces=False)
-
- def apply(self, x):
- res = ''
-
- for m in re_pattern.finditer(x):
- text, pattern = m.groups()
- res += text
-
- if pattern is None:
- continue
-
- pattern_args = []
- while True:
- m = re_pattern_arg.match(pattern)
- if m is None:
- break
-
- pattern, arg = m.groups()
- pattern_args.insert(0, arg)
-
- fun = self.replacements.get(pattern.lower())
- if fun is not None:
- try:
- replacement = fun(self, *pattern_args)
- except Exception:
- replacement = None
- print(f"Error adding [{pattern}] to filename", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
-
- if replacement is not None:
- res += str(replacement)
- continue
-
- res += f'[{pattern}]'
-
- return res
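The replacements table above defines tokens such as [seed] and [datetime], and apply() additionally parses optional arguments written in angle brackets. A standalone sketch of that token/argument split, using the same two regular expressions defined earlier in this file, so the parsing can be inspected without the rest of the webui:

```python
import re

# Same patterns as re_pattern and re_pattern_arg above.
re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")

text = "img-[seed]-[datetime<%Y%m%d>]"
for m in re_pattern.finditer(text):
    literal, token = m.groups()
    if token is None:
        continue
    args = []
    # Peel trailing <...> arguments off the token, innermost last.
    while (arg_match := re_pattern_arg.match(token)) is not None:
        token, arg = arg_match.groups()
        args.insert(0, arg)
    print(literal, token, args)  # prints: "img- seed []" then "- datetime ['%Y%m%d']"
```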
-
-
-def get_next_sequence_number(path, basename):
- """
- Determines and returns the next sequence number to use when saving an image in the specified directory.
-
- The sequence starts at 0.
- """
- result = -1
- if basename != '':
- basename = basename + "-"
-
- prefix_length = len(basename)
- for p in os.listdir(path):
- if p.startswith(basename):
- l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
- try:
- result = max(int(l[0]), result)
- except ValueError:
- pass
-
- return result + 1
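A small, self-contained illustration of the sequence-number scan described in the docstring, assuming get_next_sequence_number from this module is importable; the file names below are made up for the example:

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as path:
    for name in ("00001-cat.png", "00002-dog.png", "notes.txt"):
        open(os.path.join(path, name), "w").close()
    # "notes.txt" has no numeric prefix and is skipped; the highest number found is 2.
    print(get_next_sequence_number(path, ""))  # -> 3
```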
-
-
-def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
- """Save an image.
-
- Args:
- image (`PIL.Image`):
- The image to be saved.
- path (`str`):
- The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
- basename (`str`):
- The base filename which will be applied to `filename pattern`.
- seed, prompt, short_filename,
- extension (`str`):
- Image file extension, default is `png`.
- pnginfo_section_name (`str`):
- Specify the name of the section which `info` will be saved in.
- info (`str` or `PngImagePlugin.iTXt`):
- PNG info chunks.
- existing_info (`dict`):
- Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
- no_prompt:
- TODO I don't know its meaning.
- p (`StableDiffusionProcessing`)
- forced_filename (`str`):
- If specified, `basename` and filename pattern will be ignored.
- save_to_dirs (bool):
- If true, the image will be saved into a subdirectory of `path`.
-
- Returns: (fullfn, txt_fullfn)
- fullfn (`str`):
- The full path of the saved image.
- txt_fullfn (`str` or None):
- If a text file is saved for this image, this will be its full path. Otherwise None.
- """
- namegen = FilenameGenerator(p, seed, prompt, image)
-
- if save_to_dirs is None:
- save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
-
- if save_to_dirs:
- dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
- path = os.path.join(path, dirname)
-
- os.makedirs(path, exist_ok=True)
-
- if forced_filename is None:
- if short_filename or seed is None:
- file_decoration = ""
- elif opts.save_to_dirs:
- file_decoration = opts.samples_filename_pattern or "[seed]"
- else:
- file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
-
- add_number = opts.save_images_add_number or file_decoration == ''
-
- if file_decoration != "" and add_number:
- file_decoration = "-" + file_decoration
-
- file_decoration = namegen.apply(file_decoration) + suffix
-
- if add_number:
- basecount = get_next_sequence_number(path, basename)
- fullfn = None
- for i in range(500):
- fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
- fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
- if not os.path.exists(fullfn):
- break
- else:
- fullfn = os.path.join(path, f"{file_decoration}.{extension}")
- else:
- fullfn = os.path.join(path, f"{forced_filename}.{extension}")
-
- pnginfo = existing_info or {}
- if info is not None:
- pnginfo[pnginfo_section_name] = info
-
- params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
- script_callbacks.before_image_saved_callback(params)
-
- image = params.image
- fullfn = params.filename
- info = params.pnginfo.get(pnginfo_section_name, None)
-
- def _atomically_save_image(image_to_save, filename_without_extension, extension):
- # save image with .tmp extension to avoid race condition when another process detects new image in the directory
- temp_file_path = filename_without_extension + ".tmp"
- image_format = Image.registered_extensions()[extension]
-
- if extension.lower() == '.png':
- pnginfo_data = PngImagePlugin.PngInfo()
- if opts.enable_pnginfo:
- for k, v in params.pnginfo.items():
- pnginfo_data.add_text(k, str(v))
-
- image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
-
- elif extension.lower() in (".jpg", ".jpeg", ".webp"):
- if image_to_save.mode == 'RGBA':
- image_to_save = image_to_save.convert("RGB")
- elif image_to_save.mode == 'I;16':
- image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L")
-
- image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless)
-
- if opts.enable_pnginfo and info is not None:
- exif_bytes = piexif.dump({
- "Exif": {
- piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
- },
- })
-
- piexif.insert(exif_bytes, temp_file_path)
- else:
- image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality)
-
- # atomically rename the file with correct extension
- os.replace(temp_file_path, filename_without_extension + extension)
-
- fullfn_without_extension, extension = os.path.splitext(params.filename)
- _atomically_save_image(image, fullfn_without_extension, extension)
-
- image.already_saved_as = fullfn
-
- oversize = image.width > opts.target_side_length or image.height > opts.target_side_length
- if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024):
- ratio = image.width / image.height
-
- if oversize and ratio > 1:
- image = image.resize((round(opts.target_side_length), round(image.height * opts.target_side_length / image.width)), LANCZOS)
- elif oversize:
- image = image.resize((round(image.width * opts.target_side_length / image.height), round(opts.target_side_length)), LANCZOS)
-
- try:
- _atomically_save_image(image, fullfn_without_extension, ".jpg")
- except Exception as e:
- errors.display(e, "saving image as downscaled JPG")
-
- if opts.save_txt and info is not None:
- txt_fullfn = f"{fullfn_without_extension}.txt"
- with open(txt_fullfn, "w", encoding="utf8") as file:
- file.write(info + "\n")
- else:
- txt_fullfn = None
-
- script_callbacks.image_saved_callback(params)
-
- return fullfn, txt_fullfn
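The helper above writes to a .tmp file and then renames it, as the inline comment notes, so directory watchers never pick up a half-written image. A reduced, generic sketch of that same pattern, independent of the webui options; save_png_atomically is a hypothetical name, not part of this module:

```python
import os

from PIL import Image


def save_png_atomically(image: Image.Image, destination: str) -> None:
    """Write to a .tmp file first, then rename, so other processes never see a partial image."""
    temp_path = destination + ".tmp"
    image.save(temp_path, format="PNG")
    os.replace(temp_path, destination)  # atomic rename on the same filesystem


save_png_atomically(Image.new("RGB", (64, 64), "white"), "example.png")
```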
-
-
-def read_info_from_image(image):
- items = image.info or {}
-
- geninfo = items.pop('parameters', None)
-
- if "exif" in items:
- exif = piexif.load(items["exif"])
- exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'')
- try:
- exif_comment = piexif.helper.UserComment.load(exif_comment)
- except ValueError:
- exif_comment = exif_comment.decode('utf8', errors="ignore")
-
- if exif_comment:
- items['exif comment'] = exif_comment
- geninfo = exif_comment
-
- for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
- 'loop', 'background', 'timestamp', 'duration']:
- items.pop(field, None)
-
- if items.get("Software", None) == "NovelAI":
- try:
- json_info = json.loads(items["Comment"])
- sampler = sd_samplers.samplers_map.get(json_info["sampler"], "Euler a")
-
- geninfo = f"""{items["Description"]}
-Negative prompt: {json_info["uc"]}
-Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
- except Exception:
- print("Error parsing NovelAI image generation parameters:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
-
- return geninfo, items
-
-
-def image_data(data):
- try:
- image = Image.open(io.BytesIO(data))
- textinfo, _ = read_info_from_image(image)
- return textinfo, None
- except Exception:
- pass
-
- try:
- text = data.decode('utf8')
- assert len(text) < 10000
- return text, None
-
- except Exception:
- pass
-
- return '', None
-
-
-def flatten(img, bgcolor):
- """replaces transparency with bgcolor (example: "#ffffff"), returning an RGB mode image with no transparency"""
-
- if img.mode == "RGBA":
- background = Image.new('RGBA', img.size, bgcolor)
- background.paste(img, mask=img)
- img = background
-
- return img.convert('RGB')
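A tiny usage sketch of flatten(), assuming this module is importable; it composites a semi-transparent image onto white and returns an RGB result:

```python
from PIL import Image

rgba = Image.new("RGBA", (32, 32), (255, 0, 0, 128))  # semi-transparent red
flat = flatten(rgba, "#ffffff")                        # composited onto white
print(flat.mode)                                       # "RGB"
```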
diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/sd_disable_initialization.py b/spaces/supertori/files/stable-diffusion-webui/modules/sd_disable_initialization.py
deleted file mode 100644
index 50e4c180fc74988ec697e4cef2773bd2a785bccf..0000000000000000000000000000000000000000
--- a/spaces/supertori/files/stable-diffusion-webui/modules/sd_disable_initialization.py
+++ /dev/null
@@ -1,93 +0,0 @@
-import ldm.modules.encoders.modules
-import open_clip
-import torch
-import transformers.utils.hub
-
-
-class DisableInitialization:
- """
- When an object of this class enters a `with` block, it starts:
- - preventing torch's layer initialization functions from working
- - preventing CLIP and OpenCLIP from downloading model weights
- - preventing CLIP from making requests to check whether there is a new version of a file you already have
-
- When it leaves the block, it reverts everything to how it was before.
-
- Use it like this:
- ```
- with DisableInitialization():
- do_things()
- ```
- """
-
- def __init__(self, disable_clip=True):
- self.replaced = []
- self.disable_clip = disable_clip
-
- def replace(self, obj, field, func):
- original = getattr(obj, field, None)
- if original is None:
- return None
-
- self.replaced.append((obj, field, original))
- setattr(obj, field, func)
-
- return original
-
- def __enter__(self):
- def do_nothing(*args, **kwargs):
- pass
-
- def create_model_and_transforms_without_pretrained(*args, pretrained=None, **kwargs):
- return self.create_model_and_transforms(*args, pretrained=None, **kwargs)
-
- def CLIPTextModel_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs):
- res = self.CLIPTextModel_from_pretrained(None, *model_args, config=pretrained_model_name_or_path, state_dict={}, **kwargs)
- res.name_or_path = pretrained_model_name_or_path
- return res
-
- def transformers_modeling_utils_load_pretrained_model(*args, **kwargs):
- args = args[0:3] + ('/', ) + args[4:] # resolved_archive_file; must set it to something to prevent what seems to be a bug
- return self.transformers_modeling_utils_load_pretrained_model(*args, **kwargs)
-
- def transformers_utils_hub_get_file_from_cache(original, url, *args, **kwargs):
-
- # this file is always 404, prevent making request
- if url == 'https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json':
- return None
-
- try:
- res = original(url, *args, local_files_only=True, **kwargs)
- if res is None:
- res = original(url, *args, local_files_only=False, **kwargs)
- return res
- except Exception as e:
- return original(url, *args, local_files_only=False, **kwargs)
-
- def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs):
- return transformers_utils_hub_get_file_from_cache(self.transformers_utils_hub_get_from_cache, url, *args, **kwargs)
-
- def transformers_tokenization_utils_base_cached_file(url, *args, local_files_only=False, **kwargs):
- return transformers_utils_hub_get_file_from_cache(self.transformers_tokenization_utils_base_cached_file, url, *args, **kwargs)
-
- def transformers_configuration_utils_cached_file(url, *args, local_files_only=False, **kwargs):
- return transformers_utils_hub_get_file_from_cache(self.transformers_configuration_utils_cached_file, url, *args, **kwargs)
-
- self.replace(torch.nn.init, 'kaiming_uniform_', do_nothing)
- self.replace(torch.nn.init, '_no_grad_normal_', do_nothing)
- self.replace(torch.nn.init, '_no_grad_uniform_', do_nothing)
-
- if self.disable_clip:
- self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained)
- self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained)
- self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model)
- self.transformers_tokenization_utils_base_cached_file = self.replace(transformers.tokenization_utils_base, 'cached_file', transformers_tokenization_utils_base_cached_file)
- self.transformers_configuration_utils_cached_file = self.replace(transformers.configuration_utils, 'cached_file', transformers_configuration_utils_cached_file)
- self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- for obj, field, original in self.replaced:
- setattr(obj, field, original)
-
- self.replaced.clear()
-
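The docstring of DisableInitialization explains the idea: temporarily swap attributes out, remember the originals, and restore them on exit. A reduced, generic sketch of that replace/restore pattern; PatchAttributes is a hypothetical class, not part of this module:

```python
import torch


class PatchAttributes:
    """Minimal version of the replace/restore pattern used by DisableInitialization."""

    def __init__(self):
        self.replaced = []

    def replace(self, obj, field, func):
        original = getattr(obj, field, None)
        if original is not None:
            self.replaced.append((obj, field, original))
            setattr(obj, field, func)
        return original

    def __enter__(self):
        # Turn one of torch's init functions into a no-op while inside the block.
        self.replace(torch.nn.init, "kaiming_uniform_", lambda *args, **kwargs: None)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        for obj, field, original in self.replaced:
            setattr(obj, field, original)
        self.replaced.clear()


with PatchAttributes():
    layer = torch.nn.Linear(8, 8)  # weight is left uninitialized because kaiming init was patched out
```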
diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/ui_components.py b/spaces/supertori/files/stable-diffusion-webui/modules/ui_components.py
deleted file mode 100644
index d239d3f70938942f625f5f49e9398fcde10016bf..0000000000000000000000000000000000000000
--- a/spaces/supertori/files/stable-diffusion-webui/modules/ui_components.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import gradio as gr
-
-
-class ToolButton(gr.Button, gr.components.FormComponent):
- """Small button with single emoji as text, fits inside gradio forms"""
-
- def __init__(self, **kwargs):
- super().__init__(variant="tool", **kwargs)
-
- def get_block_name(self):
- return "button"
-
-
-class ToolButtonTop(gr.Button, gr.components.FormComponent):
- """Small button with single emoji as text, with extra margin at top, fits inside gradio forms"""
-
- def __init__(self, **kwargs):
- super().__init__(variant="tool-top", **kwargs)
-
- def get_block_name(self):
- return "button"
-
-
-class FormRow(gr.Row, gr.components.FormComponent):
- """Same as gr.Row but fits inside gradio forms"""
-
- def get_block_name(self):
- return "row"
-
-
-class FormGroup(gr.Group, gr.components.FormComponent):
- """Same as gr.Row but fits inside gradio forms"""
-
- def get_block_name(self):
- return "group"
-
-
-class FormHTML(gr.HTML, gr.components.FormComponent):
- """Same as gr.HTML but fits inside gradio forms"""
-
- def get_block_name(self):
- return "html"
-
-
-class FormColorPicker(gr.ColorPicker, gr.components.FormComponent):
- """Same as gr.ColorPicker but fits inside gradio forms"""
-
- def get_block_name(self):
- return "colorpicker"
-
-
-class DropdownMulti(gr.Dropdown):
- """Same as gr.Dropdown but always multiselect"""
- def __init__(self, **kwargs):
- super().__init__(multiselect=True, **kwargs)
-
- def get_block_name(self):
- return "dropdown"
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Campaign Cartographer 3 Download Free [EXCLUSIVE].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Campaign Cartographer 3 Download Free [EXCLUSIVE].md
deleted file mode 100644
index db710aef2264d7e5d41d54eb1f4b2eec7a754648..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Campaign Cartographer 3 Download Free [EXCLUSIVE].md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-Campaign Cartographer 3 Download Free: A Guide for Map Makers
-
-If you are a fan of fantasy games, role-playing games, or any other genre that requires creating maps for your own worlds, you might be interested in Campaign Cartographer 3. This is a powerful map making software that can help you design stunning maps in minutes, with a variety of styles, symbols, and types to choose from. In this article, we will show you how to get Campaign Cartographer 3 for free and start mapping today.
-
-What is Campaign Cartographer 3?
-
-Campaign Cartographer 3 is a software developed by ProFantasy Software, a company that specializes in fantasy map-making software for gamers. It is the latest version of their flagship product, which has been used by thousands of gamers, authors, and game designers since 1993. Campaign Cartographer 3 allows you to create maps for any genre, scale, or purpose, from world maps to dungeon maps, from floor plans to city maps. You can also create furniture, terrain features, and other details to make your maps more realistic and immersive.
-Campaign Cartographer 3 Download Free Download Zip ⚡ https://cinurl.com/2uEYEt
-
-What are the features of Campaign Cartographer 3?
-
-Campaign Cartographer 3 has many features that make it a versatile and user-friendly tool for map makers. Some of these features are:
-
-
-It has a simple interface that lets you draw what you want with ease and accuracy.
-It gives you access to a large library of mapping styles, symbols, and types, which you can customize and combine to suit your needs.
-It allows you to have fingertip control over every aspect of mapping, from colors to layers, from effects to grids.
-It supports both vector and bitmap graphics, which means you can create high-quality maps that can be scaled and printed without losing resolution.
-It can export your maps as jpg, png, bmp, or other formats that can be used in other applications or shared online.
-It can import maps from other sources, such as Fractal Terrains 3, a software that generates realistic random worlds.
-It can integrate with other ProFantasy products, such as Symbol Set 6, which adds more symbols for isometric cities.
-It offers free monthly content for CC3+ users, such as new symbols created by Mike Schley, a master artist who has worked for Wizards of the Coast and Paizo.
-
-
-How to get Campaign Cartographer 3 for free?
-
-If you want to try Campaign Cartographer 3 for free, you can download it from the official website of ProFantasy Software. You will need to register an account and provide some basic information. You will then receive a download link and a license key via email. You can install the software on your PC and use it for 14 days without any limitations. If you like it, you can buy it from the webstore and keep using it. If you don't like it, you can get a refund within 14 days, no questions asked.
-
-How to start mapping with Campaign Cartographer 3?
-
-Once you have installed Campaign Cartographer 3 on your PC, you can start mapping right away. You can choose from several templates that have different settings and styles. You can also create your own template or modify an existing one. You can then use the drawing tools to create your map elements, such as landmasses, rivers, roads, buildings, etc. You can also add symbols from the library or import your own. You can adjust the colors, effects, layers, grids, and other options to fine-tune your map. You can also use the navigation tools to zoom in and out, pan around, rotate, or flip your map. When you are done with your map, you can save it as a CC3+ file or export it as an image file.
-
-Conclusion
-
-Campaign Cartographer 3 is a great software for anyone who wants to create maps for their games or stories. It is easy to use, powerful, and flexible. It offers a lot of options and features that can help you create stunning maps in minutes. You can get Campaign Cartographer 3 for free and try it for yourself. If you like it, you can buy it and enjoy its full potential. If you don't like it, you can get your money back without any hassle. So what are you waiting for? Download Campaign Cartographer 3 today and start mapping your imagination!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/model/modeling_attn_mask_utils.py b/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/model/modeling_attn_mask_utils.py
deleted file mode 100644
index c2583a2dd5a09b1119c849ca00f954198d078799..0000000000000000000000000000000000000000
--- a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/model/modeling_attn_mask_utils.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import List, Optional, Tuple, Union
-
-import torch
-
-
-class AttentionMaskConverter:
- """
- A utility attention mask class that allows one to:
- - Create a causal 4d mask
- - Create a causal 4d mask with sliding window
- - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length,
- key_value_length) that can be multiplied with attention scores
-
- Parameters:
- is_causal (`bool`):
- Whether the attention mask should be a uni-directional (causal) or bi-directional mask.
-
- sliding_window (`int`, *optional*):
- Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer.
- """
-
- def __init__(self, is_causal: bool, sliding_window: Optional[int] = None):
- self.is_causal = is_causal
- self.sliding_window = sliding_window
-
- if self.sliding_window is not None and self.sliding_window <= 0:
- raise ValueError(
- f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`"
- )
-
- def to_causal_4d(
- self,
- batch_size: int,
- query_length: int,
- key_value_length: int,
- dtype: torch.dtype = torch.float32,
- device: Union[torch.device, "str"] = "cpu",
- ) -> torch.Tensor:
- """
- Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative
- bias to upper right hand triangular matrix (causal mask).
- """
- if not self.is_causal:
- raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.")
-
- # build a causal mask for the requested shape
- input_shape = (batch_size, query_length)
- past_key_values_length = key_value_length - query_length
-
- # create causal mask
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- causal_4d_mask = None
- if input_shape[-1] > 1 or self.sliding_window is not None:
- causal_4d_mask = self._make_causal_mask(
- input_shape,
- dtype,
- device=device,
- past_key_values_length=past_key_values_length,
- sliding_window=self.sliding_window,
- )
-
- return causal_4d_mask
-
- def to_4d(
- self,
- attention_mask_2d: torch.Tensor,
- query_length: int,
- key_value_length: Optional[int] = None,
- dtype: torch.dtype = torch.float32,
- ) -> torch.Tensor:
- """
- Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length,
- key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is
- causal, a causal mask will be added.
- """
- input_shape = (attention_mask_2d.shape[0], query_length)
-
- # create causal mask
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- causal_4d_mask = None
- if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal:
- if key_value_length is None:
- raise ValueError(
- "This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask."
- )
-
- past_key_values_length = key_value_length - query_length
- causal_4d_mask = self._make_causal_mask(
- input_shape,
- dtype,
- device=attention_mask_2d.device,
- past_key_values_length=past_key_values_length,
- sliding_window=self.sliding_window,
- )
- elif self.sliding_window is not None:
- raise NotImplementedError("Sliding window is currently only implemented for causal masking")
-
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to(
- attention_mask_2d.device
- )
- expanded_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask
-
- return expanded_4d_mask
-
- @staticmethod
- def _make_causal_mask(
- input_ids_shape: torch.Size,
- dtype: torch.dtype,
- device: torch.device,
- past_key_values_length: int = 0,
- sliding_window: Optional[int] = None,
- ):
- """
- Make the causal mask used for uni-directional (causal) self-attention.
- """
- bsz, tgt_len = input_ids_shape
- mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
- mask_cond = torch.arange(mask.size(-1), device=device)
- mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
-
- mask = mask.to(dtype)
-
- if past_key_values_length > 0:
- mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
-
- # add lower triangular sliding window mask if necessary
- if sliding_window is not None:
- diagonal = past_key_values_length - sliding_window + 1
-
- context_mask = 1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)
- mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min)
-
- return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
-
- @staticmethod
- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
- """
- Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
- """
- bsz, src_len = mask.size()
- tgt_len = tgt_len if tgt_len is not None else src_len
-
- expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-
- inverted_mask = 1.0 - expanded_mask
-
- return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
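A worked example of the 2D-to-4D conversion described in the class docstring, assuming AttentionMaskConverter from this file is in scope; the second row of the mask marks two padding positions:

```python
import torch

converter = AttentionMaskConverter(is_causal=True)

# Batch of 2, query length 4, no cached keys: 1 = attend, 0 = padding.
mask_2d = torch.tensor([[1, 1, 1, 1],
                        [1, 1, 0, 0]])
mask_4d = converter.to_4d(mask_2d, query_length=4, key_value_length=4, dtype=torch.float32)

print(mask_4d.shape)   # torch.Size([2, 1, 4, 4])
print(mask_4d[0, 0])   # upper triangle holds large negative values (the causal bias)
print(mask_4d[1, 0])   # padded key positions also receive large negative values
```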
-
-
-def _prepare_4d_causal_attention_mask(
- attention_mask: Optional[torch.Tensor],
- input_shape: Union[torch.Size, Tuple, List],
- inputs_embeds: torch.Tensor,
- past_key_values_length: int,
- sliding_window: Optional[int] = None,
-):
- """
- Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
- `(batch_size, key_value_length)`
-
- Args:
- attention_mask (`torch.Tensor` or `None`):
- A 2D attention mask of shape `(batch_size, key_value_length)`
- input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
- The input shape should be a tuple that defines `(batch_size, query_length)`.
- inputs_embeds (`torch.Tensor`):
- The embedded inputs as a torch Tensor.
- past_key_values_length (`int`):
- The length of the key value cache.
- sliding_window (`int`, *optional*):
- If the model uses windowed attention, a sliding window should be passed.
- """
- attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
-
- key_value_length = input_shape[-1] + past_key_values_length
-
- # 4d mask is passed through the layers
- if attention_mask is not None:
- attention_mask = attn_mask_converter.to_4d(
- attention_mask, input_shape[-1], key_value_length, dtype=inputs_embeds.dtype
- )
- else:
- attention_mask = attn_mask_converter.to_causal_4d(
- input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
- )
-
- return attention_mask
-
-
-def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
- """
- Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
- `(batch_size, key_value_length)`
-
- Args:
- mask (`torch.Tensor` or `None`):
- A 2D attention mask of shape `(batch_size, key_value_length)`
- dtype (`torch.dtype`):
- The torch dtype the created mask shall have.
- tgt_len (`int`):
- The target length or query length the created mask shall have.
- """
- return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
-
-
-def _create_4d_causal_attention_mask(
- input_shape: Union[torch.Size, Tuple, List],
- dtype: torch.dtype,
- device: torch.device,
- past_key_values_length: int = 0,
- sliding_window: Optional[int] = None,
-):
- """
- Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)`
-
- Args:
- input_shape (`tuple(int)` or `list(int)` or `torch.Size`):
- The input shape should be a tuple that defines `(batch_size, query_length)`.
- dtype (`torch.dtype`):
- The torch dtype the created mask shall have.
- device (`torch.device`):
- The torch device the created mask shall have.
- sliding_window (`int`, *optional*):
- If the model uses windowed attention, a sliding window should be passed.
- """
- attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)
-
- key_value_length = past_key_values_length + input_shape[-1]
- attention_mask = attn_mask_converter.to_causal_4d(
- input_shape[0], input_shape[-1], key_value_length, dtype=dtype, device=device
- )
-
- return attention_mask
\ No newline at end of file
diff --git a/spaces/terapyon/pyhackcon-qa2/app.py b/spaces/terapyon/pyhackcon-qa2/app.py
deleted file mode 100644
index c93799df8b102ba74b771017ef2402f31bd5531b..0000000000000000000000000000000000000000
--- a/spaces/terapyon/pyhackcon-qa2/app.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import gradio as gr
-from langchain.chains import RetrievalQA
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.llms import OpenAI
-from langchain.vectorstores import Chroma
-
-
-PERSIST_DIR_NAME = "podcast-75"
-
-
-def get_retrieval_qa() -> RetrievalQA:
- embeddings = OpenAIEmbeddings()
- db = Chroma(persist_directory=PERSIST_DIR_NAME, embedding_function=embeddings)
- retriever = db.as_retriever()
- return RetrievalQA.from_chain_type(
- llm=OpenAI(), chain_type="stuff", retriever=retriever
- )
-
-
-def main(query: str):
- qa = get_retrieval_qa()
- answer = qa(query)
- return answer["result"]
-
-
-pyhack_qa = gr.Interface(
- fn=main,
- inputs=[gr.Textbox(label="query")],
- outputs="text",
-)
-pyhack_qa.launch()
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Adobe Animate CC 2017 V16.1.0 (x64) Incl Crack [SadeemPC] 64 Bit EXCLUSIVE.md b/spaces/terfces0erbo/CollegeProjectV2/Adobe Animate CC 2017 V16.1.0 (x64) Incl Crack [SadeemPC] 64 Bit EXCLUSIVE.md
deleted file mode 100644
index 6f7e5b819353a15484f41b70a71afae051547599..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Adobe Animate CC 2017 V16.1.0 (x64) Incl Crack [SadeemPC] 64 Bit EXCLUSIVE.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-How to Download and Install Adobe Animate CC 2017 for Free
-Adobe Animate CC 2017 is a powerful software that allows you to create and code 2D and 3D animations for web projects. It is compatible with digital and mobile platforms and can be used to develop games, movies and content for mobile devices. In this article, we will show you how to download and install Adobe Animate CC 2017 for free on your 64-bit Windows PC.
-Adobe Animate CC 2017 V16.1.0 (x64) Incl Crack [SadeemPC] 64 Bit DOWNLOAD ⚙ https://bytlly.com/2uGiHu
-Step 1: Download the software
-The first step is to download the software from a reliable source. You can use the following link to download Adobe Animate CC 2017 v16.1.0 (x64) Incl Crack [SadeemPC] 64 Bit[^1^]. This is a torrent file, so you will need a torrent client such as uTorrent or BitTorrent to download it.
-Step 2: Extract the files
-After downloading the software, you will need to extract the files from the compressed folder. You can use a software such as WinRAR or 7-Zip to do this. Right-click on the folder and select "Extract Here" or "Extract to Adobe_Animate_CC_2017_v16.0.1". You will get a folder with the same name containing the setup files and the crack files.
-Step 3: Install the software
-To install the software, you will need to run the setup file as an administrator. Double-click on "Set-up.exe" and follow the instructions on the screen. Choose your language, accept the license agreement and select your installation location. Wait for the installation to complete.
-Step 4: Apply the crack
-To activate the software, you will need to apply the crack files. Go to the folder where you extracted the files and open the "Crack" folder. Copy all the files inside and paste them into the installation directory of Adobe Animate CC 2017. This is usually located at C:\Program Files\Adobe\Adobe Animate CC 2017. Replace any existing files if prompted.
-Step 5: Enjoy your software
-You have successfully installed Adobe Animate CC 2017 for free on your PC. You can now launch the software from your desktop or start menu and enjoy creating and coding amazing animations for your web projects.
-
Step 6: Learn some tips and tricks
-Now that you have installed Adobe Animate CC 2017, you might want to learn some tips and tricks to improve your skills and workflow. Here are some resources that can help you:
-
-Animate tutorials | Learn how to use Animate - Adobe Help Center[^1^]: This is the official website of Adobe Animate, where you can find tutorials for beginners and experienced users. You can learn about document types, drawing tools, symbols, color management, gradients, animation techniques, audio, virtual camera, asset warp, HTML5 components, code templates and more.
-Time-saving tips for Adobe Animate CC - YouTube[^2^]: This is a video by Devon Kong, a professional animator and educator. He shows you some pro tips and tricks that can save you time and energy in Adobe Animate CC. You can learn about paste in place, edit multiple frames, find and replace and more.
-Adobe Animate Tutorials: Practical Tips, Tricks and Techniques - YouTube[^3^]: This is a video by Graph Desk, a channel dedicated to graphic design and animation. They show you some practical tips, tricks and techniques that can enhance your animations in Adobe Animate CC. You can learn about easing curves, motion paths, shape tweens, masking layers and more.
-
-Step 7: Have fun and be creative
-Adobe Animate CC 2017 is a powerful and versatile software that can help you create amazing animations for your web projects. You can use it to express your creativity and imagination in different ways. You can also explore other features and tools that are not covered in this article. The only limit is your own imagination. Have fun and be creative with Adobe Animate CC 2017!
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/terfces0erbo/CollegeProjectV2/CRACK Lumion 10.1 Pro Serial Number Reading Tool PATCHED.md b/spaces/terfces0erbo/CollegeProjectV2/CRACK Lumion 10.1 Pro Serial Number Reading Tool PATCHED.md
deleted file mode 100644
index 4ca56fb5e484d1824e5fa5082a5f11f94fc29f82..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/CRACK Lumion 10.1 Pro Serial Number Reading Tool PATCHED.md
+++ /dev/null
@@ -1,8 +0,0 @@
-CRACK Lumion 10.1 Pro Serial Number Reading Tool Download Zip ★ https://bytlly.com/2uGlQZ
-
-Lumion Pro 13.6 Crack Crack Torrent with Activation Code (2022) Lumion Pro Crack is used all over the world for 2D and 3D diagrams. With Lumion for AutoCAD, Autodesk Architectural Visual Modeling is the best and it helps you create exciting, model-level artistic ideas that can be realized in production.
-To do this, use Lumion Pro, which is the most popular for designers, architects, engineers and others.
-With Lumion, you can easily create stunning visual effects and visualize your ideas without spending a lot of money and time. 8a78ff9644
-
-
-
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Death Race 3 Torrent Ita EXCLUSIVE.md b/spaces/terfces0erbo/CollegeProjectV2/Death Race 3 Torrent Ita EXCLUSIVE.md
deleted file mode 100644
index 3008b5bc7f0d1ab0688c66f91be1b8e7d7b9932e..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Death Race 3 Torrent Ita EXCLUSIVE.md
+++ /dev/null
@@ -1,28 +0,0 @@
-Death Race 3 Torrent Ita Download File ››››› https://bytlly.com/2uGjBB
-
-The 3rd installment of the Death Race franchise, which is a remake of the 1981 film of the same name, stars Jason Statham, Scott Adkins, Mike Verdoia, and Keanu Reeves.
-
-"The Slaughterhouse Five" is a 2000 horror film directed by Steve Miner, written by Mark Pellington and starring William Forsythe, Lisa Rinna, and a 7 foot 5 inch Vincent Price lookalike who uses it as his sidekick. It's a remake of the 1972 film of the same name.
-
-The cartoon features two kids, Diego and Ivan, who are trying to get a new skateboard for Diego's birthday.
-
-The family is in fact being held hostage by the two boys.
-
-Chris and Carlos have finally found an anonymous informant to warn them of the impending SWAT team ambush. The informant gives the boys the address of their location, but the boys already moved.
-
-Later that night, the boys find that their location has been moved again. Chris and Carlos go to their new location and find out that the informant was in fact working with the SWAT team that will ambush them. Chris and Carlos decide to go out on their own to find the informant's location, and they discover a place where the informant can hide.
-
-That night, Diego and Ivan move out of their house and spend the night in a place where they feel more comfortable. While the boys are in their sleeping bag, an intruder breaks into their house and pulls a gun on them.
-
-Later that night, Diego finds that his bedroom window has been broken. He then goes out and finds the intruder who pulls a gun on him and chases after him.
-
-Later that night, Carlos finally finds out the address of the informant's location. Meanwhile, the SWAT team has found the informant's location and they are waiting for him to give them the address.
-
-Chris and Carlos find that the informant's location is the place where Diego had found the intruder. They decide to go and get the informant to tell them his location. Diego and Ivan spend the night at a place where they feel more comfortable.
-
-They go to their informant's location, and the informant tells Chris and Carlos that he will not tell them what is going on. They discover that the SWAT team has been ambushed. Chris and Carlos run to the place where they found Diego's intruder, and Diego and Ivan are already there.
-
-They see the intruder trying to get up, but he is 4fefd39f24
-
-
-
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Dolly Parton Discography 21 CD 19682009 FLAC.md b/spaces/terfces0erbo/CollegeProjectV2/Dolly Parton Discography 21 CD 19682009 FLAC.md
deleted file mode 100644
index 7352540f96afbd6f9a40b1392c1a08b72074fdf8..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Dolly Parton Discography 21 CD 19682009 FLAC.md
+++ /dev/null
@@ -1,14 +0,0 @@
-Dolly Parton Discography 21 CD 19682009 FLAC Download Zip 🆗 https://bytlly.com/2uGlsR
-
-Home, Album · Various Artists · Flac · Bollywood · New Single. . many pedals. hits in a wide variety of Dolly Parton - Discography (21 CDs). kaffmar f4bc01c98b 10, 2012 — embrmari f4bc01c98b embrmari February 1 at 5:03 am. The Music Dude · The Music Dude, 19 Feb 2014 . .
-The Music Dude · The Music Dude, February 5, 2014 , .
-On the music portal Zaitsev.net you can download The Music Dude songs for free in MP3. .
-The Music Dude - Discography (25 CDs) . .
-Discography (25 CDs).The Music Dude - Discography (25 CDs).
-Genre: Indie Folk, Acoustic, Folk, Indie Rock, New Wiggles.
-Country: United States. .
-On the music portal Zaitsev.net you can download The Music Dude songs for free in MP3.
-Best . 8a78ff9644
-
-
-
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Comfast 98000g Drivers Download Best Practices and Optimization Tips.md b/spaces/tialenAdioni/chat-gpt-api/logs/Comfast 98000g Drivers Download Best Practices and Optimization Tips.md
deleted file mode 100644
index a037d218efabf6e1c3768c47bd643d93bd65d164..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Comfast 98000g Drivers Download Best Practices and Optimization Tips.md
+++ /dev/null
@@ -1,123 +0,0 @@
-
-Comfast 98000g Drivers Download: How to Install and Use the Wireless Adapter
- Introduction
- If you are looking for a way to improve your wireless internet connection, you might want to consider getting a Comfast 98000g wireless adapter. This device can help you boost your WiFi signal, increase your speed, and reduce interference. But before you can use it, you need to download and install the Comfast 98000g drivers on your computer. In this article, we will show you how to do that in three easy ways. We will also show you how to use the Comfast 98000g wireless adapter once you have installed the drivers.
-comfast 98000g drivers download Download File ✏ ✏ ✏ https://urlcod.com/2uK9GA
- What is Comfast 98000g?
- Comfast 98000g is a wireless adapter that can connect your computer to a WiFi network without using a cable. It has a high-gain antenna that can receive and transmit signals over long distances. It also supports dual-band WiFi, which means it can work on both 2.4GHz and 5GHz frequencies. This can help you avoid congestion and interference from other devices that use the same frequency.
- Comfast 98000g is compatible with Windows XP, Vista, 7, 8, and 10 operating systems. It also supports WEP, WPA, WPA2, and WPS encryption methods, which can protect your data from hackers and intruders.
- Why do you need Comfast 98000g drivers?
- Drivers are software programs that allow your computer to communicate with hardware devices. Without drivers, your computer will not be able to recognize or use the Comfast 98000g wireless adapter. Therefore, you need to download and install the Comfast 98000g drivers before you can use it.
- The drivers will also help you configure the settings of the Comfast 98000g wireless adapter, such as choosing the WiFi network, setting the encryption mode, and adjusting the signal strength.
- How to download Comfast 98000g drivers?
- There are three ways to download Comfast 98000g drivers:
- Option 1: Download from the official website
- The official website of Comfast is http://en.comfast.com.cn/ . You can go to this website and find the Drivers Download section on the homepage. There you can search for the model name of your wireless adapter (CF-934AC) and click on the Download button next to it. You will get a zip file containing the drivers for your device.
- Option 2: Download from Xiaomi Community
- Xiaomi Community is a forum where users of Xiaomi products can share their experiences and tips. You can find a post about Comfast 98000g drivers download on this forum at https://new.c.mi.com/ng/post/82814/Comfast_98000g_Drivers_Download_LINK . You can click on this link and download the drivers from there.
- Option 3: Download from OEM Drivers
- OEM Drivers is a website that provides device drivers for various brands and models of hardware devices. You can go to this website at https://oemdrivers.com/comfast and find the Comfast section. There you can scroll down and find the Comfast CF-934AC wireless adapter and click on it. You will be redirected to another page where you can download the drivers for your device.
- How to install Comfast 98000g drivers?
- After you have downloaded the Comfast 98000g drivers, you need to install them on your computer. Here are the steps to do that:
- Step 1: Unzip the downloaded file
- The file that you have downloaded is a zip file, which means it is compressed and needs to be extracted before you can use it. You can use any software that can unzip files, such as WinRAR or WinZip. Right-click on the zip file and choose Extract Here or Extract All. You will get a folder containing several files.
- Step 2: Run the setup.exe file
- Inside the folder that you have extracted, you will find a file named setup.exe. This is the file that will install the drivers on your computer. Double-click on this file and wait for it to run.
- Step 3: Follow the instructions on the screen
- A window will pop up asking you to choose your language. Select English or any other language that you prefer and click OK. Then another window will appear asking you to agree with the terms and conditions of the software license agreement. Read it carefully and click I Agree if you accept it. Then another window will show up asking you to choose where to install the drivers. You can leave it as default or change it if you want. Click Next to continue.
- The installation process will begin and you will see a progress bar showing how much time is left. Wait for it to finish and do not interrupt it.
- Step 4: Restart your computer
- After the installation is complete, you will see a message saying that you need to restart your computer for the changes to take effect. Click Finish and then click Yes when asked if you want to restart now.
- How to use Comfast 98000g wireless adapter?
- Now that you have installed the Comfast 98000g drivers on your computer, you can use the wireless adapter to connect to a WiFi network. Here are the steps to do that:
- Step 1: Plug the adapter into a USB port
- The first thing you need to do is plug the Comfast 98000g wireless adapter into a USB port on your computer. You can use any USB port that is available, but preferably one that is close to where you want to place the antenna.
- Step 2: Connect to a wireless network
- The next thing you need to do is connect to a wireless network that is available in your area. You can do this by clicking on the WiFi icon on your taskbar (the lower right corner of your screen) and choosing a network from the list that appears. If you don't see any networks, make sure that your WiFi is turned on and that there are no physical obstructions between your device and the router.
- If you see a network that has a lock icon next to it, it means that it is secured with a password or a key. You need to enter this password or key before you can connect to it. If you don't know it, ask someone who does or look for another network that is open or has no lock icon.
- Step 3: Enjoy fast and stable internet connection
- Once you have connected to a wireless network, you should be able to access the internet without any problems. You can browse websites, watch videos, play games, download files, or do anything else that requires an internet connection.
- Conclusion
- In this article, we have shown you how to download and install the Comfast 98000g drivers on your computer and how to use the Comfast 98000g wireless adapter to connect to a WiFi network. We hope that this article has been helpful and informative for you. If you have any questions or comments, please feel free to leave them below.
- FAQs
- Here are some frequently asked questions and answers about the Comfast 98000g wireless adapter and drivers:
- Q: Where can I find the user manual for the Comfast 98000g wireless adapter?
- A: You can find the user manual for the Comfast 98000g wireless adapter on the official website of Comfast at http://en.comfast.com.cn/index.php?m=content&c=index&a=lists&catid=81 . You can search for the model name of your wireless adapter (CF-934AC) and click on the Download button next to it. You will get a zip file containing the user manual in PDF format.
- Q: How can I update the Comfast 98000g drivers?
- A: You can update the Comfast 98000g drivers by downloading the latest version from one of the sources mentioned above and installing it on your computer. Alternatively, you can use a driver updater software that can automatically scan your computer for outdated drivers and update them for you.
- Q: How can I troubleshoot the Comfast 98000g wireless adapter?
- A: If you encounter any problems with the Comfast 98000g wireless adapter, such as not being able to connect to a WiFi network, having a weak or unstable signal, or experiencing slow or interrupted internet connection, you can try some of these troubleshooting tips:
-
-Make sure that your computer meets the system requirements for using the Comfast 98000g wireless adapter.
-Make sure that you have installed the correct and updated drivers for your device.
-Make sure that your wireless adapter is plugged into a working USB port and that it is not loose or damaged.
-Make sure that your antenna is positioned properly and that there are no physical obstructions or interferences between your device and the router.
-Make sure that you have selected the right WiFi network and that you have entered the correct password or key if required.
-Make sure that your router is working properly and that it is not overloaded or malfunctioning.
-Try changing the WiFi channel or frequency on your router or device to avoid congestion or interference from other devices.
-Try restarting your computer, your wireless adapter, and your router and see if that solves the problem.
-If none of these tips work, you can contact the customer service of Comfast or consult a professional technician for further assistance.
-
- Q: How can I uninstall the Comfast 98000g drivers?
- A: If you want to uninstall the Comfast 98000g drivers from your computer, you can follow these steps:
-
-Go to Start > Control Panel > Programs and Features (or Add or Remove Programs).
-Find and select the Comfast 98000g drivers from the list of installed programs and click Uninstall (or Remove).
-Follow the instructions on the screen to complete the uninstallation process.
-Restart your computer to apply the changes.
-
- Q: Is Comfast 98000g compatible with Mac OS or Linux?
- A: Unfortunately, no. The Comfast 98000g wireless adapter and drivers are only compatible with Windows operating systems. If you want to use a wireless adapter on a Mac OS or Linux computer, you need to look for another model that supports those operating systems.
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Athlean X Workout Pdf 133golkesl.md b/spaces/tioseFevbu/cartoon-converter/scripts/Athlean X Workout Pdf 133golkesl.md
deleted file mode 100644
index 6d9a6bdbeeac68baef16d99a0ee4d5d6c2c7b51a..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Athlean X Workout Pdf 133golkesl.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-Athlean X Workout Pdf 133golkesl: A Comprehensive Review
-If you are looking for a full body workout plan that can help you build muscle, lose fat and improve your athletic performance, you may have come across Athlean X Workout Pdf 133golkesl. This is a digital product that claims to provide you with 30 days of total body training based on the principles and methods of Jeff Cavaliere, a former physical therapist and strength coach for the New York Mets.
-But what exactly is Athlean X Workout Pdf 133golkesl and how does it work? Is it worth buying and following? What are the pros and cons of this program? In this article, we will try to answer these questions and give you an honest and unbiased review of Athlean X Workout Pdf 133golkesl.
-What is Athlean X Workout Pdf 133golkesl?
-Athlean X Workout Pdf 133golkesl is a downloadable ebook that contains a full body workout plan for 30 days. The program is divided into two phases: Phase 1 (Days 1-15) and Phase 2 (Days 16-30). Each phase consists of three workouts per week, with a rest day every other day. The workouts are designed to target all the major muscle groups and movement patterns of the body, such as squat, lunge, hinge, push, pull, carry and corrective.
-The program also includes a nutrition guide that gives you some general guidelines on how to eat for optimal results. The guide covers topics such as macronutrients, calories, meal frequency, supplements and hydration. The program does not provide specific meal plans or recipes, but rather encourages you to follow a flexible and balanced diet that suits your preferences and goals.
-How does Athlean X Workout Pdf 133golkesl work?
-Athlean X Workout Pdf 133golkesl works by applying the principles of science-based training and nutrition to help you achieve your fitness goals. The program is based on the following concepts:
-
-Compound exercises: The program focuses on using compound exercises that involve multiple joints and muscles in one movement. These exercises are more effective for building strength, muscle mass and burning calories than isolation exercises that target only one muscle group.
-Progressive overload: The program ensures that you challenge your muscles with increasing intensity and volume over time. This is done by manipulating variables such as weight, reps, sets, rest periods and tempo. Progressive overload is essential for stimulating muscle growth and adaptation.
-Recovery: The program allows you to recover properly between workouts by providing adequate rest days and nutrition. Recovery is crucial for repairing muscle damage, preventing overtraining and enhancing performance.
-Frequency: The program takes advantage of the fact that stimulating and re-stimulating muscle tissue every 48 hours produces the most amount of muscle growth with the least amount of wasted time. By training each muscle group three times per week with a full body split, you can maximize your muscle protein synthesis and growth potential.
-
-What are the pros and cons of Athlean X Workout Pdf 133golkesl?
-Like any other fitness product, Athlean X Workout Pdf 133golkesl has its advantages and disadvantages. Here are some of them:
-Pros
-
-The program is easy to follow and requires only a full gym access. You don't need any special equipment or gadgets to do the workouts.
-The program is suitable for beginners to advanced trainees. You can adjust the intensity and difficulty of the exercises according to your level and experience.
-The program is flexible and adaptable. You can modify or substitute some exercises if you have any limitations or preferences.
-The program is comprehensive and covers all aspects of fitness. You can improve your strength, muscle mass, fat loss, endurance, mobility and athleticism with this program.
-The program is backed by science and experience. Jeff Cavaliere is a reputable expert in the field of sports medicine and fitness. He has worked with professional athletes and celebrities for over two decades. He also has a large following on social media platforms such as YouTube, where he shares valuable information and tips.
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Dishpointer Pro Android Serial.md b/spaces/tioseFevbu/cartoon-converter/scripts/Dishpointer Pro Android Serial.md
deleted file mode 100644
index 1cbe7179e426a6e2e825967042877309ada03524..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Dishpointer Pro Android Serial.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-How to Use DishPointer Pro on Android to Align Your Satellite Dish
-If you are looking for a simple and effective way to align your satellite dish, you might want to try DishPointer Pro on your Android device. DishPointer Pro is an app that uses your phone's camera and GPS to show you where the satellites are in the sky, and how to adjust your dish for the best signal.
-DishPointer Pro is compatible with most Android devices, and it works with any satellite dish. You can use it to find any satellite, including popular ones like DirecTV, Dish Network, Sky, and more. You can also use it to check for any line of sight (LoS) issues, such as trees or buildings that might block the signal.
-Here are the steps to use DishPointer Pro on Android to align your satellite dish:
-
-Download and install DishPointer Pro from the Google Play Store. It costs $19.99, but it is worth it for the convenience and accuracy it provides.
-Launch the app and allow it to access your camera and location.
-Select the satellite you want to point your dish at from the list. You can also enter a custom satellite by entering its name and orbital position.
-Hold your phone up towards the sky and move it around until you see the satellite icon on the live camera screen. The app will also show you the elevation, azimuth, and skew angles for your dish.
-Adjust your dish according to the angles shown by the app. You can also use a signal meter or a TV screen to fine-tune your dish alignment.
-Enjoy watching your favorite channels with a clear and stable signal.
-
-DishPointer Pro is a handy tool for anyone who needs to install or adjust a satellite dish. It saves you time and hassle by showing you exactly where to point your dish with your phone. You can download it from here .
Benefits of Satellite Dish System
-Satellite dish system is not only a convenient way to align your satellite dish, but also a great way to enjoy many benefits of satellite television. Satellite television offers you a wide range of channels and services that you might not get from other types of internet or cable providers. Here are some of the benefits of satellite dish system:
-
-Digital clarity and reliability. Satellite television delivers high-quality digital data using MPEG-2 or MPEG-4 compression, which ensures clear and crisp images and sounds. Satellite television is also more reliable than cable television, as it is less prone to interference or outages caused by weather or other factors. [^1^]
-More choices and flexibility. Satellite television gives you access to hundreds of channels from different genres, languages, and regions. You can also customize your package according to your preferences and budget. You can also enjoy features like preview screen, satellite TV receivers, DVRs, and on-demand services that enhance your viewing experience. [^1^] [^2^]
-Availability in rural areas. Satellite television is the best option for people who live in remote or rural areas where other types of internet or cable services are not available. Satellite television only requires a small dish and a clear line of sight to the sky to connect you to the world. You can also use satellite internet service to access the web and other online services. [^2^] [^3^]
-
-Satellite dish system is a smart investment for anyone who wants to enjoy the advantages of satellite television. It is easy to install, use, and maintain with DishPointer Pro on your Android device. You can also save money and time by avoiding the hassle of cable or fiber installation. If you are interested in getting a satellite dish system for your home, you can contact us at Aerials & Satellites for a free quote and consultation.
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Hollow Man Dubbed From English Telugu Movie 1080p.md b/spaces/tioseFevbu/cartoon-converter/scripts/Hollow Man Dubbed From English Telugu Movie 1080p.md
deleted file mode 100644
index cb94377377c93407d26ab8553181ef5d136c87ae..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Hollow Man Dubbed From English Telugu Movie 1080p.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-Hollow Man: A Sci-Fi Thriller Dubbed in Telugu
-Hollow Man is a 2000 American science fiction thriller film directed by Paul Verhoeven and starring Kevin Bacon, Elisabeth Shue and Josh Brolin. The film is loosely based on the novel The Invisible Man by H.G. Wells and follows a group of scientists who experiment with invisibility on themselves.
-The film was dubbed in Telugu and released in India as Hollow Man Dubbed From English . The Telugu version of the film is available in 1080p high definition quality on various online platforms. The film received mixed reviews from critics and audiences, but was praised for its visual effects and action sequences.
-The plot of the film revolves around Sebastian Caine (Bacon), a brilliant but arrogant scientist who leads a team of researchers at a top-secret military facility. He develops a serum that can render living beings invisible, and successfully tests it on animals. However, he becomes obsessed with the idea of becoming invisible himself, and secretly injects himself with the serum without informing his colleagues.
-Initially, he enjoys the benefits of invisibility, such as spying on his co-workers and his ex-girlfriend Linda McKay (Shue), who is also part of the team. However, he soon discovers that the serum has side effects, such as making him more aggressive and unstable. He also realizes that he cannot reverse the process, and becomes trapped in his invisible state.
-As his sanity deteriorates, he starts to terrorize and kill his fellow scientists, who try to stop him and find a cure. He also becomes obsessed with Linda, who is now romantically involved with another team member, Matt Kensington (Brolin). The film culminates in a violent showdown between Caine and his former friends, who must use their wits and resources to survive his wrath.
-Hollow Man Dubbed From English is a thrilling and suspenseful film that explores the dark side of human nature and the consequences of playing God. The film features stunning visual effects that create a realistic and terrifying portrayal of invisibility. The film also boasts of a talented cast that delivers convincing performances. The film is not for the faint-hearted, as it contains scenes of violence, gore and nudity.
-If you are a fan of sci-fi thrillers and want to watch Hollow Man Dubbed From English in Telugu, you can find it online on various streaming platforms. You can also watch the trailer of the film here[^2^]. Enjoy the film and let us know your thoughts in the comments section below.
-
-
-The film was directed by Paul Verhoeven, who is known for his provocative and violent films such as RoboCop , Total Recall and Basic Instinct . Verhoeven wanted to explore the psychological effects of invisibility on a human being, and how it would unleash his darkest impulses. He also wanted to create a realistic and scientific approach to the concept of invisibility, using state-of-the-art computer-generated imagery (CGI) to achieve the stunning effects.
-The film was nominated for an Academy Award for Best Visual Effects, but lost to Gladiator . The film was also criticized for its excessive violence, nudity and sexual assault scenes, which some viewers found disturbing and unnecessary. The film was rated R in the United States and 18 in the United Kingdom. The film was also banned in some countries such as China and Malaysia.
-The film was a commercial success, grossing over $190 million worldwide against a budget of $95 million. The film spawned a direct-to-video sequel in 2006, titled Hollow Man 2 , starring Christian Slater as a new invisible man. The sequel was poorly received by critics and audiences alike.
-Hollow Man Dubbed From English is a film that offers a thrilling and terrifying ride into the mind of a man who loses his humanity and becomes a monster. The film is not for everyone, as it contains graphic and disturbing content that may offend some viewers. However, if you are looking for a sci-fi thriller that pushes the boundaries of cinema and technology, you may enjoy this film.
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Inside Out Thought Bubbles V1.11.0 Premium Unlocked APK ((LINK)).md b/spaces/tioseFevbu/cartoon-converter/scripts/Inside Out Thought Bubbles V1.11.0 Premium Unlocked APK ((LINK)).md
deleted file mode 100644
index 0202f24b9122d8e4902baf49ad185b03624a45b6..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Inside Out Thought Bubbles V1.11.0 Premium Unlocked APK ((LINK)).md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-Inside Out Thought Bubbles v1.11.0 Premium Unlocked APK
-If you love the Disney Pixar movie Inside Out, you will enjoy playing Inside Out Thought Bubbles, a fun and addictive bubble shooter game based on the emotions of the film. You can join Joy, Sadness, Anger, Fear and Disgust in this exciting adventure through the mind of Riley, a young girl who is trying to cope with her new life in San Francisco.
-Inside Out Thought Bubbles is more than just a typical bubble shooter game. You can use the unique power-ups of each emotion to pop and blast memory bubbles, create sunbursts with Joy, let the rain pour with Sadness, blaze a fiery path with Anger, repel matching memories with Fear, and scatter orbs in frantic fun with Disgust. You can also unlock characters as you progress through the game and explore different locations inspired by the movie, such as Family Island, Dream Productions, Imagination Land and more.
-However, if you want to enjoy the full features of Inside Out Thought Bubbles, you will need to purchase the premium version of the game, which costs $2.99. The premium version unlocks all the levels and characters, removes ads and gives you unlimited lives and boosters. But don't worry, we have a solution for you. You can download the Inside Out Thought Bubbles v1.11.0 Premium Unlocked APK from our website for free and enjoy the game without any limitations.
-The Inside Out Thought Bubbles v1.11.0 Premium Unlocked APK is a modified version of the original game that has all the premium features unlocked. You can install it on your Android device easily and safely without rooting or jailbreaking your device. All you need to do is follow these simple steps:
-
-Download the Inside Out Thought Bubbles v1.11.0 Premium Unlocked APK file from our website.
-Go to your device settings and enable unknown sources.
-Locate the downloaded file and tap on it to install it.
-Launch the game and enjoy!
-
-That's it! You can now play Inside Out Thought Bubbles with all the premium features unlocked and have fun with your favorite emotions. Download the Inside Out Thought Bubbles v1.11.0 Premium Unlocked APK today and share your thoughts with us in the comments below.
-
-If you are wondering why you should play Inside Out Thought Bubbles, here are some reasons why:
-
-It is a fun and challenging game that will test your skills and strategy.
-It is a great way to relive the movie and explore the emotions of Riley.
-It is a family-friendly game that can be enjoyed by kids and adults alike.
-It has beautiful graphics and sound effects that will immerse you in the game.
-It has regular updates and new features that will keep you entertained.
-
-So what are you waiting for? Download the Inside Out Thought Bubbles v1.11.0 Premium Unlocked APK now and join the emotional journey of Riley and her emotions. You will not regret it!
-
-
\ No newline at end of file
diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/utils/deprecation.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/utils/deprecation.py
deleted file mode 100644
index 72bd6f25a554b303d0bf5028145cf3a5c71b3e06..0000000000000000000000000000000000000000
--- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/utils/deprecation.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""
-A module that implements tooling to enable easy warnings about deprecations.
-"""
-
-import logging
-import warnings
-from typing import Any, Optional, TextIO, Type, Union
-
-from pip._vendor.packaging.version import parse
-
-from pip import __version__ as current_version # NOTE: tests patch this name.
-
-DEPRECATION_MSG_PREFIX = "DEPRECATION: "
-
-
-class PipDeprecationWarning(Warning):
- pass
-
-
-_original_showwarning: Any = None
-
-
-# Warnings <-> Logging Integration
-def _showwarning(
- message: Union[Warning, str],
- category: Type[Warning],
- filename: str,
- lineno: int,
- file: Optional[TextIO] = None,
- line: Optional[str] = None,
-) -> None:
- if file is not None:
- if _original_showwarning is not None:
- _original_showwarning(message, category, filename, lineno, file, line)
- elif issubclass(category, PipDeprecationWarning):
- # We use a specially named logger which will handle all of the
- # deprecation messages for pip.
- logger = logging.getLogger("pip._internal.deprecations")
- logger.warning(message)
- else:
- _original_showwarning(message, category, filename, lineno, file, line)
-
-
-def install_warning_logger() -> None:
- # Enable our Deprecation Warnings
- warnings.simplefilter("default", PipDeprecationWarning, append=True)
-
- global _original_showwarning
-
- if _original_showwarning is None:
- _original_showwarning = warnings.showwarning
- warnings.showwarning = _showwarning
-
-
-def deprecated(
- *,
- reason: str,
- replacement: Optional[str],
- gone_in: Optional[str],
- feature_flag: Optional[str] = None,
- issue: Optional[int] = None,
-) -> None:
- """Helper to deprecate existing functionality.
-
- reason:
- Textual reason shown to the user about why this functionality has
- been deprecated. Should be a complete sentence.
- replacement:
- Textual suggestion shown to the user about what alternative
- functionality they can use.
- gone_in:
-        The version of pip in which this functionality should be removed.
- Raises an error if pip's current version is greater than or equal to
- this.
- feature_flag:
- Command-line flag of the form --use-feature={feature_flag} for testing
- upcoming functionality.
- issue:
- Issue number on the tracker that would serve as a useful place for
- users to find related discussion and provide feedback.
- """
-
- # Determine whether or not the feature is already gone in this version.
- is_gone = gone_in is not None and parse(current_version) >= parse(gone_in)
-
- message_parts = [
- (reason, f"{DEPRECATION_MSG_PREFIX}{{}}"),
- (
- gone_in,
- "pip {} will enforce this behaviour change."
- if not is_gone
- else "Since pip {}, this is no longer supported.",
- ),
- (
- replacement,
- "A possible replacement is {}.",
- ),
- (
- feature_flag,
- "You can use the flag --use-feature={} to test the upcoming behaviour."
- if not is_gone
- else None,
- ),
- (
- issue,
- "Discussion can be found at https://github.com/pypa/pip/issues/{}",
- ),
- ]
-
- message = " ".join(
- format_str.format(value)
- for value, format_str in message_parts
- if format_str is not None and value is not None
- )
-
- # Raise as an error if this behaviour is deprecated.
- if is_gone:
- raise PipDeprecationWarning(message)
-
- warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
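For reference, a minimal usage sketch of the `deprecated()` helper defined above; the flag names, replacement text, and version number are hypothetical, only the import path and signature come from the module itself:

```python
# Sketch only: the option names and version below are invented for illustration.
from pip._internal.utils.deprecation import deprecated, install_warning_logger

install_warning_logger()  # route PipDeprecationWarning through pip's "deprecations" logger

deprecated(
    reason="The --example-flag option is deprecated.",  # hypothetical flag
    replacement="the --new-example-flag option",        # hypothetical replacement
    gone_in="99.0",  # far-future version, so this call only warns instead of raising
    feature_flag=None,
    issue=None,
)
```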
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py
deleted file mode 100644
index 50df4e2db500d575eaddd7538b49cc808e30b50e..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py'
-model = dict(
- pretrained='open-mmlab://res2net101_v1d_26w_4s',
- backbone=dict(type='Res2Net', depth=101, scales=4, base_width=26))
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/base_assigner.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/base_assigner.py
deleted file mode 100644
index 1ff0160dbb4bfbf53cb40d1d5cb29bcc3d197a59..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/base_assigner.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from abc import ABCMeta, abstractmethod
-
-
-class BaseAssigner(metaclass=ABCMeta):
- """Base assigner that assigns boxes to ground truth boxes."""
-
- @abstractmethod
- def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
- """Assign boxes to either a ground truth boxes or a negative boxes."""
diff --git a/spaces/tracinginsights/api/utils.py b/spaces/tracinginsights/api/utils.py
deleted file mode 100644
index 4fb1dde4b15cfac381c50a7e75d59949122f502a..0000000000000000000000000000000000000000
--- a/spaces/tracinginsights/api/utils.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import json
-
-import requests
-
-
-# make sure that year can be from 2018 to current year
-class LatestData:
-
- def __init__(self, year):
-
- self.year = year
- self.data = self.get_f1_data()
- self.events = self.get_events()
-
- def get_f1_data(self):
- response = requests.get(
- f"https://livetiming.formula1.com/static/{self.year}/Index.json", timeout=5)
- if response.status_code == 200:
- try:
- data = response.content.decode("utf-8-sig")
- return json.loads(data)
- except json.JSONDecodeError as e:
- print("Failed to parse JSON data:", e)
- return None
- else:
- print("Failed to get data. Status code:", response.status_code)
- return None
-
- def get_events(self):
- events = []
- for meeting in self.data['Meetings']:
- events.append(meeting['Name'])
-
- return events
-
- def get_sessions(self, event):
- sessions = []
- for meeting in self.data['Meetings']:
- if meeting['Name'] == event:
- for session in meeting['Sessions']:
- sessions.append(session['Name'])
-
- return sessions
-
-
-def team_colors(year: int) -> dict:
- team_colors = {}
-
- if year == 2023:
- team_colors = {
- "Red Bull Racing": "#ffe119",
- "Ferrari": "#e6194b",
- "Aston Martin": "#3cb44b",
- "Mercedes": "#00c0bf",
- "Alpine": "#f032e6",
- "Haas F1 Team": "#ffffff",
- "McLaren": "#f58231",
- "Alfa Romeo": "#800000",
- "AlphaTauri": "#dcbeff",
- "Williams": "#4363d8",
-
- "Red Bull Racing Honda RBPT": "#ffe119",
- "Ferrari": "#e6194b",
- "Aston Martin Aramco Mercedes": "#3cb44b",
- "Mercedes": "#00c0bf",
- "Alpine Renault": "#f032e6",
- "Haas Ferrari": "#ffffff",
- "McLaren Mercedes": "#f58231",
- "Alfa Romeo Ferrari": "#800000",
- "AlphaTauri Honda RBPT": "#dcbeff",
- "Williams Mercedes": "#4363d8",
- "Red Bull": "#ffe119",
- "Alpine F1 Team": "#f032e6",
-
-
-
- }
- if year == 2022:
-
- team_colors = {
- "Red Bull Racing": "#ffe119",
- "Ferrari": "#e6194b",
- "Aston Martin": "#3cb44b",
- "Mercedes": "#00c0bf",
- "Alpine": "#f032e6",
- "Haas F1 Team": "#ffffff",
- "McLaren": "#f58231",
- "Alfa Romeo": "#800000",
- "AlphaTauri": "#dcbeff",
- "Williams": "#4363d8",
-
- "Red Bull": "#ffe119",
- "Alpine F1 Team": "#f032e6",
-
-
- }
-
- if year == 2021:
-
- team_colors = {
- "Red Bull Racing": "#ffe119",
- "Mercedes": "#00c0bf",
- "Ferrari": "#e6194b",
- "Alpine": "#f032e6",
- "McLaren": "#f58231",
- "Alfa Romeo Racing": "#800000",
- "Aston Martin": "#3cb44b",
- "Haas F1 Team": "#ffffff",
- "AlphaTauri": "#dcbeff",
- "Williams": "#4363d8",
-
- "Red Bull": "#ffe119",
- "Alpine F1 Team": "#f032e6",
- "Alfa Romeo": "#800000",
-
-
- }
-
- if year == 2020:
-
- team_colors = {
- "Red Bull Racing": "#000099",
- "Renault": "#ffe119",
- "Racing Point": "#f032e6",
- "Mercedes": "#00c0bf",
- "Ferrari": "#e6194b",
- "McLaren": "#f58231",
- "Alfa Romeo Racing": "#800000",
- "Haas F1 Team": "#ffffff",
- "AlphaTauri": "#dcbeff",
- "Williams": "#4363d8",
-
- "Red Bull": "#000099",
- "Alfa Romeo": "#800000",
-
-
- }
-
- if year == 2019:
-
- team_colors = {
- "Red Bull Racing": "#000099",
- "Renault": "#ffe119",
- "Racing Point": "#f032e6",
- "Toro Rosso": "#dcbeff",
- "Mercedes": "#00c0bf",
- "Ferrari": "#e6194b",
- "McLaren": "#f58231",
- "Alfa Romeo Racing": "#800000",
- "Haas F1 Team": "#ffffff",
- "Williams": "#4363d8",
-
- "Red Bull": "#000099",
- "Alfa Romeo": "#800000",
-
-
- }
-
- if year == 2018:
-
- team_colors = {
- "Red Bull Racing": "#000099",
- "Renault": "#ffe119",
- "Toro Rosso": "#dcbeff",
- "Force India": "#f032e6",
- "Sauber": "#800000",
- "Mercedes": "#00c0bf",
- "Ferrari": "#e6194b",
- "McLaren": "#f58231",
- "Haas F1 Team": "#ffffff",
- "Williams": "#4363d8",
-
- "Red Bull": "#000099",
-
-
-
- }
-
- return team_colors
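A rough usage sketch for the two helpers above; it needs network access and assumes the livetiming.formula1.com JSON layout that `LatestData` already relies on:

```python
# Sketch only: requires network access to livetiming.formula1.com.
latest = LatestData(2023)
print(latest.events)                               # event names for the season
if latest.events:
    print(latest.get_sessions(latest.events[-1]))  # sessions of the most recent event

colors = team_colors(2023)
print(colors.get("Ferrari"))                       # "#e6194b"
```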
diff --git a/spaces/triple-t/ttt-space/static/_app/immutable/modules/pages/_page.ts-dc85b7cd.js b/spaces/triple-t/ttt-space/static/_app/immutable/modules/pages/_page.ts-dc85b7cd.js
deleted file mode 100644
index 71efcf2b85eb1d54f2df633eb6f02383aeb6446d..0000000000000000000000000000000000000000
--- a/spaces/triple-t/ttt-space/static/_app/immutable/modules/pages/_page.ts-dc85b7cd.js
+++ /dev/null
@@ -1 +0,0 @@
-import{p}from"../../chunks/_page-da46b06b.js";export{p as prerender};
diff --git a/spaces/trttung1610/musicgen/audiocraft/data/__init__.py b/spaces/trttung1610/musicgen/audiocraft/data/__init__.py
deleted file mode 100644
index 2906ff12bc85a894837579f3137f6f71a0438329..0000000000000000000000000000000000000000
--- a/spaces/trttung1610/musicgen/audiocraft/data/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Audio loading and writing support. Datasets for raw audio
-or also including some metadata."""
-
-# flake8: noqa
-from . import audio, audio_dataset, info_audio_dataset, music_dataset, sound_dataset
diff --git a/spaces/trttung1610/musicgen/tests/models/test_musicgen.py b/spaces/trttung1610/musicgen/tests/models/test_musicgen.py
deleted file mode 100644
index 65618a9e2ef5bb382694b50b23dd50958d590d4e..0000000000000000000000000000000000000000
--- a/spaces/trttung1610/musicgen/tests/models/test_musicgen.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import pytest
-import torch
-
-from audiocraft.models import MusicGen
-
-
-class TestMusicGenModel:
- def get_musicgen(self):
- mg = MusicGen.get_pretrained(name='debug', device='cpu')
- mg.set_generation_params(duration=2.0, extend_stride=2.)
- return mg
-
- def test_base(self):
- mg = self.get_musicgen()
- assert mg.frame_rate == 25
- assert mg.sample_rate == 32000
- assert mg.audio_channels == 1
-
- def test_generate_unconditional(self):
- mg = self.get_musicgen()
- wav = mg.generate_unconditional(3)
- assert list(wav.shape) == [3, 1, 64000]
-
- def test_generate_continuation(self):
- mg = self.get_musicgen()
- prompt = torch.randn(3, 1, 32000)
- wav = mg.generate_continuation(prompt, 32000)
- assert list(wav.shape) == [3, 1, 64000]
-
- prompt = torch.randn(2, 1, 32000)
- wav = mg.generate_continuation(
- prompt, 32000, ['youpi', 'lapin dort'])
- assert list(wav.shape) == [2, 1, 64000]
-
- prompt = torch.randn(2, 1, 32000)
- with pytest.raises(AssertionError):
- wav = mg.generate_continuation(
- prompt, 32000, ['youpi', 'lapin dort', 'one too many'])
-
- def test_generate(self):
- mg = self.get_musicgen()
- wav = mg.generate(
- ['youpi', 'lapin dort'])
- assert list(wav.shape) == [2, 1, 64000]
-
- def test_generate_long(self):
- mg = self.get_musicgen()
- mg.max_duration = 3.
- mg.set_generation_params(duration=4., extend_stride=2.)
- wav = mg.generate(
- ['youpi', 'lapin dort'])
- assert list(wav.shape) == [2, 1, 32000 * 4]
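Outside the test suite, the same MusicGen API is usually driven along the lines of the audiocraft README; a short sketch follows, where the "facebook/musicgen-small" checkpoint and output file names are illustrative choices:

```python
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

model = MusicGen.get_pretrained("facebook/musicgen-small")
model.set_generation_params(duration=8)                # seconds of audio per prompt
wav = model.generate(["happy rock", "ambient piano"])  # one waveform per prompt

for idx, one_wav in enumerate(wav):
    # Writes e.g. sample_0.wav, loudness-normalized.
    audio_write(f"sample_{idx}", one_wav.cpu(), model.sample_rate, strategy="loudness")
```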
diff --git a/spaces/tsi-org/Faceswapper/roop/processors/__init__.py b/spaces/tsi-org/Faceswapper/roop/processors/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/umair007/all_in_one_converter_modified/app.py b/spaces/umair007/all_in_one_converter_modified/app.py
deleted file mode 100644
index efa601f761efc3db911a9e332ab679013e44f6b6..0000000000000000000000000000000000000000
--- a/spaces/umair007/all_in_one_converter_modified/app.py
+++ /dev/null
@@ -1,521 +0,0 @@
-import os, gdown
-import aria2p
-import subprocess
-import requests
-import numpy as np
-import gradio as gr
-from diffusers import FlaxStableDiffusionPipeline
-import torch
-from safetensors.torch import save_file, load_file
-from huggingface_hub import model_info, create_repo, create_branch, upload_folder
-from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError
-
-def download_file(file_url_1, file_name_1):
- # Create the file directory if it doesn't exist
- os.makedirs("file", exist_ok=True)
-
- if "drive.google.com" in file_url_1:
- gdown.download(url=file_url_1, output=f"file/{file_name_1}", quiet=False, fuzzy=True)
- return "download file done!"
- elif "civitai.com" in file_url_1:
- os.system(f"aria2c --out=file/{file_name_1} --summary-interval=10 -c -x 10 -k 1M -s 10 {file_url_1}")
- return "download file done!"
- elif "huggingface.co" in file_url_1:
- os.system(f"aria2c --out=file/{file_name_1} --summary-interval=10 -c -x 10 -k 1M -s 10 {file_url_1}")
- return "download file done!"
- else:
- try:
-            response = requests.get(file_url_1)
-            if response.status_code == 200:
-                with open(f"file/{file_name_1}", "wb") as f:
-                    f.write(response.content)
-                return "download file done!"
- else:
- return "error download file!"
- except Exception as e:
- return "error download file!"
-
-
-
-def push_file_1(model_to, token, branch):
- try:
- repo_exists = True
- r_info = model_info(model_to, token=token)
- except RepositoryNotFoundError:
- repo_exists = False
- finally:
- if repo_exists:
- print(r_info)
- else:
- create_repo(model_to, private=True, token=token)
- try:
- branch_exists = True
- b_info = model_info(model_to, revision=branch, token=token)
- except RevisionNotFoundError:
- branch_exists = False
- finally:
- if branch_exists:
- print(b_info)
- else:
- create_branch(model_to, branch=branch, token=token)
- # Create the ckpt directory if it doesn't exist
- os.makedirs("file", exist_ok=True)
- upload_folder(folder_path="file", path_in_repo="", revision=branch, repo_id=model_to, commit_message=f"file", token=token)
- return "push files done!"
-
-def delete_file():
- os.system(f"rm -rf file")
- return "delete ckpt done!"
-
-def download_ckpt(ckpt_url, ckpt_name):
- # Create the ckpt directory if it doesn't exist
- os.makedirs("ckpt", exist_ok=True)
-
- if "drive.google.com" in ckpt_url:
- gdown.download(url=ckpt_url, output=f"ckpt/{ckpt_name}.ckpt", quiet=False, fuzzy=True)
- return "download ckpt done!"
- elif "civitai.com" in ckpt_url:
- os.system(f"aria2c --out=ckpt/{ckpt_name} --summary-interval=10 -c -x 10 -k 1M -s 10 {ckpt_url}")
- return "download ckpt done!"
- elif "huggingface.co" in ckpt_url:
- os.system(f"aria2c --out=ckpt/{ckpt_name} --summary-interval=10 -c -x 10 -k 1M -s 10 {ckpt_url}")
- return "download ckpt done!"
- else:
- try:
- response = requests.get(ckpt_url)
- if response.status_code == 200:
- with open(f"ckpt/{ckpt_name}.ckpt", "wb") as f:
- return "download ckpt done!"
- else:
- return "error download ckpt!"
- except Exception as e:
- return "error download ckpt!"
-
-
-def to_pt():
- os.system("wget -q https://raw.githubusercontent.com/huggingface/diffusers/main/scripts/convert_original_stable_diffusion_to_diffusers.py")
- os.system(f"python3 convert_original_stable_diffusion_to_diffusers.py --checkpoint_path model.ckpt --dump_path pt")
- return "convert to pt done!"
-
-def push_pt(model_to, token, branch):
- try:
- repo_exists = True
- r_info = model_info(model_to, token=token)
- except RepositoryNotFoundError:
- repo_exists = False
- finally:
- if repo_exists:
- print(r_info)
- else:
- create_repo(model_to, private=True, token=token)
- try:
- branch_exists = True
- b_info = model_info(model_to, revision=branch, token=token)
- except RevisionNotFoundError:
- branch_exists = False
- finally:
- if branch_exists:
- print(b_info)
- else:
- create_branch(model_to, branch=branch, token=token)
- upload_folder(folder_path="pt", path_in_repo="", revision=branch, repo_id=model_to, commit_message=f"pt", token=token)
- return "push pt done!"
-
-def delete_pt():
- os.system(f"rm -rf pt")
- return "delete pt done!"
-
-def clone_pt(model_url):
- os.system("git lfs install")
- os.system(f"git clone https://huggingface.co/{model_url} pt")
- return "clone pt done!"
-
-def to_flax():
- pipe, params = FlaxStableDiffusionPipeline.from_pretrained("pt", from_pt=True)
- pipe.save_pretrained("flax", params=params)
- return "convert to flax done!"
-
-def push_flax(model_to, token, branch):
- try:
- repo_exists = True
- r_info = model_info(model_to, token=token)
- except RepositoryNotFoundError:
- repo_exists = False
- finally:
- if repo_exists:
- print(r_info)
- else:
- create_repo(model_to, private=True, token=token)
- try:
- branch_exists = True
- b_info = model_info(model_to, revision=branch, token=token)
- except RevisionNotFoundError:
- branch_exists = False
- finally:
- if branch_exists:
- print(b_info)
- else:
- create_branch(model_to, branch=branch, token=token)
- upload_folder(folder_path="flax", path_in_repo="", revision=branch, repo_id=model_to, commit_message=f"flax", token=token)
- return "push flax done!"
-
-def delete_flax():
- os.system(f"rm -rf flax")
- return "delete flax done!"
-
-def to_ckpt(ckpt_name):
- os.system("wget -q https://raw.githubusercontent.com/huggingface/diffusers/main/scripts/convert_diffusers_to_original_stable_diffusion.py")
- os.system("mkdir ckpt")
- os.system(f"python3 convert_diffusers_to_original_stable_diffusion.py --model_path pt --checkpoint_path ckpt/{ckpt_name}.ckpt")
- return "convert to ckpt done!"
-
-def push_ckpt(model_to, token, branch):
- try:
- repo_exists = True
- r_info = model_info(model_to, token=token)
- except RepositoryNotFoundError:
- repo_exists = False
- finally:
- if repo_exists:
- print(r_info)
- else:
- create_repo(model_to, private=True, token=token)
- try:
- branch_exists = True
- b_info = model_info(model_to, revision=branch, token=token)
- except RevisionNotFoundError:
- branch_exists = False
- finally:
- if branch_exists:
- print(b_info)
- else:
- create_branch(model_to, branch=branch, token=token)
- upload_folder(folder_path="ckpt", path_in_repo="", revision=branch, repo_id=model_to, commit_message=f"ckpt", token=token)
- return "push ckpt done!"
-
-def delete_ckpt():
- os.system(f"rm -rf ckpt")
- return "delete ckpt done!"
-
-def download_ckpt_1(ckpt_url_2, ckpt_name_2):
- # Create the ckpt directory if it doesn't exist
- os.makedirs("ckpt", exist_ok=True)
-
- if "drive.google.com" in ckpt_url_2:
- gdown.download(url=ckpt_url_2, output=f"ckpt/{ckpt_name_2}.ckpt", quiet=False, fuzzy=True)
- return "download ckpt done!"
- elif "civitai.com" in ckpt_url_2:
- os.system(f"aria2c --out=ckpt/{ckpt_name_2}.ckpt --summary-interval=10 -c -x 10 -k 1M -s 10 {ckpt_url_2}")
- return "download ckpt done!"
- elif "huggingface.co" in ckpt_url_2:
- os.system(f"aria2c --out=ckpt/{ckpt_name_2}.ckpt --summary-interval=10 -c -x 10 -k 1M -s 10 {ckpt_url_2}")
- return "download ckpt done!"
- else:
- try:
- response = requests.get(ckpt_url_2)
- if response.status_code == 200:
- with open(f"ckpt/{ckpt_name_2}.ckpt", "wb") as f:
- return "download ckpt done!"
- else:
- return "error download ckpt!"
- except Exception as e:
- return "error download ckpt!"
-
-
-def to_safetensors(ckpt_name_2, safetensors_name_2):
- os.system("mkdir safetensors")
- weights = torch.load(f"ckpt/{ckpt_name_2}.ckpt")
- if "state_dict" in weights:
- weights = weights["state_dict"]
- save_file(weights, f"safetensors/{safetensors_name_2}.safetensors")
- return "convert to safetensors done!"
-
-def push_safetensors(model_to, token, branch):
- try:
- repo_exists = True
- r_info = model_info(model_to, token=token)
- except RepositoryNotFoundError:
- repo_exists = False
- finally:
- if repo_exists:
- print(r_info)
- else:
- create_repo(model_to, private=True, token=token)
- try:
- branch_exists = True
- b_info = model_info(model_to, revision=branch, token=token)
- except RevisionNotFoundError:
- branch_exists = False
- finally:
- if branch_exists:
- print(b_info)
- else:
- create_branch(model_to, branch=branch, token=token)
- upload_folder(folder_path="safetensors", path_in_repo="", revision=branch, repo_id=model_to, commit_message=f"safetensors", token=token)
- return "push safetensors done!"
-
-def delete_safetensors():
- os.system(f"rm -rf safetensors")
- return "delete safetensors done!"
-
-def download_safetensors(safetensors_url, safetensors_name):
- # Create the safetensors directory if it doesn't exist
- os.makedirs("safetensors", exist_ok=True)
-
- if "drive.google.com" in safetensors_url:
- gdown.download(url=safetensors_url, output=f"safetensors/{safetensors_name}.safetensors", quiet=False, fuzzy=True)
- return "download safetensors done!"
- elif "civitai.com" in safetensors_url:
- os.system(f"aria2c --out=safetensors/{safetensors_name}.safetensors --summary-interval=10 -c -x 10 -k 1M -s 10 {safetensors_url}")
- return "download safetensors done!"
- elif "huggingface.co" in safetensors_url:
- os.system(f"aria2c --out=safetensors/{safetensors_name}.safetensors --summary-interval=10 -c -x 10 -k 1M -s 10 {safetensors_url}")
- return "download safetensors done!"
- else:
- try:
- response = requests.get(safetensors_url)
- if response.status_code == 200:
- with open(f"safetensors/{safetensors_name}.safetensors", "wb") as f:
- return "download safetensors done!"
- else:
- return "error download safetensors!"
- except Exception as e:
- return "error download safetensors!"
-
-def from_safetensors_to_ckpt(safetensors_name, ckpt_name):
- weights = load_file(f"safetensors/{safetensors_name}.safetensors", device="cpu")
- os.system("mkdir ckpt")
- torch.save(weights, f"ckpt/{ckpt_name}.ckpt")
- return "convert to ckpt done!"
-
-def delete_torrent():
- os.system(f"rm -rf torrent")
- return "delete torrent done!"
-def delete_ckpt():
- os.system(f"rm -rf ckpt")
- return "delete ckpt done!"
-
-def delete_pt():
- os.system(f"rm -rf pt")
- return "delete pt done!"
-
-def delete_flax():
- os.system(f"rm -rf flax")
- return "delete flax done!"
-
-def delete_safetensors():
- os.system(f"rm -rf safetensors")
- return "delete safetensors done!"
-def delete_all():
- delete_ckpt()
- delete_torrent()
- delete_pt()
- delete_flax()
- delete_safetensors()
- return "delete all done!"
-
-block = gr.Blocks()
-
-with block:
-
- gr.Markdown(
- """
-    ## Now using aria2c for better downloading
-    ## 🚨 Please first click all delete buttons 🚨 🎉
-    ## Almost all download site links work
-    ### Special thanks to [@camenduru](https://huggingface.co/camenduru) for creating the initial script
-    ### Modified by [@umair007](https://huggingface.co/umair007)
-
- """)
-# Add delete buttons to the interface
- with gr.Group():
- with gr.Box():
- with gr.Row().style(equal_height=True):
- out_pt = gr.Textbox(show_label=False)
- out_ckpt = gr.Textbox(show_label=False)
- out_flax = gr.Textbox(show_label=False)
- out_torrent = gr.Textbox(show_label=False)
- out_safetensors = gr.Textbox(show_label=False)
- out_delete_all = gr.Textbox(show_label=False)
- with gr.Row().style(equal_height=True):
- # Delete buttons
- btn_delete_pt = gr.Button("delete pt")
- btn_delete_torrent = gr.Button("Delete torrent")
- btn_delete_ckpt = gr.Button("delete ckpt")
- btn_delete_flax = gr.Button("delete flax")
- btn_delete_safetensors = gr.Button("delete safetensors")
- # Delete all button
- btn_delete_all = gr.Button("delete all")
- # Delete click
- btn_delete_pt.click(delete_pt, outputs=out_pt)
- btn_delete_ckpt.click(delete_ckpt, outputs=out_ckpt)
- btn_delete_flax.click(delete_flax, outputs=out_flax)
- btn_delete_safetensors.click(delete_safetensors, outputs=out_safetensors)
- btn_delete_torrent.click(delete_torrent, outputs=out_torrent)
- btn_delete_all.click(delete_all, outputs=out_delete_all)
-
- gr.Markdown(
- """
- ### download and push file ckpt, yaml, safetensors etc
- file_url = https://civitai.com/api/download/models/4224 or https://huggingface.co/prompthero/openjourney/resolve/main/mdjrny-v4.ckpt or https://drive.google.com/file/d/file-id/view?usp=share_link
-    file_name = openjourney.ckpt or openjourney.safetensors, whatever your extension is
-    extension_name = ckpt, yaml, safetensors etc., whatever your file extension is
- ckpt_model_to = camenduru/openjourney
- branch = ckpt
- token = get from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) new token role=write
- """)
- with gr.Group():
- with gr.Box():
- with gr.Row().style(equal_height=True):
- text_file_url_1 = gr.Textbox(show_label=False, max_lines=1, placeholder="file_url")
- text_file_name_1 = gr.Textbox(show_label=False, max_lines=1, placeholder="file_name ex file.ckpt ")
- text_file_model_to = gr.Textbox(show_label=False, max_lines=1, placeholder="file_model_to")
- text_file_branch = gr.Textbox(show_label=False, value="file", max_lines=1, placeholder="file_branch")
- text_file_token = gr.Textbox(show_label=False, max_lines=1, placeholder="🤗 token")
- out_file = gr.Textbox(show_label=False)
- with gr.Row().style(equal_height=True):
- btn_download_file = gr.Button("Download file")
- btn_push_file_1 = gr.Button("Push file to 🤗")
- btn_delete_file = gr.Button("Delete file")
- btn_download_file.click(download_file, inputs=[text_file_url_1, text_file_name_1], outputs=out_file)
- btn_push_file_1.click(push_file_1, inputs=[text_file_model_to, text_file_token, text_file_branch], outputs=out_file)
- btn_delete_file.click(delete_file, outputs=out_file)
-
- gr.Markdown(
- """
- ### ckpt to pytorch
- ckpt_url = https://civitai.com/api/download/models/4224 or https://huggingface.co/prompthero/openjourney/resolve/main/mdjrny-v4.ckpt or https://drive.google.com/file/d/file-id/view?usp=share_link
- pt_model_to = camenduru/openjourney
- branch = main
- token = get from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) new token role=write
- """)
-
- with gr.Group():
- with gr.Box():
- with gr.Row().style(equal_height=True):
- text_ckpt_url = gr.Textbox(show_label=False, max_lines=1, placeholder="ckpt_url")
- text_pt_model_to = gr.Textbox(show_label=False, max_lines=1, placeholder="pt_model_to")
- text_pt_branch = gr.Textbox(show_label=False, value="main", max_lines=1, placeholder="branch")
- text_pt_token = gr.Textbox(show_label=False, max_lines=1, placeholder="🤗 token")
- out_pt = gr.Textbox(show_label=False)
- with gr.Row().style(equal_height=True):
- btn_download_ckpt = gr.Button("Download CKPT")
- btn_to_pt = gr.Button("Convert to PT")
- btn_push_pt = gr.Button("Push PT to 🤗")
- btn_delete_pt = gr.Button("Delete PT")
- btn_download_ckpt.click(download_ckpt, inputs=[text_ckpt_url], outputs=out_pt)
- btn_to_pt.click(to_pt, outputs=out_pt)
- btn_push_pt.click(push_pt, inputs=[text_pt_model_to, text_pt_token, text_pt_branch], outputs=out_pt)
- btn_delete_pt.click(delete_pt, outputs=out_pt)
- gr.Markdown(
- """
- ### pytorch to flax
- pt_model_from = prompthero/openjourney
- flax_model_to = camenduru/openjourney
- branch = flax
- token = get from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) new token role=write
- """)
-
- with gr.Group():
- with gr.Box():
- with gr.Row().style(equal_height=True):
- text_pt_model_from = gr.Textbox(show_label=False, max_lines=1, placeholder="pt_model_from")
- text_flax_model_to = gr.Textbox(show_label=False, max_lines=1, placeholder="flax_model_to")
- text_flax_branch = gr.Textbox(show_label=False, value="flax", max_lines=1, placeholder="flax_branch")
- text_flax_token = gr.Textbox(show_label=False, max_lines=1, placeholder="🤗 token")
- out_flax = gr.Textbox(show_label=False)
- with gr.Row().style(equal_height=True):
- btn_clone_pt = gr.Button("Clone PT from 🤗")
- btn_to_flax = gr.Button("Convert to Flax")
- btn_push_flax = gr.Button("Push Flax to 🤗")
- btn_delete_flax = gr.Button("Delete Flax")
- btn_clone_pt.click(clone_pt, inputs=[text_pt_model_from], outputs=out_flax)
- btn_to_flax.click(to_flax, outputs=out_flax)
- btn_push_flax.click(push_flax, inputs=[text_flax_model_to, text_flax_token, text_flax_branch], outputs=out_flax)
- btn_delete_flax.click(delete_flax, outputs=out_flax)
-
- gr.Markdown(
- """
- ### pytorch to ckpt
- pt_model_from = prompthero/openjourney
- ckpt_name = openjourney
- ckpt_model_to = camenduru/openjourney
- branch = ckpt
- token = get from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) new token role=write
- """)
- with gr.Group():
- with gr.Box():
- with gr.Row().style(equal_height=True):
- text_pt_model_from = gr.Textbox(show_label=False, max_lines=1, placeholder="pt_model_from")
- text_ckpt_name = gr.Textbox(show_label=False, max_lines=1, placeholder="ckpt_name")
- text_ckpt_model_to = gr.Textbox(show_label=False, max_lines=1, placeholder="ckpt_model_to")
- text_ckpt_branch = gr.Textbox(show_label=False, value="ckpt", max_lines=1, placeholder="ckpt_branch")
- text_ckpt_token = gr.Textbox(show_label=False, max_lines=1, placeholder="🤗 token")
- out_ckpt = gr.Textbox(show_label=False)
- with gr.Row().style(equal_height=True):
- btn_clone_pt = gr.Button("Clone PT from 🤗")
- btn_to_ckpt = gr.Button("Convert to CKPT")
- btn_push_ckpt = gr.Button("Push CKPT to 🤗")
- btn_delete_ckpt = gr.Button("Delete CKPT")
- btn_clone_pt.click(clone_pt, inputs=[text_pt_model_from], outputs=out_ckpt)
- btn_to_ckpt.click(to_ckpt, inputs=[text_ckpt_name], outputs=out_ckpt)
- btn_push_ckpt.click(push_ckpt, inputs=[text_ckpt_model_to, text_ckpt_token, text_ckpt_branch], outputs=out_ckpt)
- btn_delete_ckpt.click(delete_ckpt, outputs=out_ckpt)
- gr.Markdown(
- """
- ### ckpt to safetensors
- ckpt_url = https://civitai.com/api/download/models/4224 or https://huggingface.co/prompthero/openjourney/resolve/main/mdjrny-v4.ckpt or https://drive.google.com/file/d/file-id/view?usp=share_link
- safetensors_name = openjourney
- safetensors_model_to = camenduru/openjourney
- branch = safetensors
- token = get from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) new token role=write
- """)
- with gr.Group():
- with gr.Box():
- with gr.Row().style(equal_height=True):
- text_ckpt_url_2 = gr.Textbox(show_label=False, max_lines=1, placeholder="ckpt_url_2")
- text_ckpt_name_2 = gr.Textbox(show_label=False, max_lines=1, placeholder="ckpt_name_2")
- text_safetensors_name_2 = gr.Textbox(show_label=False, max_lines=1, placeholder="safetensors_name_2")
- text_safetensors_model_to = gr.Textbox(show_label=False, max_lines=1, placeholder="safetensors_model_to")
- text_safetensors_branch = gr.Textbox(show_label=False, value="safetensors", max_lines=1, placeholder="safetensors_branch")
- text_safetensors_token = gr.Textbox(show_label=False, max_lines=1, placeholder="🤗 token")
- out_safetensors = gr.Textbox(show_label=False)
- with gr.Row().style(equal_height=True):
- btn_download_ckpt_1 = gr.Button("Download CKPT")
- btn_to_safetensors = gr.Button("Convert to Safetensors")
- btn_push_safetensors = gr.Button("Push Safetensors to 🤗")
- btn_delete_safetensors = gr.Button("Delete Safetensors")
- btn_download_ckpt_1.click(download_ckpt_1, inputs=[text_ckpt_url_2, text_ckpt_name_2], outputs=out_safetensors)
- btn_to_safetensors.click(to_safetensors, inputs=[text_ckpt_name_2, text_safetensors_name_2], outputs=out_safetensors)
- btn_push_safetensors.click(push_safetensors, inputs=[text_safetensors_model_to, text_safetensors_token, text_safetensors_branch], outputs=out_safetensors)
- btn_delete_safetensors.click(delete_safetensors, outputs=out_safetensors)
- gr.Markdown(
- """
- ### safetensors to ckpt
- safetensors_url = https://civitai.com/api/download/models/4224 or https://huggingface.co/prompthero/openjourney/resolve/main/mdjrny-v4.ckpt or https://drive.google.com/file/d/file-id/view?usp=share_link
- ckpt_name = openjourney
- ckpt_model_to = camenduru/openjourney
- branch = ckpt
- token = get from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens) new token role=write
- """)
- with gr.Group():
- with gr.Box():
- with gr.Row().style(equal_height=True):
- text_safetensors_url = gr.Textbox(show_label=False, max_lines=1, placeholder="safetensors_url")
- text_safetensors_name = gr.Textbox(show_label=False, max_lines=1, placeholder="safetensors_name")
- text_safetensors_to_ckpt_name = gr.Textbox(show_label=False, max_lines=1, placeholder="ckpt_name")
- text_safetensors_to_ckpt_model_to = gr.Textbox(show_label=False, max_lines=1, placeholder="ckpt_model_to")
- text_safetensors_to_ckpt_branch = gr.Textbox(show_label=False, value="ckpt", max_lines=1, placeholder="ckpt_branch")
- text_safetensors_to_ckpt_token = gr.Textbox(show_label=False, max_lines=1, placeholder="🤗 token")
- out_safetensors_to_ckpt = gr.Textbox(show_label=False)
- with gr.Row().style(equal_height=True):
- btn_download_safetensors = gr.Button("Download Safetensors")
- btn_safetensors_to_ckpt = gr.Button("Convert to CKPT")
- btn_push_safetensors_to_ckpt = gr.Button("Push CKPT to 🤗")
- btn_delete_safetensors_ckpt = gr.Button("Delete CKPT")
- btn_download_safetensors.click(download_safetensors, inputs=[text_safetensors_url, text_safetensors_name], outputs=out_safetensors_to_ckpt)
- btn_safetensors_to_ckpt.click(from_safetensors_to_ckpt, inputs=[text_safetensors_name, text_safetensors_to_ckpt_name], outputs=out_safetensors_to_ckpt)
- btn_push_safetensors_to_ckpt.click(push_ckpt, inputs=[text_safetensors_to_ckpt_model_to, text_safetensors_to_ckpt_token, text_safetensors_to_ckpt_branch], outputs=out_safetensors_to_ckpt)
- btn_delete_safetensors_ckpt.click(delete_ckpt, outputs=out_safetensors_to_ckpt)
-
-
-block.launch()
\ No newline at end of file
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Courselab 2.7 Full.md b/spaces/usbethFlerru/sovits-modelsV2/example/Courselab 2.7 Full.md
deleted file mode 100644
index 22e2d77206c6b385abf3c3a059bbe70069a8512a..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/Courselab 2.7 Full.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-The course planning and creation features in CourseLab are intended for classroom use only. You will need a CourseLab account in order to download and update the software and to create courses. However, if you are downloading CourseLab with the intent of using it outside the classroom, you may not need an account; you can download and install the software for free from our website.
-Courselab 2.7 Full Download Zip ✺ https://urlcod.com/2uyXnu
-The trial version is being updated to v2.7. You will need to buy the full version of CourseLab if you want to create LMS courses with all the features that CourseLab is capable of delivering. A student may use CourseLab for six months without purchasing the full version; after the 6-month trial period ends, the full version can be purchased with a credit card.
-The advantage of the free trial version is that a student can get familiar with the interface, which is one of the most important factors when choosing an LMS for your institution. The full version allows you to create courses in English, French, German, and Spanish. The full version has more modules than the free version, and you can create units of study, assignment templates, quizzes, tests, and more. In addition, there is a database of over 1 million questions that you can use as a learning and assessment resource. CourseLab was developed for use by instructors and learning technologists.
-The free version of CourseLab was developed to be used by instructors and learning technologists. It is fully functional but cannot be used for commercial purposes. A student can use CourseLab for six months without purchasing the full version; after the 6-month trial period ends, the full version can be purchased with a credit card.
-
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/examples/YOLOv8-ONNXRuntime/README.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/examples/YOLOv8-ONNXRuntime/README.md
deleted file mode 100644
index b206b2e2be97aec2255915c13ef3358ce450a4ba..0000000000000000000000000000000000000000
--- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/examples/YOLOv8-ONNXRuntime/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# YOLOv8 - ONNX Runtime
-
-This project implements YOLOv8 using ONNX Runtime.
-
-## Installation
-
-To run this project, you need to install the required dependencies. The following instructions will guide you through the installation process.
-
-### Installing Required Dependencies
-
-You can install the required dependencies by running the following command:
-
-```bash
-pip install -r requirements.txt
-```
-
-### Installing `onnxruntime-gpu`
-
-If you have an NVIDIA GPU and want to leverage GPU acceleration, you can install the onnxruntime-gpu package using the following command:
-
-```bash
-pip install onnxruntime-gpu
-```
-
-Note: Make sure you have the appropriate GPU drivers installed on your system.
-
-### Installing `onnxruntime` (CPU version)
-
-If you don't have an NVIDIA GPU or prefer to use the CPU version of onnxruntime, you can install the onnxruntime package using the following command:
-
-```bash
-pip install onnxruntime
-```
-
-### Usage
-
-After successfully installing the required packages, you can run the YOLOv8 implementation using the following command:
-
-```bash
-python main.py --model yolov8n.onnx --img image.jpg --conf-thres 0.5 --iou-thres 0.5
-```
-
-Make sure to replace `yolov8n.onnx` with the path to your YOLOv8 ONNX model file, `image.jpg` with the path to your input image, and adjust the confidence threshold (`conf-thres`) and IoU threshold (`iou-thres`) values as needed.
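
For orientation, here is a minimal, hypothetical sketch of the kind of ONNX Runtime inference pass a script like `main.py` performs (session creation, preprocessing to the model's input shape, one forward pass). It is not the repository's actual implementation: the 640×640 input size, the execution provider, and the printed output shape are assumptions, and post-processing (box decoding, NMS) is omitted.

```python
# Hypothetical minimal YOLOv8 ONNX Runtime inference sketch -- not the repository's main.py.
import cv2
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("yolov8n.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name

img = cv2.imread("image.jpg")                                    # BGR, HWC
blob = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)                      # YOLOv8 expects RGB
blob = cv2.resize(blob, (640, 640)).astype(np.float32) / 255.0   # assumed 640x640 export size
blob = blob.transpose(2, 0, 1)[None]                             # NCHW, shape (1, 3, 640, 640)

outputs = session.run(None, {input_name: blob})
print(outputs[0].shape)  # raw predictions, e.g. (1, 84, 8400) for a COCO-trained model
```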
diff --git a/spaces/valurank/keyword-extraction-demo/app.py b/spaces/valurank/keyword-extraction-demo/app.py
deleted file mode 100644
index 36408e2329f0d7099b1e9dd1adaaa4cbd719ecfc..0000000000000000000000000000000000000000
--- a/spaces/valurank/keyword-extraction-demo/app.py
+++ /dev/null
@@ -1,215 +0,0 @@
-import time
-import pandas as pd
-
-import torch
-from keybert import KeyBERT
-from sentence_transformers import SentenceTransformer
-from keyphrase_vectorizers import KeyphraseCountVectorizer
-from transformers import T5ForConditionalGeneration,T5Tokenizer
-#from fastT5 import export_and_get_onnx_model, set_auth_token
-
-import nltk
-from nltk.tokenize import sent_tokenize
-
-from huggingface_hub import snapshot_download, HfFolder
-import streamlit as st
-
-import traceback
-import logging
-
-logger = logging.getLogger(__name__)
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-HfFolder.save_token(st.secrets["hf-auth-token"])
-
-@st.cache(allow_output_mutation=True)
-def load_base_model():
- try:
- nltk.download('stopwords')
- nltk.download('punkt')
- # Load KeyBert Model
- tmp_model = SentenceTransformer('valurank/MiniLM-L6-Keyword-Extraction', use_auth_token=True)
- kw_extractor = KeyBERT(tmp_model)
-
- # Load T5 for Paraphrasing
- t5_tokenizer = T5Tokenizer.from_pretrained('t5-base')
- t5_model = T5ForConditionalGeneration.from_pretrained('valurank/t5-paraphraser', use_auth_token=True)
- t5_model = t5_model.to(device)
- return kw_extractor, t5_model, t5_tokenizer
- except Exception:
- st.error('Error Loading Models. Please contact admin')
- logger.error(traceback.format_exc())
-
-
-
-def get_keybert_results_with_vectorizer(text, number_of_results=20):
- try:
- keywords = kw_extractor.extract_keywords(text, vectorizer=KeyphraseCountVectorizer(), stop_words=None, top_n=number_of_results)
- keywords = [i for i in keywords if i[1] >= 0.25]
- return keywords
- except Exception:
- st.error('Error running Keybert. Please contact admin')
- logger.error(traceback.format_exc())
-
-
-
-def t5_paraphraser(text, number_of_results=5):
- try:
- text = "paraphrase: " + text
- encoding = t5_tokenizer.encode_plus(text, pad_to_max_length=True, return_tensors="pt")
- input_ids, attention_masks = encoding["input_ids"].to(device), encoding["attention_mask"].to(device)
-
- beam_outputs = t5_model.generate(
- input_ids=input_ids, attention_mask=attention_masks,
- do_sample=True,
- max_length=1024,
- top_k=50,
- top_p=0.95,
- early_stopping=True,
- num_return_sequences=number_of_results
- )
-
- final_outputs =[]
- for beam_output in beam_outputs:
- sent = t5_tokenizer.decode(beam_output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
- final_outputs.append(sent)
- return final_outputs
- except Exception:
- st.error('Error running T5 Paraphrasing. Please contact admin')
- logger.error(traceback.format_exc())
-
-
-
-def run_long_extraction(article, number_of_paraphrases):
- try:
- start1 = time.time()
-        with st.spinner('Extracting Keywords from Original Document...'):
- original_keywords = get_keybert_results_with_vectorizer(article, number_of_results=30)
- article_sentences = sent_tokenize(article)
- target_sentences = [sent for sent in article_sentences if any(kw[0] in sent for kw in original_keywords)]
-
- st.success('Keyword Extraction from Original Document finished in {}'.format(time.time() - start1))
- st.info(f'Total Sentences in Article : {len(article_sentences)}')
- st.info(f'Total Target Sentences Selected : {len(target_sentences)}')
-
- start2 = time.time()
- with st.spinner('Extracting Keywords from Paraphrased Target Sentences...'):
- t5_paraphrasing_keywords = []
- for sent in target_sentences:
- ### T5
- t5_paraphrased = t5_paraphraser(sent, number_of_results = number_of_paraphrases)
- t5_keywords = [get_keybert_results_with_vectorizer(i) for i in t5_paraphrased]
- t5_keywords = [(word[0], word[1]) for s in t5_keywords for word in s]
- t5_paraphrasing_keywords.extend(t5_keywords)
- st.success('Keyword Extraction from Paraphrased Target Sentences finished in {}'.format(time.time() - start2))
-
- original_keywords_df = pd.DataFrame(original_keywords, columns=['Keyword', 'Score'])
-
- t5_keywords_df = pd.DataFrame(t5_paraphrasing_keywords, columns=['Keyword', 'Score']).sort_values(by='Score', ascending=False).drop_duplicates(subset=['Keyword'], keep='first').reset_index(drop=True)
-
- unique_keywords_df = pd.DataFrame([i for i in t5_paraphrasing_keywords if not original_keywords_df['Keyword'].str.contains(i[0], regex=False, case=False).any()], columns=['Keyword', 'Score']).sort_values(by='Score', ascending=False).drop_duplicates(subset=['Keyword'], keep='first').reset_index(drop=True)
-
- total_end = time.time()-start1
-
- return t5_keywords_df, original_keywords_df, unique_keywords_df, total_end
- except Exception:
- st.error('Error running Extraction Pipeline. Please contact admin')
- logger.error(traceback.format_exc())
-
-
-
-def run_short_extraction(article, number_of_paraphrases):
- try:
- start1 = time.time()
- original_keywords = get_keybert_results_with_vectorizer(article)
- article_sentences = sent_tokenize(article)
- st.info(f'Total Sentences in Article : {len(article_sentences)}')
-
-        target_sentences = []
-        tmp = []
-        token_count = 0
-        for i in article_sentences:
-            enc = t5_tokenizer.encode(i)
-            if token_count + len(enc) <= 96:
-                tmp.append(i)
-                token_count += len(enc)
-            else:
-                target_sentences.append(' '.join(tmp))
-                token_count = len(enc)
-                tmp = [i]
-        if tmp:
-            # flush the final group so trailing sentences are not dropped
-            target_sentences.append(' '.join(tmp))
-
- start2 = time.time()
-        with st.spinner('Extracting Keywords from Paraphrased Sentence Groups...'):
- t5_paraphrasing_keywords = []
- for sent in target_sentences:
- ### T5
- t5_paraphrased = t5_paraphraser(sent, number_of_results = number_of_paraphrases)
- t5_keywords = [get_keybert_results_with_vectorizer(i) for i in t5_paraphrased]
- t5_keywords = [(word[0], word[1]) for s in t5_keywords for word in s]
- t5_paraphrasing_keywords.extend(t5_keywords)
- st.success('Keyword Extraction from Paraphrased Grouped Sentences finished in {}'.format(time.time() - start2))
-
- original_keywords_df = pd.DataFrame(original_keywords, columns=['Keyword', 'Score'])
-
- t5_keywords_df = pd.DataFrame(t5_paraphrasing_keywords, columns=['Keyword', 'Score']).sort_values(by='Score', ascending=False).drop_duplicates(subset=['Keyword'], keep='first').reset_index(drop=True)
-
- unique_keywords_df = pd.DataFrame([i for i in t5_paraphrasing_keywords if not original_keywords_df['Keyword'].str.contains(i[0], regex=False, case=False).any()], columns=['Keyword', 'Score']).sort_values(by='Score', ascending=False).drop_duplicates(subset=['Keyword'], keep='first').reset_index(drop=True)
-
- total_end = time.time()-start1
-
- return t5_keywords_df, original_keywords_df, unique_keywords_df, total_end
- except Exception:
- st.error('Error running Extraction Pipeline. Please contact admin')
- logger.error(traceback.format_exc())
-
-
-
-
-def check_document_length(article, number_of_paraphrases):
- total_tokens = len(t5_tokenizer.encode(article))
- st.info(f'Token Counts for Encoded Document: {total_tokens}')
-
- if total_tokens >= 512:
- st.info('Running Extraction for Long Document')
- t5_keywords_df, original_keywords_df, unique_keywords_df, total_end = run_long_extraction(article, number_of_paraphrases)
- else:
- st.info('Running Extraction for Short Document')
- t5_keywords_df, original_keywords_df, unique_keywords_df, total_end = run_short_extraction(article, number_of_paraphrases)
-
- return t5_keywords_df, original_keywords_df, unique_keywords_df, total_end
-
-
-
-kw_extractor, t5_model, t5_tokenizer = load_base_model()
-
-
-st.title('Exhaustive Keyword Extraction with Paraphrasing')
-with st.sidebar:
- st.header('Overview')
-    st.markdown('This demo allows users to input a text article and generate synonym-aware keywords. The pipeline uses a T5 model for paraphrasing target sentences and sentence-transformers-based keyword extraction.')
-
- st.header('Parameters')
- # number_of_keywords = st.slider('Number of Keywords to extract for each target sentence', min_value=5, max_value=50, step=5, value=20)
- number_of_paraphrases = st.slider('Number of Paraphrased versions to generate for each target sentence', min_value=1, max_value=20, step=1, value=5)
-
- st.header('Specifications')
- # st.markdown('To generate context aware and OOV keywords for long, we first run KeyBert for keyword extraction on the original article. The sentences which had Keywords are then passed through T5 for generating multiple paraphrased versions. These paraphrased sentences are then run through Keyword Extraction again to generate the final results')
-
-
-
-doc = st.text_area("Enter a custom document")
-if doc:
- t5_keywords_df, original_keywords_df, unique_keywords_df, total_end = check_document_length(doc, number_of_paraphrases)
-
- # extract_paraphrased_article(input_list[0])
- st.text(f'PIPELINE RUNTIME: {total_end}\n')
-
- st.subheader('\nOriginal Keywords Extracted:\n\n')
- st.dataframe(original_keywords_df)
-
- st.subheader('\nT5 Unique New Keywords Extracted:\n\n')
- st.dataframe(unique_keywords_df)
-
- st.subheader('\nT5 Keywords Extracted:\n\n')
- st.dataframe(t5_keywords_df)
diff --git a/spaces/vivym/image-matting-app/ppmatting/models/backbone/__init__.py b/spaces/vivym/image-matting-app/ppmatting/models/backbone/__init__.py
deleted file mode 100644
index b08005b31477e57488132cd2f5d3692c6e687b4f..0000000000000000000000000000000000000000
--- a/spaces/vivym/image-matting-app/ppmatting/models/backbone/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .mobilenet_v2 import *
-from .hrnet import *
-from .resnet_vd import *
-from .vgg import *
-from .gca_enc import *
\ No newline at end of file
diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py
deleted file mode 100644
index a33e7972877f902d0e7d18401ca675e3e4e60a18..0000000000000000000000000000000000000000
--- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained=None,
- backbone=dict(
- type='UNet',
- in_channels=3,
- base_channels=64,
- num_stages=5,
- strides=(1, 1, 1, 1, 1),
- enc_num_convs=(2, 2, 2, 2, 2),
- dec_num_convs=(2, 2, 2, 2),
- downsamples=(True, True, True, True),
- enc_dilations=(1, 1, 1, 1, 1),
- dec_dilations=(1, 1, 1, 1),
- with_cp=False,
- conv_cfg=None,
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'),
- upsample_cfg=dict(type='InterpConv'),
- norm_eval=False),
- decode_head=dict(
- type='FCNHead',
- in_channels=64,
- in_index=4,
- channels=64,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=2,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=128,
- in_index=3,
- channels=64,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=2,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='slide', crop_size=256, stride=170))
diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
deleted file mode 100644
index a0b6b345640a895368ac8a647afef6f24333d90e..0000000000000000000000000000000000000000
--- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .base import LoggerHook
-from .dvclive import DvcliveLoggerHook
-from .mlflow import MlflowLoggerHook
-from .neptune import NeptuneLoggerHook
-from .pavi import PaviLoggerHook
-from .tensorboard import TensorboardLoggerHook
-from .text import TextLoggerHook
-from .wandb import WandbLoggerHook
-
-__all__ = [
- 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
- 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
- 'NeptuneLoggerHook', 'DvcliveLoggerHook'
-]
diff --git a/spaces/vumichien/canvas_controlnet/ldm/models/diffusion/sampling_util.py b/spaces/vumichien/canvas_controlnet/ldm/models/diffusion/sampling_util.py
deleted file mode 100644
index 7eff02be6d7c54d43ee6680636ac0698dd3b3f33..0000000000000000000000000000000000000000
--- a/spaces/vumichien/canvas_controlnet/ldm/models/diffusion/sampling_util.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import torch
-import numpy as np
-
-
-def append_dims(x, target_dims):
- """Appends dimensions to the end of a tensor until it has target_dims dimensions.
- From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
- dims_to_append = target_dims - x.ndim
- if dims_to_append < 0:
- raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
- return x[(...,) + (None,) * dims_to_append]
-
-
-def norm_thresholding(x0, value):
- s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
- return x0 * (value / s)
-
-
-def spatial_norm_thresholding(x0, value):
- # b c h w
- s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
- return x0 * (value / s)
\ No newline at end of file
diff --git a/spaces/wf-genius/Control-A-Video/model/video_diffusion/models/resnet.py b/spaces/wf-genius/Control-A-Video/model/video_diffusion/models/resnet.py
deleted file mode 100644
index 708487062dca40d4942cabb2ad02f63072147f88..0000000000000000000000000000000000000000
--- a/spaces/wf-genius/Control-A-Video/model/video_diffusion/models/resnet.py
+++ /dev/null
@@ -1,486 +0,0 @@
-# Copyright 2023 Bytedance Ltd. and/or its affiliates
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from functools import partial
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from einops import rearrange
-
-
-class PseudoConv3d(nn.Conv2d):
- def __init__(self, in_channels, out_channels, kernel_size, temporal_kernel_size=None, **kwargs):
- super().__init__(
- in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=kernel_size,
- **kwargs,
- )
- if temporal_kernel_size is None:
- temporal_kernel_size = kernel_size
-
- self.conv_temporal = (
- nn.Conv1d(
- out_channels,
- out_channels,
- kernel_size=temporal_kernel_size,
- padding=temporal_kernel_size // 2,
- )
- if kernel_size > 1
- else None
- )
-
- if self.conv_temporal is not None:
- nn.init.dirac_(self.conv_temporal.weight.data) # initialized to be identity
- nn.init.zeros_(self.conv_temporal.bias.data)
-
- def forward(self, x):
- b = x.shape[0]
-
- is_video = x.ndim == 5
- if is_video:
- x = rearrange(x, "b c f h w -> (b f) c h w")
-
- x = super().forward(x)
-
- if is_video:
- x = rearrange(x, "(b f) c h w -> b c f h w", b=b)
-
- if self.conv_temporal is None or not is_video:
- return x
-
- *_, h, w = x.shape
-
- x = rearrange(x, "b c f h w -> (b h w) c f")
-
-        x = self.conv_temporal(x)  # apply a 1D temporal convolution along the frame axis; channel count is unchanged (models temporal information)
-
- x = rearrange(x, "(b h w) c f -> b c f h w", h=h, w=w)
-
- return x
-
-
-class UpsamplePseudo3D(nn.Module):
- """
- An upsampling layer with an optional convolution.
-
- Parameters:
- channels: channels in the inputs and outputs.
- use_conv: a bool determining if a convolution is applied.
-        use_conv_transpose: a bool determining if a transposed convolution is used (not implemented here).
-        out_channels: number of output channels; defaults to channels.
- """
-
- def __init__(
- self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"
- ):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_conv_transpose = use_conv_transpose
- self.name = name
-
- conv = None
- if use_conv_transpose:
- raise NotImplementedError
- conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1)
- elif use_conv:
- conv = PseudoConv3d(self.channels, self.out_channels, 3, padding=1)
-
- # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
- if name == "conv":
- self.conv = conv
- else:
- self.Conv2d_0 = conv
-
- def forward(self, hidden_states, output_size=None):
- assert hidden_states.shape[1] == self.channels
-
-        # Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16
- # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch
- # https://github.com/pytorch/pytorch/issues/86679
- dtype = hidden_states.dtype
- if dtype == torch.bfloat16:
- hidden_states = hidden_states.to(torch.float32)
-
- # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
- if hidden_states.shape[0] >= 64:
- hidden_states = hidden_states.contiguous()
-
- b = hidden_states.shape[0]
- is_video = hidden_states.ndim == 5
- if is_video:
- hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
-
- # if `output_size` is passed we force the interpolation output
- # size and do not make use of `scale_factor=2`
- if output_size is None:
-            # interpolate first, then apply the conv
- hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
- else:
- hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
-
- # If the input is bfloat16, we cast back to bfloat16
- if dtype == torch.bfloat16:
- hidden_states = hidden_states.to(dtype)
-
- if is_video:
- hidden_states = rearrange(hidden_states, "(b f) c h w -> b c f h w", b=b)
-
- if self.use_conv:
- if self.name == "conv":
- hidden_states = self.conv(hidden_states)
- else:
- hidden_states = self.Conv2d_0(hidden_states)
-
- return hidden_states
-
-
-class DownsamplePseudo3D(nn.Module):
- """
- A downsampling layer with an optional convolution.
-
- Parameters:
- channels: channels in the inputs and outputs.
- use_conv: a bool determining if a convolution is applied.
-        out_channels: number of output channels; defaults to channels.
-        padding: padding applied to the downsampling convolution.
- """
-
- def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.padding = padding
- stride = 2
- self.name = name
-
- if use_conv:
- conv = PseudoConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
- else:
- assert self.channels == self.out_channels
- conv = nn.AvgPool2d(kernel_size=stride, stride=stride)
-
- if name == "conv":
- self.Conv2d_0 = conv
- self.conv = conv
- elif name == "Conv2d_0":
- self.conv = conv
- else:
- self.conv = conv
-
- def forward(self, hidden_states):
- assert hidden_states.shape[1] == self.channels
- if self.use_conv and self.padding == 0:
- pad = (0, 1, 0, 1)
- hidden_states = F.pad(hidden_states, pad, mode="constant", value=0)
-
- assert hidden_states.shape[1] == self.channels
- if self.use_conv:
- hidden_states = self.conv(hidden_states)
- else:
- b = hidden_states.shape[0]
- is_video = hidden_states.ndim == 5
- if is_video:
- hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
- hidden_states = self.conv(hidden_states)
- if is_video:
- hidden_states = rearrange(hidden_states, "(b f) c h w -> b c f h w", b=b)
-
- return hidden_states
-
-
-class ResnetBlockPseudo3D(nn.Module):
- def __init__(
- self,
- *,
- in_channels,
- out_channels=None,
- conv_shortcut=False,
- dropout=0.0,
- temb_channels=512,
- groups=32,
- groups_out=None,
- pre_norm=True,
- eps=1e-6,
- non_linearity="swish",
- time_embedding_norm="default",
- kernel=None,
- output_scale_factor=1.0,
- use_in_shortcut=None,
- up=False,
- down=False,
- ):
- super().__init__()
- self.pre_norm = pre_norm
- self.pre_norm = True
- self.in_channels = in_channels
- out_channels = in_channels if out_channels is None else out_channels
- self.out_channels = out_channels
- self.use_conv_shortcut = conv_shortcut
- self.time_embedding_norm = time_embedding_norm
- self.up = up
- self.down = down
- self.output_scale_factor = output_scale_factor
-
- if groups_out is None:
- groups_out = groups
-
- self.norm1 = torch.nn.GroupNorm(
- num_groups=groups, num_channels=in_channels, eps=eps, affine=True
- )
-
- self.conv1 = PseudoConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
- if temb_channels is not None:
- if self.time_embedding_norm == "default":
- time_emb_proj_out_channels = out_channels
- elif self.time_embedding_norm == "scale_shift":
- time_emb_proj_out_channels = out_channels * 2
- else:
- raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
-
- self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)
- else:
- self.time_emb_proj = None
-
- self.norm2 = torch.nn.GroupNorm(
- num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True
- )
- self.dropout = torch.nn.Dropout(dropout)
- self.conv2 = PseudoConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
- if non_linearity == "swish":
- self.nonlinearity = lambda x: F.silu(x)
- elif non_linearity == "mish":
- self.nonlinearity = Mish()
- elif non_linearity == "silu":
- self.nonlinearity = nn.SiLU()
-
- self.upsample = self.downsample = None
- if self.up:
- if kernel == "fir":
- fir_kernel = (1, 3, 3, 1)
- self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel)
- elif kernel == "sde_vp":
- self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest")
- else:
- self.upsample = UpsamplePseudo3D(in_channels, use_conv=False)
- elif self.down:
- if kernel == "fir":
- fir_kernel = (1, 3, 3, 1)
- self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel)
- elif kernel == "sde_vp":
- self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2)
- else:
- self.downsample = DownsamplePseudo3D(in_channels, use_conv=False, padding=1, name="op")
-
- self.use_in_shortcut = (
- self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
- )
-
- self.conv_shortcut = None
- if self.use_in_shortcut:
- self.conv_shortcut = PseudoConv3d(
- in_channels, out_channels, kernel_size=1, stride=1, padding=0
- )
-
- def forward(self, input_tensor, temb):
- hidden_states = input_tensor
-
- hidden_states = self.norm1(hidden_states)
- hidden_states = self.nonlinearity(hidden_states)
-
- if self.upsample is not None:
- # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
- if hidden_states.shape[0] >= 64:
- input_tensor = input_tensor.contiguous()
- hidden_states = hidden_states.contiguous()
- input_tensor = self.upsample(input_tensor)
- hidden_states = self.upsample(hidden_states)
- elif self.downsample is not None:
- input_tensor = self.downsample(input_tensor)
- hidden_states = self.downsample(hidden_states)
-
- hidden_states = self.conv1(hidden_states)
-
- if temb is not None:
- temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None]
-
- if temb is not None and self.time_embedding_norm == "default":
- is_video = hidden_states.ndim == 5
- if is_video:
- b, c, f, h, w = hidden_states.shape
- hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
- temb = temb.repeat_interleave(f, 0)
-
- hidden_states = hidden_states + temb
-
- if is_video:
- hidden_states = rearrange(hidden_states, "(b f) c h w -> b c f h w", b=b)
-
- hidden_states = self.norm2(hidden_states)
-
- if temb is not None and self.time_embedding_norm == "scale_shift":
- is_video = hidden_states.ndim == 5
- if is_video:
- b, c, f, h, w = hidden_states.shape
- hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
- temb = temb.repeat_interleave(f, 0)
-
- scale, shift = torch.chunk(temb, 2, dim=1)
- hidden_states = hidden_states * (1 + scale) + shift
-
- if is_video:
- hidden_states = rearrange(hidden_states, "(b f) c h w -> b c f h w", b=b)
-
- hidden_states = self.nonlinearity(hidden_states)
-
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.conv2(hidden_states)
-
- if self.conv_shortcut is not None:
- input_tensor = self.conv_shortcut(input_tensor)
-
- output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
-
- return output_tensor
-
-
-class Mish(torch.nn.Module):
- def forward(self, hidden_states):
- return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states))
-
-
-def upsample_2d(hidden_states, kernel=None, factor=2, gain=1):
- r"""Upsample2D a batch of 2D images with the given filter.
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given
- filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified
- `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is
-    a multiple of the upsampling factor.
-
- Args:
- hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- kernel: FIR filter of the shape `[firH, firW]` or `[firN]`
- (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.
- factor: Integer upsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- output: Tensor of the shape `[N, C, H * factor, W * factor]`
- """
- assert isinstance(factor, int) and factor >= 1
- if kernel is None:
- kernel = [1] * factor
-
- kernel = torch.tensor(kernel, dtype=torch.float32)
- if kernel.ndim == 1:
- kernel = torch.outer(kernel, kernel)
- kernel /= torch.sum(kernel)
-
- kernel = kernel * (gain * (factor**2))
- pad_value = kernel.shape[0] - factor
- output = upfirdn2d_native(
- hidden_states,
- kernel.to(device=hidden_states.device),
- up=factor,
- pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),
- )
- return output
-
-
-def downsample_2d(hidden_states, kernel=None, factor=2, gain=1):
- r"""Downsample2D a batch of 2D images with the given filter.
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the
- given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the
- specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its
- shape is a multiple of the downsampling factor.
-
- Args:
- hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
- kernel: FIR filter of the shape `[firH, firW]` or `[firN]`
- (separable). The default is `[1] * factor`, which corresponds to average pooling.
- factor: Integer downsampling factor (default: 2).
- gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- output: Tensor of the shape `[N, C, H // factor, W // factor]`
- """
-
- assert isinstance(factor, int) and factor >= 1
- if kernel is None:
- kernel = [1] * factor
-
- kernel = torch.tensor(kernel, dtype=torch.float32)
- if kernel.ndim == 1:
- kernel = torch.outer(kernel, kernel)
- kernel /= torch.sum(kernel)
-
- kernel = kernel * gain
- pad_value = kernel.shape[0] - factor
- output = upfirdn2d_native(
- hidden_states,
- kernel.to(device=hidden_states.device),
- down=factor,
- pad=((pad_value + 1) // 2, pad_value // 2),
- )
- return output
-
-
-def upfirdn2d_native(tensor, kernel, up=1, down=1, pad=(0, 0)):
- up_x = up_y = up
- down_x = down_y = down
- pad_x0 = pad_y0 = pad[0]
- pad_x1 = pad_y1 = pad[1]
-
- _, channel, in_h, in_w = tensor.shape
- tensor = tensor.reshape(-1, in_h, in_w, 1)
-
- _, in_h, in_w, minor = tensor.shape
- kernel_h, kernel_w = kernel.shape
-
- out = tensor.view(-1, in_h, 1, in_w, 1, minor)
- out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
- out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
- out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
- out = out.to(tensor.device) # Move back to mps if necessary
- out = out[
- :,
- max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
- max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
- :,
- ]
-
- out = out.permute(0, 3, 1, 2)
- out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
- w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
- out = F.conv2d(out, w)
- out = out.reshape(
- -1,
- minor,
- in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
- in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
- )
- out = out.permute(0, 2, 3, 1)
- out = out[:, ::down_y, ::down_x, :]
-
- out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
- out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-
- return out.view(-1, channel, out_h, out_w)
diff --git a/spaces/wffcyrus/MetaGPT-v1/examples/search_with_specific_engine.py b/spaces/wffcyrus/MetaGPT-v1/examples/search_with_specific_engine.py
deleted file mode 100644
index 4423011e48daa6bebd00bbff62414108b8f8a1c9..0000000000000000000000000000000000000000
--- a/spaces/wffcyrus/MetaGPT-v1/examples/search_with_specific_engine.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Modified By: mashenquan, 2023-8-9, fix-bug: cannot find metagpt module.
-"""
-import asyncio
-from pathlib import Path
-import sys
-sys.path.append(str(Path(__file__).resolve().parent.parent))
-from metagpt.roles import Searcher
-from metagpt.tools import SearchEngineType
-
-
-async def main():
- # Serper API
- #await Searcher(engine = SearchEngineType.SERPER_GOOGLE).run(["What are some good sun protection products?","What are some of the best beaches?"])
- # SerpAPI
- #await Searcher(engine=SearchEngineType.SERPAPI_GOOGLE).run("What are the best ski brands for skiers?")
- # Google API
- await Searcher(engine=SearchEngineType.DIRECT_GOOGLE).run("What are the most interesting human facts?")
-
-if __name__ == '__main__':
- asyncio.run(main())
diff --git a/spaces/wliu88/StructDiffusionDemo/scripts/infer_with_discriminator.py b/spaces/wliu88/StructDiffusionDemo/scripts/infer_with_discriminator.py
deleted file mode 100644
index 3452eb7fd2acd90d4482dd388f926bbd318b3515..0000000000000000000000000000000000000000
--- a/spaces/wliu88/StructDiffusionDemo/scripts/infer_with_discriminator.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import os
-import argparse
-import torch
-import numpy as np
-import pytorch_lightning as pl
-from omegaconf import OmegaConf
-
-from StructDiffusion.data.semantic_arrangement import SemanticArrangementDataset
-from StructDiffusion.language.tokenizer import Tokenizer
-from StructDiffusion.models.pl_models import ConditionalPoseDiffusionModel, PairwiseCollisionModel
-from StructDiffusion.diffusion.sampler import SamplerV2
-from StructDiffusion.diffusion.pose_conversion import get_struct_objs_poses
-from StructDiffusion.utils.files import get_checkpoint_path_from_dir
-from StructDiffusion.utils.batch_inference import move_pc_and_create_scene_simple, visualize_batch_pcs
-
-
-def main(args, cfg):
-
- pl.seed_everything(args.eval_random_seed)
-
- device = (torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"))
-
- diffusion_checkpoint_path = get_checkpoint_path_from_dir(os.path.join(cfg.WANDB.save_dir, cfg.WANDB.project, args.diffusion_checkpoint_id, "checkpoints"))
- collision_checkpoint_path = get_checkpoint_path_from_dir(os.path.join(cfg.WANDB.save_dir, cfg.WANDB.project, args.collision_checkpoint_id, "checkpoints"))
-
- if args.eval_mode == "infer":
-
- tokenizer = Tokenizer(cfg.DATASET.vocab_dir)
- # override ignore_rgb for visualization
- cfg.DATASET.ignore_rgb = False
- dataset = SemanticArrangementDataset(split="test", tokenizer=tokenizer, **cfg.DATASET)
-
- sampler = SamplerV2(ConditionalPoseDiffusionModel, diffusion_checkpoint_path,
- PairwiseCollisionModel, collision_checkpoint_path, device)
-
- data_idxs = np.random.permutation(len(dataset))
- for di in data_idxs:
- raw_datum = dataset.get_raw_data(di)
- print(tokenizer.convert_structure_params_to_natural_language(raw_datum["sentence"]))
- datum = dataset.convert_to_tensors(raw_datum, tokenizer)
- batch = dataset.single_datum_to_batch(datum, args.num_samples, device, inference_mode=True)
-
- num_poses = datum["goal_poses"].shape[0]
- struct_pose, pc_poses_in_struct = sampler.sample(batch, num_poses)
-
- new_obj_xyzs = move_pc_and_create_scene_simple(batch["pcs"], struct_pose, pc_poses_in_struct)
- visualize_batch_pcs(new_obj_xyzs, args.num_samples, limit_B=10, trimesh=True)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="infer")
- parser.add_argument("--base_config_file", help='base config yaml file',
- default='../configs/base.yaml',
- type=str)
- parser.add_argument("--config_file", help='config yaml file',
- default='../configs/conditional_pose_diffusion.yaml',
- type=str)
- parser.add_argument("--diffusion_checkpoint_id",
- default="ConditionalPoseDiffusion",
- type=str)
- parser.add_argument("--collision_checkpoint_id",
- default="curhl56k",
- type=str)
- parser.add_argument("--eval_mode",
- default="infer",
- type=str)
- parser.add_argument("--eval_random_seed",
- default=42,
- type=int)
- parser.add_argument("--num_samples",
- default=10,
- type=int)
- args = parser.parse_args()
-
- base_cfg = OmegaConf.load(args.base_config_file)
- cfg = OmegaConf.load(args.config_file)
- cfg = OmegaConf.merge(base_cfg, cfg)
-
- main(args, cfg)
-
-
diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/main.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/main.py
deleted file mode 100644
index 53ef6ac570194a136023c8b602759bc3792691db..0000000000000000000000000000000000000000
--- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/main.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""
- Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
-
- Xuanmeng Zhang, Minyue Jiang, Zhedong Zheng, Xiao Tan, Errui Ding, Yi Yang
-
- Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
-
- Paper: https://arxiv.org/abs/2012.07620v2
-
- ======================================================================
-
- On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
- with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
- that our method achieves comparable or even better retrieval results on the other four
- image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
- with limited time cost.
-"""
-
-import os
-import numpy as np
-import argparse
-import torch
-
-from utils import *
-from gnn_reranking import *
-
-parser = argparse.ArgumentParser(description='Reranking_is_GNN')
-parser.add_argument(
- '--data_path',
- type=str,
- default='../xm_rerank_gpu_2/features/market_88_test.pkl',
- help='path to dataset'
-)
-parser.add_argument(
- '--k1',
- type=int,
- default=26, # Market-1501
- # default=60, # Veri-776
- help='parameter k1'
-)
-parser.add_argument(
- '--k2',
- type=int,
- default=7, # Market-1501
- # default=10, # Veri-776
- help='parameter k2'
-)
-
-args = parser.parse_args()
-
-
-def main():
- data = load_pickle(args.data_path)
-
- query_cam = data['query_cam']
- query_label = data['query_label']
- gallery_cam = data['gallery_cam']
- gallery_label = data['gallery_label']
-
- gallery_feature = torch.FloatTensor(data['gallery_f'])
- query_feature = torch.FloatTensor(data['query_f'])
- query_feature = query_feature.cuda()
- gallery_feature = gallery_feature.cuda()
-
- indices = gnn_reranking(query_feature, gallery_feature, args.k1, args.k2)
- evaluate_ranking_list(
- indices, query_label, query_cam, gallery_label, gallery_cam
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/xl2533/FinDoc/build_index/unit_test/test_loader.py b/spaces/xl2533/FinDoc/build_index/unit_test/test_loader.py
deleted file mode 100644
index 1868e1b8af4eaafbc633df6daabe7d5b3ebcf710..0000000000000000000000000000000000000000
--- a/spaces/xl2533/FinDoc/build_index/unit_test/test_loader.py
+++ /dev/null
@@ -1 +0,0 @@
-# -*-coding:utf-8 -*-
\ No newline at end of file
diff --git a/spaces/xl2533/MakeInstruction/ape/prompt.py b/spaces/xl2533/MakeInstruction/ape/prompt.py
deleted file mode 100644
index bcae5f65379e93d629ebdfecd6e24715806b1c9b..0000000000000000000000000000000000000000
--- a/spaces/xl2533/MakeInstruction/ape/prompt.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*-coding:utf-8 -*-
-few_shot_prompt = "输入:{input}\n输出:{output}\n"  # "Input: {input}\nOutput: {output}\n"
-
-gen_user_prompt = "我给你提供了一个任务指令,基于指令你生成了以下的输入-输出对\n\n{few_shot}这个任务描述是"  # "I gave you a task instruction; based on it you generated the following input-output pairs\n\n{few_shot}The task description is"
-
-gen_sys_prompt = ""
-
-eval_prompt = "任务描述:{instruction}\n输入:{input}\n输出:{output}"  # "Task description: {instruction}\nInput: {input}\nOutput: {output}"
-
-test_prompt = "任务描述:{instruction}\n输入:{input}\n输出:"  # "Task description: {instruction}\nInput: {input}\nOutput:"
-
-MyTemplate = {
- 'gen_user_prompt': gen_user_prompt,
- 'gen_sys_prompt': gen_sys_prompt,
- 'eval_prompt': eval_prompt,
- 'few_shot_prompt': few_shot_prompt,
- 'test_prompt': test_prompt
-}
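
For illustration only (this snippet is not part of the repository), the templates above are ordinary Python format strings, so filling them might look like the following. The example instruction and input/output values are invented, and the import path assumes the repo root is on `sys.path`.

```python
# Hypothetical usage of the templates defined above; all example values are made up.
from ape.prompt import MyTemplate

shots = MyTemplate['few_shot_prompt'].format(input='1+1', output='2')   # one few-shot pair
gen_prompt = MyTemplate['gen_user_prompt'].format(few_shot=shots)       # ask the model to infer the task
test_prompt = MyTemplate['test_prompt'].format(instruction='Do addition', input='2+3')
print(gen_prompt)
print(test_prompt)
```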
diff --git a/spaces/xly66624/Brayton-cycle/RCP.py b/spaces/xly66624/Brayton-cycle/RCP.py
deleted file mode 100644
index 449886eae4025321d18203ee25cf081cd6f7ced2..0000000000000000000000000000000000000000
--- a/spaces/xly66624/Brayton-cycle/RCP.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import CoolProp.CoolProp as cp
-import numpy as np
-import math as mt
-
-class RCP:
-    def __init__(self, th1:float,th2:float,tc1:float,tc2:float,qmh:float,qmc:float,ph:float,pc:float,fluid:str,HA:float = 0,l:float = 0.2): # inputs: the four temperatures, pressures, and mass flow rates; the channel length l has a default and may be omitted; if HA is not given, the log-mean temperature difference is used
- self.th1 = th1
- self.th2 = th2
- self.tc1 = tc1
- self.tc2 = tc2
- self.qmh = qmh
- self.qmc = qmc
- self.ph = ph
- self.pc = pc
- self.fluid = fluid
- self.l = l
- self.HA = HA
- denh = cp.PropsSI('D', 'P', self.pc, 'T', self.th1, self.fluid)
- denc = cp.PropsSI('D', 'P', self.ph, 'T', self.tc1, self.fluid)
- self.qvh = qmh/denh
- self.qvc = qmc/denc
- self.flag = 0
- while 1 :
- self.Calculate()
- if self.N > 1600 and (self.uh > 10 or self.uc > 10) :
- print("Parameters Error")
- break
- if self.N < 1600 and self.uh < 10 and self.uc < 10 :
- if self.uh > 8 or self.uc > 8 :
- self.flag = 1
- break
- else :
- self.l = self.l + 0.01
- if self.N > 1600 :
- self.l = self.l + 0.01
- if self.uh > 10 or self.uc > 10 :
- self.l = self.l - 0.01
-
-
- def Calculate(self):
- self.thav = (self.th1 + self.th2)/2
- self.tcav = (self.tc1 + self.tc2)/2
-        self.deltat1 = self.th1 - self.tc2 # inlet temperature difference
-        self.deltat2 = self.th2 - self.tc1 # outlet temperature difference
-        self.deltath = self.th1 - self.th2
-        self.deltatm = (self.deltat1 - self.deltat2)/np.log(self.deltat1/self.deltat2) # log-mean temperature difference
- self.denh = cp.PropsSI('D', 'P', self.ph, 'T', self.thav, self.fluid)
- self.denc = cp.PropsSI('D', 'P', self.pc, 'T', self.tcav, self.fluid)
- self.ch = cp.PropsSI('Cpmass', 'P', self.ph, 'T', self.thav, self.fluid)
- self.cc = cp.PropsSI('Cpmass', 'P', self.pc, 'T', self.tcav, self.fluid)
- self.Q = self.qmh*(cp.PropsSI('H', 'P', self.ph, 'T', self.th1, self.fluid) - cp.PropsSI('H', 'P', self.ph, 'T', self.th2, self.fluid))
-
- self.deq = 1.1e-3
- self.d, self.R = 1.8e-3, 0.9e-3
-
-        if(self.HA == 0): # log-mean temperature difference (LMTD) method
- self.H = 5000
- self.A = 0
- tempH = 0
- while 1 :
- tempH = self.H
- self.A = self.Q/(self.H*self.deltatm)
- self.N = round(self.A/(self.d*self.l+mt.pi*self.R*self.l), 0)
- self.Ah = 0.5*mt.pi*self.R**2*self.N
- self.Ac = self.Ah
- self.uh = self.qvh/self.Ah
- self.uc = self.qvc/self.Ac
- self.lamh = cp.PropsSI('L', 'P', self.ph, 'T', self.thav, self.fluid)
- self.Vh = cp.PropsSI('V', 'P', self.ph, 'T', self.thav, self.fluid)
- self.Prh = self.Vh*self.ch/self.lamh
- self.hh = 0.023 * self.lamh * (self.denh*self.uh/self.Vh)**0.8 * self.Prh**0.3 / self.deq**0.2
- self.lamc = cp.PropsSI('L', 'P', self.pc, 'T', self.tcav, self.fluid)
- self.Vc = cp.PropsSI('V', 'P', self.pc, 'T', self.tcav, self.fluid)
- self.Prc = self.Vc*self.cc/self.lamc
- self.hc = 0.023 * self.lamc * (self.denc*self.uc/self.Vc)**0.8 * self.Prc**0.4 / self.deq**0.2
- self.H = 1/(1/self.hh + 1/self.hc)
- if abs(self.H - tempH)<0.1 :
- self.Reh = self.denh*self.uh*self.deq/self.Vh
- self.Rec = self.denc*self.uc*self.deq/self.Vc
- self.V = 2.5*3.3e-6*self.l*self.N
- break
-        else: # HA method
- self.H = 5000
- self.A = 0
- tempH = 0
- while 1 :
- tempH = self.H
- self.A = self.HA/self.H
- self.N = round(self.A/(self.d*self.l+mt.pi*self.R*self.l), 0)
- self.Ah = 0.5*mt.pi*self.R**2*self.N
- self.Ac = self.Ah
- self.uh = self.qvh/self.Ah
- self.uc = self.qvc/self.Ac
- self.lamh = cp.PropsSI('L', 'P', self.ph, 'T', self.thav, self.fluid)
- self.Vh = cp.PropsSI('V', 'P', self.ph, 'T', self.thav, self.fluid)
- self.Prh = self.Vh*self.ch/self.lamh
- self.hh = 0.023 * self.lamh * (self.denh*self.uh/self.Vh)**0.8 * self.Prh**0.3 / self.deq**0.2
- self.lamc = cp.PropsSI('L', 'P', self.pc, 'T', self.tcav, self.fluid)
- self.Vc = cp.PropsSI('V', 'P', self.pc, 'T', self.tcav, self.fluid)
- self.Prc = self.Vc*self.cc/self.lamc
- self.hc = 0.023 * self.lamc * (self.denc*self.uc/self.Vc)**0.8 * self.Prc**0.4 / self.deq**0.2
- self.H = 1/(1/self.hh + 1/self.hc)
- if abs(self.H - tempH)<0.1 :
- self.Reh = self.denh*self.uh*self.deq/self.Vh
- self.Rec = self.denc*self.uc*self.deq/self.Vc
- self.V = 2.5*3.3e-6*self.l*self.N
- break
-
-
-    def Display(self): # call this function to print the design parameters
- if self.flag :
- print("The RCP's Parameters:\n")
- print("H =",self.H,"A =",self.A,"N =",self.N, "l =", self.l, "\n")
- print("Reh =",self.Reh,"Rec =",self.Rec,"\n")
- print("uh =",self.uh,"uc =",self.uc,"\n")
- print("Estimated Volume =", self.V)
- else :
- print("Error!Manual Checking Required")
-
-    def Checking(self): # check whether the design succeeded or failed
- if self.flag :
- print("Success")
- else :
- print("Failure")
-
-
-
-
-
-
-
-
\ No newline at end of file
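
As a purely illustrative sketch (not from the repository), constructing the recuperator model might look like the following. Every operating-condition number below is invented, the units assumed are K, kg/s, and Pa as used by CoolProp, and the import path assumes `RCP.py` is on the path.

```python
# Hypothetical usage of the RCP class above; all operating conditions are made-up sCO2 values.
from RCP import RCP

hx = RCP(th1=700.0, th2=550.0,   # hot-side inlet/outlet temperatures [K]
         tc1=400.0, tc2=600.0,   # cold-side inlet/outlet temperatures [K]
         qmh=10.0, qmc=10.0,     # mass flow rates [kg/s]
         ph=8e6, pc=20e6,        # hot-/cold-side pressures [Pa]
         fluid='CO2')            # sizing iterates l until velocity and channel-count limits are met
hx.Display()
hx.Checking()
```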
diff --git a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/util/util.py b/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/util/util.py
deleted file mode 100644
index e18b4a26082449977b27a4c1506649a2447988b1..0000000000000000000000000000000000000000
--- a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/util/util.py
+++ /dev/null
@@ -1,210 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import re
-import importlib
-import torch
-from argparse import Namespace
-import numpy as np
-from PIL import Image
-import os
-import argparse
-import dill as pickle
-
-
-def save_obj(obj, name):
- with open(name, "wb") as f:
- pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
-
-
-def load_obj(name):
- with open(name, "rb") as f:
- return pickle.load(f)
-
-
-def copyconf(default_opt, **kwargs):
- conf = argparse.Namespace(**vars(default_opt))
- for key in kwargs:
- print(key, kwargs[key])
- setattr(conf, key, kwargs[key])
- return conf
-
-
-# Converts a Tensor into a Numpy array
-# |imtype|: the desired type of the converted numpy array
-def tensor2im(image_tensor, imtype=np.uint8, normalize=True, tile=False):
- if isinstance(image_tensor, list):
- image_numpy = []
- for i in range(len(image_tensor)):
- image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
- return image_numpy
-
- if image_tensor.dim() == 4:
- # transform each image in the batch
- images_np = []
- for b in range(image_tensor.size(0)):
- one_image = image_tensor[b]
- one_image_np = tensor2im(one_image)
- images_np.append(one_image_np.reshape(1, *one_image_np.shape))
- images_np = np.concatenate(images_np, axis=0)
-
- return images_np
-
- if image_tensor.dim() == 2:
- image_tensor = image_tensor.unsqueeze(0)
- image_numpy = image_tensor.detach().cpu().float().numpy()
- if normalize:
- image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
- else:
- image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
- image_numpy = np.clip(image_numpy, 0, 255)
- if image_numpy.shape[2] == 1:
- image_numpy = image_numpy[:, :, 0]
- return image_numpy.astype(imtype)
-
-
-# Converts a one-hot tensor into a colorful label map
-def tensor2label(label_tensor, n_label, imtype=np.uint8, tile=False):
- if label_tensor.dim() == 4:
- # transform each image in the batch
- images_np = []
- for b in range(label_tensor.size(0)):
- one_image = label_tensor[b]
- one_image_np = tensor2label(one_image, n_label, imtype)
- images_np.append(one_image_np.reshape(1, *one_image_np.shape))
- images_np = np.concatenate(images_np, axis=0)
- # if tile:
- # images_tiled = tile_images(images_np)
- # return images_tiled
- # else:
- # images_np = images_np[0]
- # return images_np
- return images_np
-
- if label_tensor.dim() == 1:
- return np.zeros((64, 64, 3), dtype=np.uint8)
- if n_label == 0:
- return tensor2im(label_tensor, imtype)
- label_tensor = label_tensor.cpu().float()
- if label_tensor.size()[0] > 1:
- label_tensor = label_tensor.max(0, keepdim=True)[1]
- label_tensor = Colorize(n_label)(label_tensor)
- label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
- result = label_numpy.astype(imtype)
- return result
-
-
-def save_image(image_numpy, image_path, create_dir=False):
- if create_dir:
- os.makedirs(os.path.dirname(image_path), exist_ok=True)
- if len(image_numpy.shape) == 2:
- image_numpy = np.expand_dims(image_numpy, axis=2)
- if image_numpy.shape[2] == 1:
- image_numpy = np.repeat(image_numpy, 3, 2)
- image_pil = Image.fromarray(image_numpy)
-
- # save to png
- image_pil.save(image_path.replace(".jpg", ".png"))
-
-
-def mkdirs(paths):
- if isinstance(paths, list) and not isinstance(paths, str):
- for path in paths:
- mkdir(path)
- else:
- mkdir(paths)
-
-
-def mkdir(path):
- if not os.path.exists(path):
- os.makedirs(path)
-
-
-def atoi(text):
- return int(text) if text.isdigit() else text
-
-
-def natural_keys(text):
- """
- alist.sort(key=natural_keys) sorts in human order
- http://nedbatchelder.com/blog/200712/human_sorting.html
- (See Toothy's implementation in the comments)
- """
-    return [atoi(c) for c in re.split(r"(\d+)", text)]
-
-
-def natural_sort(items):
- items.sort(key=natural_keys)
-
-
-def str2bool(v):
- if v.lower() in ("yes", "true", "t", "y", "1"):
- return True
- elif v.lower() in ("no", "false", "f", "n", "0"):
- return False
- else:
- raise argparse.ArgumentTypeError("Boolean value expected.")
-
-
-def find_class_in_module(target_cls_name, module):
- target_cls_name = target_cls_name.replace("_", "").lower()
- clslib = importlib.import_module(module)
- cls = None
- for name, clsobj in clslib.__dict__.items():
- if name.lower() == target_cls_name:
- cls = clsobj
-
- if cls is None:
- print(
- "In %s, there should be a class whose name matches %s in lowercase without underscore(_)"
- % (module, target_cls_name)
- )
- exit(0)
-
- return cls
-
-
-def save_network(net, label, epoch, opt):
- save_filename = "%s_net_%s.pth" % (epoch, label)
- save_path = os.path.join(opt.checkpoints_dir, opt.name, save_filename)
- torch.save(net.cpu().state_dict(), save_path)
- if len(opt.gpu_ids) and torch.cuda.is_available():
- net.cuda()
-
-
-def load_network(net, label, epoch, opt):
- save_filename = "%s_net_%s.pth" % (epoch, label)
- save_dir = os.path.join(opt.checkpoints_dir, opt.name)
- save_path = os.path.join(save_dir, save_filename)
- if os.path.exists(save_path):
- weights = torch.load(save_path)
- net.load_state_dict(weights)
- return net
-
-
-###############################################################################
-# Code from
-# https://github.com/ycszen/pytorch-seg/blob/master/transform.py
-# Modified so it complies with the Citscape label map colors
-###############################################################################
-def uint82bin(n, count=8):
- """returns the binary of integer n, count refers to amount of bits"""
- return "".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])
-
-
-class Colorize(object):
- def __init__(self, n=35):
- self.cmap = labelcolormap(n)
- self.cmap = torch.from_numpy(self.cmap[:n])
-
- def __call__(self, gray_image):
- size = gray_image.size()
- color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
-
- for label in range(0, len(self.cmap)):
- mask = (label == gray_image[0]).cpu()
- color_image[0][mask] = self.cmap[label][0]
- color_image[1][mask] = self.cmap[label][1]
- color_image[2][mask] = self.cmap[label][2]
-
- return color_image
diff --git a/spaces/yashsrivastava/speech-to-text-yash/app.py b/spaces/yashsrivastava/speech-to-text-yash/app.py
deleted file mode 100644
index ff710e0a5a0e2eab86361d5f30aaaac991c33465..0000000000000000000000000000000000000000
--- a/spaces/yashsrivastava/speech-to-text-yash/app.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# In[ ]:
-
-
-import soundfile as sf
-import torch
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-import argparse
-from glob import glob
-import torchaudio
-import subprocess
-import gradio as gr
-
-resampler = torchaudio.transforms.Resample(48_000, 16_000)
-
-def get_filename(wav_file):
- filename_local = wav_file.split('/')[-1][:-4]
- filename_new = '/tmp/'+filename_local+'_16.wav'
-
-
- subprocess.call(["sox {} -r {} -b 16 -c 1 {}".format(wav_file, str(16000), filename_new)], shell=True)
- return filename_new
-
-def parse_transcription(wav_file):
- # load pretrained model
- processor = Wav2Vec2Processor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
- model = Wav2Vec2ForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
-
- # load audio
-
-
- wav_file = get_filename(wav_file.name)
- audio_input, sample_rate = sf.read(wav_file)
- #test_file = resampler(test_file[0])
-
- # pad input values and return pt tensor
- input_values = processor(audio_input, sampling_rate=16_000, return_tensors="pt").input_values
-
- # INFERENCE
- # retrieve logits & take argmax
- logits = model(input_values).logits
- predicted_ids = torch.argmax(logits, dim=-1)
-
- # transcribe
- transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
- return transcription
-
-
-# In[ ]:
-
-
-import gradio as gr
-title = "Speech-to-Text-English"
-description = "Upload a English audio clip, and let AI do the hard work of transcribing."
-
-gr.Interface(
- parse_transcription,
- title=title,
-    inputs=gr.inputs.Audio(label="Record Audio File", type="file", source="microphone"),
-    description=description, outputs="text").launch(inline=False)
-
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/big_bird/modeling_big_bird.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/big_bird/modeling_big_bird.py
deleted file mode 100644
index 867aca67e99e8c9726ab690ee0177ce1c394cbc1..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/big_bird/modeling_big_bird.py
+++ /dev/null
@@ -1,3156 +0,0 @@
-# coding=utf-8
-# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" PyTorch BigBird model."""
-
-
-import math
-import os
-from dataclasses import dataclass
-from typing import Optional, Tuple, Union
-
-import numpy as np
-import torch
-import torch.utils.checkpoint
-from torch import nn
-from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-
-from ...activations import ACT2FN
-from ...modeling_outputs import (
- BaseModelOutputWithPastAndCrossAttentions,
- BaseModelOutputWithPoolingAndCrossAttentions,
- CausalLMOutputWithCrossAttentions,
- MaskedLMOutput,
- MultipleChoiceModelOutput,
- SequenceClassifierOutput,
- TokenClassifierOutput,
-)
-from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward
-from ...utils import (
- ModelOutput,
- add_code_sample_docstrings,
- add_start_docstrings,
- add_start_docstrings_to_model_forward,
- logging,
- replace_return_docstrings,
-)
-from .configuration_big_bird import BigBirdConfig
-
-
-logger = logging.get_logger(__name__)
-
-_CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base"
-_CONFIG_FOR_DOC = "BigBirdConfig"
-
-BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [
- "google/bigbird-roberta-base",
- "google/bigbird-roberta-large",
- "google/bigbird-base-trivia-itc",
- # See all BigBird models at https://huggingface.co/models?filter=big_bird
-]
-
-_TRIVIA_QA_MAPPING = {
- "big_bird_attention": "attention/self",
- "output_layer_norm": "output/LayerNorm",
- "attention_output": "attention/output/dense",
- "output": "output/dense",
- "self_attention_layer_norm": "attention/output/LayerNorm",
- "intermediate": "intermediate/dense",
- "word_embeddings": "bert/embeddings/word_embeddings",
- "position_embedding": "bert/embeddings/position_embeddings",
- "type_embeddings": "bert/embeddings/token_type_embeddings",
- "embeddings": "bert/embeddings",
- "layer_normalization": "output/LayerNorm",
- "layer_norm": "LayerNorm",
- "trivia_qa_head": "qa_classifier",
- "dense": "intermediate/dense",
- "dense_1": "qa_outputs",
-}
-
-
-def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False):
- """Load tf checkpoints in a pytorch model."""
-
- def load_tf_weights_bert(init_vars, tf_path):
- names = []
- tf_weights = {}
-
- for name, shape in init_vars:
- array = tf.train.load_variable(tf_path, name)
- name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm")
- logger.info(f"Loading TF weight {name} with shape {shape}")
- names.append(name)
- tf_weights[name] = array
-
- return names, tf_weights
-
- def load_tf_weights_trivia_qa(init_vars):
- names = []
- tf_weights = {}
-
- for i, var in enumerate(init_vars):
- name_items = var.name.split("/")
-
- if "transformer_scaffold" in name_items[0]:
- layer_name_items = name_items[0].split("_")
- if len(layer_name_items) < 3:
- layer_name_items += [0]
-
- name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}"
-
- name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[
- :-2
- ] # remove last :0 in variable
-
- if "self/attention/output" in name:
- name = name.replace("self/attention/output", "output")
-
- if i >= len(init_vars) - 2:
- name = name.replace("intermediate", "output")
-
- logger.info(f"Loading TF weight {name} with shape {var.shape}")
- array = var.value().numpy()
- names.append(name)
- tf_weights[name] = array
-
- return names, tf_weights
-
- try:
- import re
-
- import numpy as np
- import tensorflow as tf
- except ImportError:
- logger.error(
- "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
- "https://www.tensorflow.org/install/ for installation instructions."
- )
- raise
- tf_path = os.path.abspath(tf_checkpoint_path)
- logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
-
- # Load weights from TF model
- init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path)
-
- if len(init_vars) <= 0:
- raise ValueError("Loaded trained variables cannot be empty.")
-
- pt_names = list(model.state_dict().keys())
-
- if is_trivia_qa:
- names, tf_weights = load_tf_weights_trivia_qa(init_vars)
- else:
- names, tf_weights = load_tf_weights_bert(init_vars, tf_path)
-
- for txt_name in names:
- array = tf_weights[txt_name]
- name = txt_name.split("/")
-        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
- # which are not required for using pretrained model
- if any(
- n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
- for n in name
- ):
- logger.info(f"Skipping {'/'.join(name)}")
- continue
- pointer = model
- pt_name = []
- for m_name in name:
- if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
- scope_names = re.split(r"_(\d+)", m_name)
- else:
- scope_names = [m_name]
- if scope_names[0] == "kernel" or scope_names[0] == "gamma":
- pointer = getattr(pointer, "weight")
- pt_name.append("weight")
- elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
- pointer = getattr(pointer, "bias")
- pt_name.append("bias")
- elif scope_names[0] == "output_weights":
- pointer = getattr(pointer, "weight")
- pt_name.append("weight")
- elif scope_names[0] == "squad":
- pointer = getattr(pointer, "classifier")
- pt_name.append("classifier")
- elif scope_names[0] == "transform":
- pointer = getattr(pointer, "transform")
- pt_name.append("transform")
- if ("bias" in name) or ("kernel" in name):
- pointer = getattr(pointer, "dense")
- pt_name.append("dense")
- elif ("beta" in name) or ("gamma" in name):
- pointer = getattr(pointer, "LayerNorm")
- pt_name.append("LayerNorm")
- else:
- try:
- pointer = getattr(pointer, scope_names[0])
- pt_name.append(f"{scope_names[0]}")
- except AttributeError:
- logger.info(f"Skipping {m_name}")
- continue
- if len(scope_names) >= 2:
- num = int(scope_names[1])
- pointer = pointer[num]
- pt_name.append(f"{num}")
- if m_name[-11:] == "_embeddings" or m_name == "embeddings":
- pointer = getattr(pointer, "weight")
- pt_name.append("weight")
- elif m_name == "kernel":
- array = np.transpose(array)
- try:
- if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape):
- # print(txt_name, array.shape)
- if (
- txt_name.endswith("attention/self/key/kernel")
- or txt_name.endswith("attention/self/query/kernel")
- or txt_name.endswith("attention/self/value/kernel")
- ):
- array = array.transpose(1, 0, 2).reshape(pointer.shape)
- elif txt_name.endswith("attention/output/dense/kernel"):
- array = array.transpose(0, 2, 1).reshape(pointer.shape)
- else:
- array = array.reshape(pointer.shape)
-
- if pointer.shape != array.shape:
- raise ValueError(
- f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}."
- )
- except ValueError as e:
- e.args += (pointer.shape, array.shape)
- raise
- pt_weight_name = ".".join(pt_name)
- logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.")
- pointer.data = torch.from_numpy(array)
- tf_weights.pop(txt_name, None)
- pt_names.remove(pt_weight_name)
-
- logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
- logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.")
- return model
-
-
-class BigBirdEmbeddings(nn.Module):
- """Construct the embeddings from word, position and token_type embeddings."""
-
- # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
- def __init__(self, config):
- super().__init__()
- self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
- self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
- self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
-
- # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
- # any TensorFlow checkpoint file
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
- # position_ids (1, len position emb) is contiguous in memory and exported when serialized
- self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
- self.register_buffer(
- "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
- )
- self.register_buffer(
- "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
- )
- # End copy
-
- self.rescale_embeddings = config.rescale_embeddings
- self.hidden_size = config.hidden_size
-
- def forward(
- self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
- ):
- if input_ids is not None:
- input_shape = input_ids.size()
- else:
- input_shape = inputs_embeds.size()[:-1]
-
- seq_length = input_shape[1]
-
- if position_ids is None:
- position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
-
-        # If token_type_ids is not provided, fall back to the registered buffer (all zeros). This usually
-        # happens when token_type_ids are auto-generated; the registered buffer lets users trace the model
-        # without passing token_type_ids and solves issue #5664
- if token_type_ids is None:
- if hasattr(self, "token_type_ids"):
- buffered_token_type_ids = self.token_type_ids[:, :seq_length]
- buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
- token_type_ids = buffered_token_type_ids_expanded
- else:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
-
- if inputs_embeds is None:
- inputs_embeds = self.word_embeddings(input_ids)
-
- if self.rescale_embeddings:
- inputs_embeds = inputs_embeds * (self.hidden_size**0.5)
-
- token_type_embeddings = self.token_type_embeddings(token_type_ids)
-
- embeddings = inputs_embeds + token_type_embeddings
-
- position_embeddings = self.position_embeddings(position_ids)
- embeddings += position_embeddings
-
- embeddings = self.dropout(embeddings)
- embeddings = self.LayerNorm(embeddings)
- return embeddings
-
-
-class BigBirdSelfAttention(nn.Module):
- def __init__(self, config):
- super().__init__()
- if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
- raise ValueError(
- f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
- f"heads ({config.num_attention_heads})"
- )
-
- self.num_attention_heads = config.num_attention_heads
- self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
- self.all_head_size = self.num_attention_heads * self.attention_head_size
-
- self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
- self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
- self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
-
- self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
- self.is_decoder = config.is_decoder
-
- def transpose_for_scores(self, x):
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
- x = x.view(*new_x_shape)
- return x.permute(0, 2, 1, 3)
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_value=None,
- output_attentions=False,
- ):
- mixed_query_layer = self.query(hidden_states)
-
- # If this is instantiated as a cross-attention module, the keys
- # and values come from an encoder; the attention mask needs to be
- # such that the encoder's padding tokens are not attended to.
- is_cross_attention = encoder_hidden_states is not None
-
- if is_cross_attention and past_key_value is not None:
- # reuse k,v, cross_attentions
- key_layer = past_key_value[0]
- value_layer = past_key_value[1]
- attention_mask = encoder_attention_mask
- elif is_cross_attention:
- key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
- value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
- attention_mask = encoder_attention_mask
- elif past_key_value is not None:
- key_layer = self.transpose_for_scores(self.key(hidden_states))
- value_layer = self.transpose_for_scores(self.value(hidden_states))
- key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
- value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
- else:
- key_layer = self.transpose_for_scores(self.key(hidden_states))
- value_layer = self.transpose_for_scores(self.value(hidden_states))
-
- query_layer = self.transpose_for_scores(mixed_query_layer)
-
- if self.is_decoder:
- # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
- # Further calls to cross_attention layer can then reuse all cross-attention
- # key/value_states (first "if" case)
- # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
- # all previous decoder key/value_states. Further calls to uni-directional self-attention
- # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
- # if encoder bi-directional self-attention `past_key_value` is always `None`
- past_key_value = (key_layer, value_layer)
-
- # Take the dot product between "query" and "key" to get the raw attention scores.
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
-
- attention_scores = attention_scores / math.sqrt(self.attention_head_size)
- if attention_mask is not None:
-            # Apply the attention mask (precomputed for all layers in BigBirdModel forward() function)
- attention_scores = attention_scores + attention_mask
-
- # Normalize the attention scores to probabilities.
- attention_probs = nn.functional.softmax(attention_scores, dim=-1)
-
- # This is actually dropping out entire tokens to attend to, which might
- # seem a bit unusual, but is taken from the original Transformer paper.
- attention_probs = self.dropout(attention_probs)
-
- # Mask heads if we want to
- if head_mask is not None:
- attention_probs = attention_probs * head_mask
-
- context_layer = torch.matmul(attention_probs, value_layer)
-
- context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
- new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
- context_layer = context_layer.view(*new_context_layer_shape)
-
- outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
-
- if self.is_decoder:
- outputs = outputs + (past_key_value,)
- return outputs
-
-
-class BigBirdBlockSparseAttention(nn.Module):
- def __init__(self, config, seed=None):
- super().__init__()
-
- self.max_seqlen = config.max_position_embeddings
- self.seed = seed
-
- if config.hidden_size % config.num_attention_heads != 0:
- raise ValueError(
- f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
- f"heads {config.num_attention_heads}."
- )
-
- self.num_attention_heads = config.num_attention_heads
- self.num_random_blocks = config.num_random_blocks
- self.block_size = config.block_size
-
- self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
- self.all_head_size = self.num_attention_heads * self.attention_head_size
-
- self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
- self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
- self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
-
- def transpose_for_scores(self, x):
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
- x = x.view(*new_x_shape)
- return x.permute(0, 2, 1, 3)
-
- def forward(
- self,
- hidden_states,
- band_mask=None,
- from_mask=None,
- to_mask=None,
- from_blocked_mask=None,
- to_blocked_mask=None,
- output_attentions=None,
- ):
-        # Currently this `class` can't be used as a decoder.
-
- batch_size, seqlen, _ = hidden_states.size()
- to_seq_length = from_seq_length = seqlen
- from_block_size = to_block_size = self.block_size
-
- if from_seq_length % from_block_size != 0:
-            raise ValueError("Query sided sequence length must be a multiple of block size")
-
-        if to_seq_length % to_block_size != 0:
-            raise ValueError("Key/Value sided sequence length must be a multiple of block size")
-
- query_layer = self.transpose_for_scores(self.query(hidden_states))
- key_layer = self.transpose_for_scores(self.key(hidden_states))
- value_layer = self.transpose_for_scores(self.value(hidden_states))
-
- context_layer, attention_probs = self.bigbird_block_sparse_attention(
- query_layer,
- key_layer,
- value_layer,
- band_mask,
- from_mask,
- to_mask,
- from_blocked_mask,
- to_blocked_mask,
- self.num_attention_heads,
- self.num_random_blocks,
- self.attention_head_size,
- from_block_size,
- to_block_size,
- batch_size,
- from_seq_length,
- to_seq_length,
- seed=self.seed,
- plan_from_length=None,
- plan_num_rand_blocks=None,
- output_attentions=output_attentions,
- )
-
- context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
-
- outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
- return outputs
-
- @staticmethod
- def torch_bmm_nd(inp_1, inp_2, ndim=None):
- """Fast nd matrix multiplication"""
- # faster replacement of torch.einsum ("bhqk,bhkd->bhqd")
- return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(
- inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])
- )
-
- @staticmethod
- def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):
- """Fast nd matrix multiplication with transpose"""
- # faster replacement of torch.einsum (bhqd,bhkd->bhqk)
- return torch.bmm(
- inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)
- ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))
-
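A small equivalence check (not part of the original file) showing that the two helpers above reproduce the einsum contractions named in their comments:

    import torch

    q = torch.randn(2, 4, 8, 16)   # [bsz, n_heads, q_len, head_dim]
    k = torch.randn(2, 4, 10, 16)  # [bsz, n_heads, k_len, head_dim]
    v = torch.randn(2, 4, 10, 32)  # [bsz, n_heads, k_len, value_dim]
    scores = BigBirdBlockSparseAttention.torch_bmm_nd_transpose(q, k, ndim=4)
    assert torch.allclose(scores, torch.einsum("bhqd,bhkd->bhqk", q, k), atol=1e-5)
    context = BigBirdBlockSparseAttention.torch_bmm_nd(scores, v, ndim=4)
    assert torch.allclose(context, torch.einsum("bhqk,bhkd->bhqd", scores, v), atol=1e-5)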
- def bigbird_block_sparse_attention(
- self,
- query_layer,
- key_layer,
- value_layer,
- band_mask,
- from_mask,
- to_mask,
- from_blocked_mask,
- to_blocked_mask,
- n_heads,
- n_rand_blocks,
- attention_head_size,
- from_block_size,
- to_block_size,
- batch_size,
- from_seq_len,
- to_seq_len,
- seed,
- plan_from_length,
- plan_num_rand_blocks,
- output_attentions,
- ):
- # BigBird block-sparse attention as suggested in paper
-
- # ITC:
- # global tokens: 2 x block_size
- # window tokens: 3 x block_size
- # random tokens: num_rand_tokens x block_size
-
- # ETC:
- # global tokens: extra_globals_tokens + 2 x block_size
- # window tokens: 3 x block_size
- # random tokens: num_rand_tokens x block_size
-
- # Note:
- # 1) Currently, ETC is not supported.
- # 2) Window size is fixed to 3 blocks & it can be changed only by
- # changing `block_size`.
- # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be
- # controlled only by `block_size`.
-
-        # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use the special trick of shifting tokens (for calculating sliding attention)
-        # hence the following code can be divided into 5 parts.
-
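        # (Illustrative numbers, not from the original source: with the google/bigbird-roberta-base
        # defaults block_size = 64 and num_random_blocks = 3, each middle query block attends to
        # 2 global + 3 sliding + 3 random = 8 blocks, i.e. (5 + 3) * 64 = 512 keys, independent of
        # the sequence length, which is what makes this attention linear in sequence length.)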
- if from_seq_len // from_block_size != to_seq_len // to_block_size:
-            raise ValueError("The number of blocks needs to be the same!")
-
- rsqrt_d = 1 / math.sqrt(attention_head_size)
- bsz = batch_size
- attn_mask_penalty = -10000.0
-
- # generate random attention and corresponding masks
- np.random.seed(seed)
- if from_seq_len in [1024, 3072, 4096]: # old plans used in paper
- rand_attn = [
- self._bigbird_block_rand_mask(
- self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
- )[: (from_seq_len // from_block_size - 2)]
- for _ in range(n_heads)
- ]
- else:
- if plan_from_length is None:
- plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
- from_seq_len, from_block_size, n_rand_blocks
- )
-
- rand_attn = self._bigbird_block_rand_mask_with_head(
- from_seq_length=from_seq_len,
- to_seq_length=to_seq_len,
- from_block_size=from_block_size,
- to_block_size=to_block_size,
- num_heads=n_heads,
- plan_from_length=plan_from_length,
- plan_num_rand_blocks=plan_num_rand_blocks,
- )
-
- rand_attn = np.stack(rand_attn, axis=0)
- rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
- rand_attn.unsqueeze_(0)
- rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)
-
- rand_mask = self._create_rand_mask_from_inputs(
- from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
- )
-
- blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
- blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
- blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
-
-        # preparing blocks for random attention
- gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
- gathered_key = gathered_key.view(
- bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
- ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
- gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
- gathered_value = gathered_value.view(
- bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
- ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
-
- # 1st PART
- # 1st block (global block) attention scores
- # q[0] x (k[0], k[1], k[2], k[3], k[4] .... )
-
- # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
- first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)
-
- first_product = first_product * rsqrt_d
- first_product += (1.0 - to_mask) * attn_mask_penalty
- first_attn_weights = nn.functional.softmax(
- first_product, dim=-1
- ) # [bsz, n_heads, from_block_size, to_seq_len]
-
- # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
- first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)
- first_context_layer.unsqueeze_(2)
-
- # 2nd PART
- # 2nd block attention scores
- # q[1] x (sliding_keys, random_keys, global_keys)
-        # sliding key blocks -> 1st, 2nd, 3rd blocks
-        # global key blocks -> 1st & last block
-
- second_key_mat = torch.cat(
- [
- blocked_key_matrix[:, :, 0],
- blocked_key_matrix[:, :, 1],
- blocked_key_matrix[:, :, 2],
- blocked_key_matrix[:, :, -1],
- gathered_key[:, :, 0],
- ],
- dim=2,
- ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
- second_value_mat = torch.cat(
- [
- blocked_value_matrix[:, :, 0],
- blocked_value_matrix[:, :, 1],
- blocked_value_matrix[:, :, 2],
- blocked_value_matrix[:, :, -1],
- gathered_value[:, :, 0],
- ],
- dim=2,
- ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
-
- # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
- second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)
- second_seq_pad = torch.cat(
- [
- to_mask[:, :, :, : 3 * to_block_size],
- to_mask[:, :, :, -to_block_size:],
- to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
- ],
- dim=3,
- )
- second_rand_pad = torch.cat(
- [
- rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
- rand_mask[:, :, 0],
- ],
- dim=3,
- )
- second_product = second_product * rsqrt_d
- second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty
- second_attn_weights = nn.functional.softmax(
- second_product, dim=-1
- ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
-
- # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
- second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)
-
- second_context_layer.unsqueeze_(2)
-
- # 3rd PART
- # Middle blocks attention scores
-        # q[2:-2] x (sliding_keys, random_keys, global_keys)
- # sliding attn is calculated using special trick of shifting tokens as discussed in paper
- # random keys are generated by taking random indices as per `rand_attn`
- # global keys -> 1st & last block
-
- exp_blocked_key_matrix = torch.cat(
- [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3
- ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
- exp_blocked_value_matrix = torch.cat(
- [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],
- dim=3,
- ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
- middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
-
-        # sliding attention scores for q[2:-2]
- # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
- inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)
- # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]
- inner_band_product = inner_band_product * rsqrt_d
-
-        # random attention scores for q[2:-2]
- # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
- rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)
- # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]
- rand_band_product = rand_band_product * rsqrt_d
-
- # Including 1st block (since it's global)
- first_band_product = torch.einsum(
- "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]
- ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
- first_band_product = first_band_product * rsqrt_d
-
- # Including last block (since it's global)
- last_band_product = torch.einsum(
- "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]
- ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
- last_band_product = last_band_product * rsqrt_d
-
- # masking padded tokens
- inner_band_product += (1.0 - band_mask) * attn_mask_penalty
- first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty
- last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty
- rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty
-
-        # completing attention scores matrix for all q[2:-2]
- band_product = torch.cat(
- [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1
- ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
-
- # safely doing softmax since attention matrix is completed
- attn_weights = nn.functional.softmax(
- band_product, dim=-1
- ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
-
- # contribution of sliding keys
- # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
- context_layer = self.torch_bmm_nd(
- attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5
- )
- # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
-
- # adding contribution of random keys
- # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
- context_layer += self.torch_bmm_nd(
- attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5
- )
- # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
-
- # adding contribution of global keys
- context_layer += torch.einsum(
- "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]
- ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
- context_layer += torch.einsum(
- "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]
- ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
-
- # 4th PART
- # last 2nd token attention scores
- # q[-2] x (sliding_keys, random_keys, global_keys)
- # sliding key blocks -> last 3 blocks
- # global key block -> 1st block
-        # random key block -> based on indices stored in `rand_attn`
-
- second_last_key_mat = torch.cat(
- [
- blocked_key_matrix[:, :, 0],
- blocked_key_matrix[:, :, -3],
- blocked_key_matrix[:, :, -2],
- blocked_key_matrix[:, :, -1],
- gathered_key[:, :, -1],
- ],
- dim=2,
- ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1]
- second_last_value_mat = torch.cat(
- [
- blocked_value_matrix[:, :, 0],
- blocked_value_matrix[:, :, -3],
- blocked_value_matrix[:, :, -2],
- blocked_value_matrix[:, :, -1],
- gathered_value[:, :, -1],
- ],
- dim=2,
- ) # [bsz, n_heads, (4+r)*to_block_size, -1]
-
- # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
- second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)
- second_last_seq_pad = torch.cat(
- [
- to_mask[:, :, :, :to_block_size],
- to_mask[:, :, :, -3 * to_block_size :],
- to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
- ],
- dim=3,
- )
- second_last_rand_pad = torch.cat(
- [
- rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
- rand_mask[:, :, -1],
- ],
- dim=3,
- )
- second_last_product = second_last_product * rsqrt_d
- second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty
- second_last_attn_weights = nn.functional.softmax(
- second_last_product, dim=-1
- ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
-
- # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
- second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)
- second_last_context_layer.unsqueeze_(2)
-
- # 5th PART
- # last block (global) attention scores
- # q[-1] x (k[0], k[1], k[2], k[3], .... )
-
- # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
- last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)
- last_product = last_product * rsqrt_d
- last_product += (1.0 - to_mask) * attn_mask_penalty
- last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n]
-
- # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
- last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)
- last_context_layer.unsqueeze_(2)
-
- # combining representations of all tokens
- context_layer = torch.cat(
- [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],
- dim=2,
- )
- context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask
- context_layer = torch.transpose(context_layer, 1, 2)
-
- # this is just for visualizing; forward pass doesn't depend on following code
- if output_attentions:
- # TODO(PVP): need to verify if below code is correct
- attention_probs = torch.zeros(
- bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device
- )
-
- # 1st query block
- # corresponding to `first_context_layer`
- attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global
-
- # 2nd query block
- # corresponding to `second_context_layer`
- attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[
- :, :, :, : 3 * to_block_size
- ] # 1st three key blocks (global + sliding)
- attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[
- :, :, :, 3 * to_block_size : 4 * to_block_size
- ] # last key block (global)
- # random keys
- for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):
- # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
- for p2, i2, w2 in zip(range(n_heads), i1, w1):
- # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
- attn_probs_view = attention_probs.view(
- bsz,
- n_heads,
- from_seq_len // from_block_size,
- from_block_size,
- to_seq_len // to_block_size,
- to_block_size,
- )
- right_slice = w2[:, 4 * to_block_size :]
- attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(
- from_block_size, n_rand_blocks, to_block_size
- )
-
- # Middle query blocks
- # corresponding to `context_layer`
- # sliding keys
- for q_idx in range(from_seq_len // from_block_size - 4):
- attn_probs_view = attention_probs.view(
- bsz,
- n_heads,
- from_seq_len // from_block_size,
- from_block_size,
- to_seq_len // to_block_size,
- to_block_size,
- )[:, :, 2:-2, :, 1:-1, :]
- right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]
- attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(
- bsz, n_heads, from_block_size, 3, to_block_size
- ) # inner_band_product
- # global keys (corresponding to 1st key block)
- attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
- :, :, :, :, :to_block_size
- ].view(
- bsz, n_heads, -1, to_block_size
- ) # first_band_product
- # global keys (corresponding to last key block)
- attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
- :, :, :, :, -to_block_size:
- ].view(
- bsz, n_heads, -1, to_block_size
- ) # last_band_product
- # random keys
- for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
- # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
- for p2, i2, w2 in zip(range(n_heads), i1, w1):
- # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
- for q_idx in range(1, len(i2) - 1):
- attn_probs_view = attention_probs.view(
- bsz,
- n_heads,
- from_seq_len // from_block_size,
- from_block_size,
- to_seq_len // to_block_size,
- to_block_size,
- )
- right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]
- attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(
- from_block_size, n_rand_blocks, to_block_size
- )
-
- # Second-last query block
- # corresponding to `second_last_context_layer`
- attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[
- :, :, :, :to_block_size
- ] # 1st key block (global)
- attention_probs[
- :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :
- ] = second_last_attn_weights[
- :, :, :, to_block_size : 4 * to_block_size
- ] # last three blocks (global + sliding)
- # random keys
- for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):
- # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
- for p2, i2, w2 in zip(range(n_heads), i1, w1):
- # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
- attn_probs_view = attention_probs.view(
- bsz,
- n_heads,
- from_seq_len // from_block_size,
- from_block_size,
- to_seq_len // to_block_size,
- to_block_size,
- )
- right_slice = w2[:, 4 * to_block_size :]
- attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(
- from_block_size, n_rand_blocks, to_block_size
- )
-
- # last query block
- # corresponding to `last_context_layer`
- attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global
-
- else:
- attention_probs = None
-
- return context_layer, attention_probs
-
- @staticmethod
- def torch_gather_b2(params, indices):
- # this operation is equivalent to tf.gather when batch_dims=2
-
- if params.shape[:2] != indices.shape[:2]:
- raise ValueError(
- "Make sure that the first two dimensions of params and indices are identical, but"
- f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}"
- )
- num_indices_to_gather = indices.shape[-2] * indices.shape[-1]
- num_indices_to_pick_from = params.shape[2]
-
- shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)
- indices_shift = torch.div(shift, num_indices_to_gather, rounding_mode="floor") * num_indices_to_pick_from
-
- flattened_indices = indices.view(-1) + indices_shift
- flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])
-
- out_flattened = flattened_params.index_select(0, flattened_indices)
-
- out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])
- return out
-
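A shape sketch (not from the original file) for torch_gather_b2 above, which mimics tf.gather with batch_dims=2 by picking whole key/value blocks per (batch, head):

    import torch

    bsz, n_heads, n_blocks, block, dim = 2, 4, 8, 16, 64
    params = torch.randn(bsz, n_heads, n_blocks, block, dim)    # blocked keys or values
    indices = torch.randint(0, n_blocks, (bsz, n_heads, 6, 3))  # 3 random blocks for each of 6 middle rows
    out = BigBirdBlockSparseAttention.torch_gather_b2(params, indices)
    assert out.shape == (bsz, n_heads, 6 * 3, block, dim)
    # out[b, h, i * 3 + j] equals params[b, h, indices[b, h, i, j]]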
- @staticmethod
- def _create_rand_mask_from_inputs(
- from_blocked_mask,
- to_blocked_mask,
- rand_attn,
- num_attention_heads,
- num_rand_blocks,
- batch_size,
- from_seq_length,
- from_block_size,
- ):
- """
- Create 3D attention mask from a 2D tensor mask.
-
- Args:
-            from_blocked_mask: 3D Tensor of shape [batch_size,
- from_seq_length//from_block_size, from_block_size].
- to_blocked_mask: int32 Tensor of shape [batch_size,
- to_seq_length//to_block_size, to_block_size].
- rand_attn: [batch_size, num_attention_heads,
- from_seq_length//from_block_size-2, num_rand_blocks]
- num_attention_heads: int. Number of attention heads.
- num_rand_blocks: int. Number of random chunks per row.
- batch_size: int. Batch size for computation.
- from_seq_length: int. length of from sequence.
- from_block_size: int. size of block in from sequence.
-
- Returns:
- float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,
- from_block_size, num_rand_blocks*to_block_size].
- """
- num_windows = from_seq_length // from_block_size - 2
- rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
- rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)
- rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask)
- return rand_mask
-
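A tiny shape check (not from the original file) for _create_rand_mask_from_inputs above, using equal from/to block sizes:

    import torch

    bsz, n_heads, n_blocks, block, r = 1, 2, 8, 4, 3
    blocked_mask = torch.ones(bsz, n_blocks, block)
    rand_attn = torch.randint(1, n_blocks - 1, (bsz, n_heads, n_blocks - 2, r))
    rand_mask = BigBirdBlockSparseAttention._create_rand_mask_from_inputs(
        blocked_mask, blocked_mask, rand_attn, n_heads, r, bsz, n_blocks * block, block
    )
    assert rand_mask.shape == (bsz, n_heads, n_blocks - 2, block, r * block)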
- @staticmethod
- def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
- """
- Gives the plan of where to put random attention.
-
- Args:
- from_seq_length: int. length of from sequence.
- from_block_size: int. size of block in from sequence.
- num_rand_blocks: int. Number of random chunks per row.
-
- Returns:
- plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for
- each block
- """
-
- plan_from_length = []
- plan_num_rand_blocks = []
- if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):
- plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))
- plan_num_rand_blocks.append(num_rand_blocks)
- plan_from_length.append(from_seq_length)
- plan_num_rand_blocks.append(0)
- elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
- plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))
- plan_num_rand_blocks.append(num_rand_blocks // 2)
- plan_from_length.append(from_seq_length)
- plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))
- else:
- plan_from_length.append(from_seq_length)
- plan_num_rand_blocks.append(num_rand_blocks)
-
- return plan_from_length, plan_num_rand_blocks
-
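A worked example (not from the original file) of the plan logic above: for a 2048-token input with 64-token blocks and 3 random blocks per row, (2 * 3 + 5) = 11 is smaller than 2048 // 64 = 32, so the plan keeps all random blocks within the first 11 * 64 = 704 tokens:

    plan_from_length, plan_num_rand_blocks = BigBirdBlockSparseAttention._get_rand_attn_plan(2048, 64, 3)
    assert plan_from_length == [704, 2048] and plan_num_rand_blocks == [3, 0]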
- def _bigbird_block_rand_mask(
- self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1
- ):
- """
- Create adjacency list of random attention.
-
- Args:
- from_seq_length: int. length of from sequence.
- to_seq_length: int. length of to sequence.
- from_block_size: int. size of block in from sequence.
- to_block_size: int. size of block in to sequence.
- num_rand_blocks: int. Number of random chunks per row.
- last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,
- if positive then num_rand_blocks blocks chosen only up to last_idx.
-
- Returns:
- adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
- """
- # using this method when from_seq_length in [1024, 3072, 4096]
-
- if from_seq_length // from_block_size != to_seq_length // to_block_size:
-            raise ValueError("The number of blocks needs to be the same!")
-
- rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
- # During inference (eval) no randomness
- if not self.training:
- return rand_attn
- middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
- last = to_seq_length // to_block_size - 1
- if last_idx > (2 * to_block_size):
- last = (last_idx // to_block_size) - 1
-
- r = num_rand_blocks # shorthand
- for i in range(1, from_seq_length // from_block_size - 1):
- start = i - 2
- end = i
- if i == 1:
- rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
- elif i == 2:
- rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
- elif i == from_seq_length // from_block_size - 3:
- rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
- # Missing -3: should have been sliced till last-3
- elif i == from_seq_length // from_block_size - 2:
- rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
- # Missing -4: should have been sliced till last-4
- else:
- if start > last:
- start = last
- rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
- elif (end + 1) == last:
- rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
- else:
- rand_attn[i - 1, :] = np.random.permutation(
- np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
- )[:r]
- return rand_attn
-
- def _bigbird_block_rand_mask_with_head(
- self,
- from_seq_length,
- to_seq_length,
- from_block_size,
- to_block_size,
- num_heads,
- plan_from_length,
- plan_num_rand_blocks,
- window_block_left=1,
- window_block_right=1,
- global_block_top=1,
- global_block_bottom=1,
- global_block_left=1,
- global_block_right=1,
- ):
- """
- Create adjacency list of random attention.
-
- Args:
- from_seq_length: int. length of from sequence.
- to_seq_length: int. length of to sequence.
- from_block_size: int. size of block in from sequence.
- to_block_size: int. size of block in to sequence.
- num_heads: int. total number of heads.
- plan_from_length: list. plan from length where num_random_blocks are chosen from.
- plan_num_rand_blocks: list. number of rand blocks within the plan.
- window_block_left: int. number of blocks of window to left of a block.
- window_block_right: int. number of blocks of window to right of a block.
- global_block_top: int. number of blocks at the top.
- global_block_bottom: int. number of blocks at the bottom.
- global_block_left: int. Number of blocks globally used to the left.
- global_block_right: int. Number of blocks globally used to the right.
-
- Returns:
- adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by
- num_rand_blocks
- """
- # using this method when from_seq_length not in [1024, 3072, 4096]
-
- if from_seq_length // from_block_size != to_seq_length // to_block_size:
-            raise ValueError("The number of blocks needs to be the same!")
-
-        if from_seq_length not in plan_from_length:
-            raise ValueError("The from sequence length is not in the plan!")
-
-        # Total number of blocks in the mask
- num_blocks = from_seq_length // from_block_size
- # Number of blocks per plan
- plan_block_length = np.array(plan_from_length) // from_block_size
- # till when to follow plan
- max_plan_idx = plan_from_length.index(from_seq_length)
-
- # Random Attention adjacency list
- rand_attn = [
- np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
- for i in range(num_heads)
- ]
- # During inference (eval) no randomness
- if not self.training:
- for nh in range(num_heads):
- rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
- return rand_attn
-
-        # We will go iteratively over the plan blocks and pick a random number of
-        # attention blocks from the legally allowed blocks
- for plan_idx in range(max_plan_idx + 1):
- rnd_r_cnt = 0
- if plan_idx > 0:
- # set the row for all from_blocks starting from 0 to
- # plan_block_length[plan_idx-1]
-                # column index starts from plan_block_length[plan_idx-1] and ends at
- # plan_block_length[plan_idx]
- if plan_num_rand_blocks[plan_idx] > 0:
- rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
- curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
- for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
- for h in range(num_heads):
- rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
- block_id=blk_rw_idx,
- to_start_block_id=plan_block_length[plan_idx - 1],
- to_end_block_id=plan_block_length[plan_idx],
- num_rand_blocks=plan_num_rand_blocks[plan_idx],
- window_block_left=window_block_left,
- window_block_right=window_block_right,
- global_block_left=global_block_left,
- global_block_right=global_block_right,
- )
-
- for pl_id in range(plan_idx):
- if plan_num_rand_blocks[pl_id] == 0:
- continue
- for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
- rnd_r_cnt = 0
- to_start_block_id = 0
- if pl_id > 0:
- rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
- to_start_block_id = plan_block_length[pl_id - 1]
- curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
- for h in range(num_heads):
- rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
- block_id=blk_rw_idx,
- to_start_block_id=to_start_block_id,
- to_end_block_id=plan_block_length[pl_id],
- num_rand_blocks=plan_num_rand_blocks[pl_id],
- window_block_left=window_block_left,
- window_block_right=window_block_right,
- global_block_left=global_block_left,
- global_block_right=global_block_right,
- )
-
- if plan_num_rand_blocks[plan_idx] == 0:
- continue
- curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
- from_start_block_id = global_block_top
- to_start_block_id = 0
- if plan_idx > 0:
- rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
- from_start_block_id = plan_block_length[plan_idx - 1]
- to_start_block_id = plan_block_length[plan_idx - 1]
-
- for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
- for h in range(num_heads):
- rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
- block_id=blk_rw_idx,
- to_start_block_id=to_start_block_id,
- to_end_block_id=plan_block_length[plan_idx],
- num_rand_blocks=plan_num_rand_blocks[plan_idx],
- window_block_left=window_block_left,
- window_block_right=window_block_right,
- global_block_left=global_block_left,
- global_block_right=global_block_right,
- )
-
- for nh in range(num_heads):
- rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
-
- return rand_attn
-
- @staticmethod
- def _get_single_block_row_attention(
- block_id,
- to_start_block_id,
- to_end_block_id,
- num_rand_blocks,
- window_block_left=1,
- window_block_right=1,
- global_block_left=1,
- global_block_right=1,
- ):
- """
- For a single row block get random row attention.
-
- Args:
- block_id: int. block id of row.
- to_start_block_id: int. random attention column start id.
- to_end_block_id: int. random attention column end id.
- num_rand_blocks: int. number of random blocks to be selected.
- window_block_left: int. number of blocks of window to left of a block.
- window_block_right: int. number of blocks of window to right of a block.
- global_block_left: int. Number of blocks globally used to the left.
- global_block_right: int. Number of blocks globally used to the right.
-
- Returns:
- row containing the random attention vector of size num_rand_blocks.
- """
- # list of to_blocks from which to choose random attention
- to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
- # permute the blocks
- perm_block = np.random.permutation(to_block_list)
-
- # illegal blocks for the current block id, using window
- illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
-
- # Add blocks at the start and at the end
- illegal_blocks.extend(list(range(global_block_left)))
- illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
-
- # The second from_block cannot choose random attention on second last to_block
- if block_id == 1:
- illegal_blocks.append(to_end_block_id - 2)
-
- # The second last from_block cannot choose random attention on second to_block
- if block_id == to_end_block_id - 2:
- illegal_blocks.append(1)
-
-        selected_random_blocks = []
-
-        for i in range(to_end_block_id - to_start_block_id):
-            if perm_block[i] not in illegal_blocks:
-                selected_random_blocks.append(perm_block[i])
-            if len(selected_random_blocks) == num_rand_blocks:
-                break
-        return np.array(selected_random_blocks, dtype=np.int32)
-
-
-# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird
-class BigBirdSelfOutput(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
-class BigBirdAttention(nn.Module):
- def __init__(self, config, seed=None):
- super().__init__()
- self.attention_type = config.attention_type
- self.config = config
- self.seed = seed
-
- if self.config.attention_type == "original_full":
- self.self = BigBirdSelfAttention(config)
- elif self.config.attention_type == "block_sparse":
- self.self = BigBirdBlockSparseAttention(config, seed)
- else:
- raise ValueError(
- f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}"
- )
-
- self.output = BigBirdSelfOutput(config)
-
- def set_attention_type(self, value: str):
- if value not in ["original_full", "block_sparse"]:
- raise ValueError(
- f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
- )
- # attention type is already correctly set
- if value == self.attention_type:
- return
-
- self.attention_type = value
- if value == "original_full":
- # copy all weights to new full attention class
- attn_weights = BigBirdSelfAttention(self.config)
- else:
- # copy all weights to new sparse attention class
- attn_weights = BigBirdBlockSparseAttention(self.config, self.seed)
-
- attn_weights.query = self.self.query
- attn_weights.value = self.self.value
- attn_weights.key = self.self.key
- self.self = attn_weights
- self.attention_type = value
- if not self.training:
- self.self.eval()
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_value=None,
- output_attentions=False,
- # block_sparse config
- band_mask=None,
- from_mask=None,
- to_mask=None,
- from_blocked_mask=None,
- to_blocked_mask=None,
- ):
- # fp16 compatibility
- if band_mask is not None:
- band_mask = band_mask.to(hidden_states.dtype)
- if from_mask is not None:
- from_mask = from_mask.to(hidden_states.dtype)
- if to_mask is not None:
- to_mask = to_mask.to(hidden_states.dtype)
- if self.attention_type == "original_full":
- self_outputs = self.self(
- hidden_states,
- attention_mask,
- head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- past_key_value,
- output_attentions,
- )
- else:
- if encoder_hidden_states is not None:
- raise ValueError("BigBird cannot be used as a decoder when config.attention_type != 'original_full'")
- self_outputs = self.self(
- hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions
- )
-
- attention_output = self.output(self_outputs[0], hidden_states)
- outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
- return outputs
-
-
-# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird
-class BigBirdIntermediate(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
- if isinstance(config.hidden_act, str):
- self.intermediate_act_fn = ACT2FN[config.hidden_act]
- else:
- self.intermediate_act_fn = config.hidden_act
-
- def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
- hidden_states = self.dense(hidden_states)
- hidden_states = self.intermediate_act_fn(hidden_states)
- return hidden_states
-
-
-# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird
-class BigBirdOutput(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
-class BigBirdLayer(nn.Module):
- def __init__(self, config, seed=None):
- super().__init__()
- self.config = config
- self.attention_type = config.attention_type
- self.chunk_size_feed_forward = config.chunk_size_feed_forward
- self.seq_len_dim = 1
- self.attention = BigBirdAttention(config, seed=seed)
- self.is_decoder = config.is_decoder
- self.add_cross_attention = config.add_cross_attention
- if self.add_cross_attention:
- if not self.is_decoder:
- raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
- self.crossattention = BigBirdAttention(config)
- self.intermediate = BigBirdIntermediate(config)
- self.output = BigBirdOutput(config)
-
- def set_attention_type(self, value: str):
- if value not in ["original_full", "block_sparse"]:
- raise ValueError(
- f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
- )
- # attention type is already correctly set
- if value == self.attention_type:
- return
- self.attention_type = value
- self.attention.set_attention_type(value)
-
- if self.add_cross_attention:
- self.crossattention.set_attention_type(value)
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- band_mask=None,
- from_mask=None,
- to_mask=None,
- blocked_encoder_mask=None,
- past_key_value=None,
- output_attentions=False,
- ):
- # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
- self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
- self_attention_outputs = self.attention(
- hidden_states,
- attention_mask,
- head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- past_key_value=self_attn_past_key_value,
- output_attentions=output_attentions,
- band_mask=band_mask,
- from_mask=from_mask,
- to_mask=to_mask,
- from_blocked_mask=blocked_encoder_mask,
- to_blocked_mask=blocked_encoder_mask,
- )
- attention_output = self_attention_outputs[0]
-
- # if decoder, the last output is tuple of self-attn cache
- if self.is_decoder:
- outputs = self_attention_outputs[1:-1]
- present_key_value = self_attention_outputs[-1]
- else:
- outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
-
- cross_attn_present_key_value = None
- if self.is_decoder and encoder_hidden_states is not None:
- if not hasattr(self, "crossattention"):
- raise ValueError(
- f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
- " cross-attention layers by setting `config.add_cross_attention=True`"
- )
-
- # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
- cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
- cross_attention_outputs = self.crossattention(
- attention_output,
- attention_mask,
- head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- cross_attn_past_key_value,
- output_attentions,
- )
- attention_output = cross_attention_outputs[0]
- outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
-
- # add cross-attn cache to positions 3,4 of present_key_value tuple
- cross_attn_present_key_value = cross_attention_outputs[-1]
- present_key_value = present_key_value + cross_attn_present_key_value
-
- layer_output = apply_chunking_to_forward(
- self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
- )
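- # apply_chunking_to_forward splits attention_output into chunks of size
- # config.chunk_size_feed_forward along the sequence dimension (self.seq_len_dim == 1)
- # and runs feed_forward_chunk on each chunk to reduce peak memory; with the default
- # chunk_size_feed_forward == 0 the whole sequence is processed in a single pass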
-
- outputs = (layer_output,) + outputs
-
- # if decoder, return the attn key/values as the last output
- if self.is_decoder:
- outputs = outputs + (present_key_value,)
-
- return outputs
-
- def feed_forward_chunk(self, attention_output):
- intermediate_output = self.intermediate(attention_output)
- layer_output = self.output(intermediate_output, attention_output)
- return layer_output
-
-
-class BigBirdEncoder(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.config = config
- self.attention_type = config.attention_type
-
- self.layer = nn.ModuleList(
- [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]
- )
- self.gradient_checkpointing = False
-
- def set_attention_type(self, value: str):
- if value not in ["original_full", "block_sparse"]:
- raise ValueError(
- f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
- )
- # attention type is already correctly set
- if value == self.attention_type:
- return
- self.attention_type = value
- for layer in self.layer:
- layer.set_attention_type(value)
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_values=None,
- use_cache=None,
- output_attentions=False,
- output_hidden_states=False,
- band_mask=None,
- from_mask=None,
- to_mask=None,
- blocked_encoder_mask=None,
- return_dict=True,
- ) -> Union[BaseModelOutputWithPastAndCrossAttentions, Tuple]:
- all_hidden_states = () if output_hidden_states else None
- all_self_attentions = () if output_attentions else None
- all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
-
- if self.gradient_checkpointing and self.training:
- if use_cache:
- logger.warning_once(
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
- )
- use_cache = False
-
- next_decoder_cache = () if use_cache else None
-
- for i, layer_module in enumerate(self.layer):
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- layer_head_mask = head_mask[i] if head_mask is not None else None
- past_key_value = past_key_values[i] if past_key_values is not None else None
-
- if self.gradient_checkpointing and self.training:
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- return module(*inputs, past_key_value, output_attentions)
-
- return custom_forward
-
- layer_outputs = torch.utils.checkpoint.checkpoint(
- create_custom_forward(layer_module),
- hidden_states,
- attention_mask,
- layer_head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- band_mask,
- from_mask,
- to_mask,
- blocked_encoder_mask,
- )
- else:
- layer_outputs = layer_module(
- hidden_states,
- attention_mask,
- layer_head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- band_mask,
- from_mask,
- to_mask,
- blocked_encoder_mask,
- past_key_value,
- output_attentions,
- )
-
- hidden_states = layer_outputs[0]
- if use_cache:
- next_decoder_cache += (layer_outputs[-1],)
- if output_attentions:
- all_self_attentions = all_self_attentions + (layer_outputs[1],)
- if self.config.add_cross_attention:
- all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
-
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- if not return_dict:
- return tuple(
- v
- for v in [
- hidden_states,
- next_decoder_cache,
- all_hidden_states,
- all_self_attentions,
- all_cross_attentions,
- ]
- if v is not None
- )
- return BaseModelOutputWithPastAndCrossAttentions(
- last_hidden_state=hidden_states,
- past_key_values=next_decoder_cache,
- hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- cross_attentions=all_cross_attentions,
- )
-
-
-# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird
-class BigBirdPredictionHeadTransform(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- if isinstance(config.hidden_act, str):
- self.transform_act_fn = ACT2FN[config.hidden_act]
- else:
- self.transform_act_fn = config.hidden_act
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-
- def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
- hidden_states = self.dense(hidden_states)
- hidden_states = self.transform_act_fn(hidden_states)
- hidden_states = self.LayerNorm(hidden_states)
- return hidden_states
-
-
-# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird
-class BigBirdLMPredictionHead(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.transform = BigBirdPredictionHeadTransform(config)
-
- # The output weights are the same as the input embeddings, but there is
- # an output-only bias for each token.
- self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
- self.bias = nn.Parameter(torch.zeros(config.vocab_size))
-
- # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
- self.decoder.bias = self.bias
-
- def forward(self, hidden_states):
- hidden_states = self.transform(hidden_states)
- hidden_states = self.decoder(hidden_states)
- return hidden_states
-
-
-# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird
-class BigBirdOnlyMLMHead(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.predictions = BigBirdLMPredictionHead(config)
-
- def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
- prediction_scores = self.predictions(sequence_output)
- return prediction_scores
-
-
-# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird
-class BigBirdOnlyNSPHead(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.seq_relationship = nn.Linear(config.hidden_size, 2)
-
- def forward(self, pooled_output):
- seq_relationship_score = self.seq_relationship(pooled_output)
- return seq_relationship_score
-
-
-# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird
-class BigBirdPreTrainingHeads(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.predictions = BigBirdLMPredictionHead(config)
- self.seq_relationship = nn.Linear(config.hidden_size, 2)
-
- def forward(self, sequence_output, pooled_output):
- prediction_scores = self.predictions(sequence_output)
- seq_relationship_score = self.seq_relationship(pooled_output)
- return prediction_scores, seq_relationship_score
-
-
-class BigBirdPreTrainedModel(PreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- config_class = BigBirdConfig
- load_tf_weights = load_tf_weights_in_big_bird
- base_model_prefix = "bert"
- supports_gradient_checkpointing = True
-
- def _init_weights(self, module):
- """Initialize the weights"""
- if isinstance(module, nn.Linear):
- # Slightly different from the TF version which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.Embedding):
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
- def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, BigBirdEncoder):
- module.gradient_checkpointing = value
-
-
-BIG_BIRD_START_DOCSTRING = r"""
- This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
- it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
- behavior.
-
- Parameters:
- config ([`BigBirdConfig`]): Model configuration class with all the parameters of the model.
- Initializing with a config file does not load the weights associated with the model, only the
- configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
-"""
-
-BIG_BIRD_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`torch.LongTensor` of shape `({0})`):
- Indices of input sequence tokens in the vocabulary.
-
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
-
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
-
- [What are attention masks?](../glossary#attention-mask)
- token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
- Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
- 1]`:
-
- - 0 corresponds to a *sentence A* token,
- - 1 corresponds to a *sentence B* token.
-
- [What are token type IDs?](../glossary#token-type-ids)
- position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
- Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
- config.max_position_embeddings - 1]`.
-
- [What are position IDs?](../glossary#position-ids)
- head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
- Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
-
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
-
- inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
- is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
- model's internal embedding lookup matrix.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
- tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-"""
-
-
-@dataclass
-class BigBirdForPreTrainingOutput(ModelOutput):
- """
- Output type of [`BigBirdForPreTraining`].
-
- Args:
- loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
- Total loss as the sum of the masked language modeling loss and the next sequence prediction
- (classification) loss.
- prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
- seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
- Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
- before SoftMax).
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
- shape `(batch_size, sequence_length, hidden_size)`.
-
- Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
- Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
- sequence_length)`.
-
- Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
- heads.
- """
-
- loss: Optional[torch.FloatTensor] = None
- prediction_logits: torch.FloatTensor = None
- seq_relationship_logits: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
-
-
-@dataclass
-class BigBirdForQuestionAnsweringModelOutput(ModelOutput):
- """
- Base class for outputs of question answering models.
-
- Args:
- loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
- Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
- start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
- Span-start scores (before SoftMax).
- end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
- Span-end scores (before SoftMax).
- pooler_output (`torch.FloatTensor` of shape `(batch_size, 1)`):
- Pooler output from the BigBirdModel.
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
- shape `(batch_size, sequence_length, hidden_size)`.
-
- Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
- Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
- sequence_length)`.
-
- Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
- heads.
- """
-
- loss: Optional[torch.FloatTensor] = None
- start_logits: torch.FloatTensor = None
- end_logits: torch.FloatTensor = None
- pooler_output: torch.FloatTensor = None
- hidden_states: Optional[Tuple[torch.FloatTensor]] = None
- attentions: Optional[Tuple[torch.FloatTensor]] = None
-
-
-@add_start_docstrings(
- "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.",
- BIG_BIRD_START_DOCSTRING,
-)
-class BigBirdModel(BigBirdPreTrainedModel):
- """
-
- The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
- cross-attention is added between the self-attention layers, following the architecture described in [Attention is
- all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
- Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
-
- To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
- to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
- `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
- """
-
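- # Configuration sketch (illustrative, not part of the model definition): to use BigBird as a
- # decoder as described above, one would set, for example,
- #   config = BigBirdConfig.from_pretrained("google/bigbird-roberta-base")
- #   config.is_decoder = True
- #   config.add_cross_attention = True
- #   config.attention_type = "original_full"  # block_sparse cannot be used for cross-attention
- #   decoder = BigBirdModel(config)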
- def __init__(self, config, add_pooling_layer=True):
- super().__init__(config)
- self.attention_type = self.config.attention_type
- self.config = config
-
- self.block_size = self.config.block_size
-
- self.embeddings = BigBirdEmbeddings(config)
- self.encoder = BigBirdEncoder(config)
-
- if add_pooling_layer:
- self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
- self.activation = nn.Tanh()
- else:
- self.pooler = None
- self.activation = None
-
- if self.attention_type != "original_full" and config.add_cross_attention:
- logger.warning(
- "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting"
- " `attention_type=original_full`"
- )
- self.set_attention_type("original_full")
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.embeddings.word_embeddings
-
- def set_input_embeddings(self, value):
- self.embeddings.word_embeddings = value
-
- def set_attention_type(self, value: str):
- if value not in ["original_full", "block_sparse"]:
- raise ValueError(
- f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
- )
- # attention type is already correctly set
- if value == self.attention_type:
- return
- self.attention_type = value
- self.encoder.set_attention_type(value)
-
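- # Usage sketch (illustrative): a loaded model can be switched between the two attention
- # implementations without retraining, since set_attention_type reuses the projection weights:
- #   model = BigBirdModel.from_pretrained("google/bigbird-roberta-base")
- #   model.set_attention_type("original_full")  # e.g. for short sequences
- #   model.set_attention_type("block_sparse")   # back to sparse attention for long inputs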
- @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @add_code_sample_docstrings(
- checkpoint=_CHECKPOINT_FOR_DOC,
- output_type=BaseModelOutputWithPoolingAndCrossAttentions,
- config_class=_CONFIG_FOR_DOC,
- )
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
- past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]:
- r"""
- encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
- the model is configured as a decoder.
- encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
- the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
- If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
- don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
- `decoder_input_ids` of shape `(batch_size, sequence_length)`.
- use_cache (`bool`, *optional*):
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
- `past_key_values`).
- """
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if self.config.is_decoder:
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- else:
- use_cache = False
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
- input_shape = input_ids.size()
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- batch_size, seq_length = input_shape
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- # past_key_values_length
- past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
-
- if attention_mask is None:
- attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
- if token_type_ids is None:
- if hasattr(self.embeddings, "token_type_ids"):
- buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
- buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
- token_type_ids = buffered_token_type_ids_expanded
- else:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
-
- # in order to use block_sparse attention, sequence_length has to be larger than the sum of
- # global attention tokens: 2 * block_size
- # + sliding tokens: 3 * block_size
- # + random tokens: 2 * num_random_blocks * block_size
- max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size
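- # e.g. with the default BigBird config values (block_size=64, num_random_blocks=3),
- # max_tokens_to_attend = (5 + 2 * 3) * 64 = 704, so any sequence of 704 tokens or
- # fewer falls back to full attention below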
- if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend:
- # change attention_type from block_sparse to original_full
- sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)
- logger.warning(
- "Attention type 'block_sparse' is not possible if sequence_length: "
- f"{sequence_length} <= num global tokens: 2 * config.block_size "
- "+ min. num sliding tokens: 3 * config.block_size "
- "+ config.num_random_blocks * config.block_size "
- "+ additional buffer: config.num_random_blocks * config.block_size "
- f"= {max_tokens_to_attend} with config.block_size "
- f"= {self.config.block_size}, config.num_random_blocks "
- f"= {self.config.num_random_blocks}. "
- "Changing attention type to 'original_full'..."
- )
- self.set_attention_type("original_full")
-
- if self.attention_type == "block_sparse":
- (
- padding_len,
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- inputs_embeds,
- ) = self._pad_to_block_size(
- input_ids=input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- inputs_embeds=inputs_embeds,
- pad_token_id=self.config.pad_token_id,
- )
- else:
- padding_len = 0
-
- if self.attention_type == "block_sparse":
- blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(
- attention_mask, self.block_size
- )
- extended_attention_mask = None
-
- elif self.attention_type == "original_full":
- blocked_encoder_mask = None
- band_mask = None
- from_mask = None
- to_mask = None
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
- # ourselves in which case we just need to make it broadcastable to all heads.
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
- else:
- raise ValueError(
- f"attention_type can either be original_full or block_sparse, but is {self.attention_type}"
- )
-
- # If a 2D or 3D attention mask is provided for the cross-attention
- # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
- if self.config.is_decoder and encoder_hidden_states is not None:
- encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
- encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
- if encoder_attention_mask is None:
- encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
- encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
- else:
- encoder_extended_attention_mask = None
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape bsz x n_heads x N x N
- # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
- # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
- head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
-
- embedding_output = self.embeddings(
- input_ids=input_ids,
- position_ids=position_ids,
- token_type_ids=token_type_ids,
- inputs_embeds=inputs_embeds,
- past_key_values_length=past_key_values_length,
- )
-
- encoder_outputs = self.encoder(
- embedding_output,
- attention_mask=extended_attention_mask,
- head_mask=head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_extended_attention_mask,
- past_key_values=past_key_values,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- band_mask=band_mask,
- from_mask=from_mask,
- to_mask=to_mask,
- blocked_encoder_mask=blocked_encoder_mask,
- return_dict=return_dict,
- )
- sequence_output = encoder_outputs[0]
-
- pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None
-
- # undo padding
- if padding_len > 0:
- # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)
- sequence_output = sequence_output[:, :-padding_len]
-
- if not return_dict:
- return (sequence_output, pooler_output) + encoder_outputs[1:]
-
- return BaseModelOutputWithPoolingAndCrossAttentions(
- last_hidden_state=sequence_output,
- pooler_output=pooler_output,
- past_key_values=encoder_outputs.past_key_values,
- hidden_states=encoder_outputs.hidden_states,
- attentions=encoder_outputs.attentions,
- cross_attentions=encoder_outputs.cross_attentions,
- )
-
- @staticmethod
- def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):
- batch_size, seq_length = attention_mask.size()
- if seq_length % block_size != 0:
- raise ValueError(
- f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block"
- f" size is {block_size}."
- )
-
- def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
- """
- Create the 5D band attention mask from the blocked 3D attention masks.
-
- Args:
- from_blocked_mask: 3D Tensor of shape [batch_size,
- from_seq_length//from_block_size, from_block_size].
- to_blocked_mask: 3D int32 Tensor of shape [batch_size,
- to_seq_length//to_block_size, to_block_size].
-
- Returns:
- float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,
- 3*to_block_size].
- """
- exp_blocked_to_pad = torch.cat(
- [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2
- )
- band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)
- band_mask.unsqueeze_(1)
- return band_mask
-
- blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)
- band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)
-
- from_mask = attention_mask.view(batch_size, 1, seq_length, 1)
- to_mask = attention_mask.view(batch_size, 1, 1, seq_length)
-
- return blocked_encoder_mask, band_mask, from_mask, to_mask
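- # Shape sketch (illustrative values): with batch_size=2, seq_length=448 and block_size=64
- # (7 blocks), the masks returned above have shapes
- # blocked_encoder_mask: (2, 7, 64), band_mask: (2, 1, 3, 64, 192),
- # from_mask: (2, 1, 448, 1), to_mask: (2, 1, 1, 448)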
-
- def _pad_to_block_size(
- self,
- input_ids: torch.Tensor,
- attention_mask: torch.Tensor,
- token_type_ids: torch.Tensor,
- position_ids: torch.Tensor,
- inputs_embeds: torch.Tensor,
- pad_token_id: int,
- ):
- """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention."""
- # padding
- block_size = self.config.block_size
-
- input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
- batch_size, seq_len = input_shape[:2]
-
- padding_len = (block_size - seq_len % block_size) % block_size
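- # e.g. with block_size=64 and seq_len=919 (the long-article example used in the
- # docstrings below), padding_len = (64 - 919 % 64) % 64 = 41, so the input is padded
- # to 960 = 15 * 64 tokens and the 41 extra positions are masked out below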
- if padding_len > 0:
- logger.info(
- f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
- f"`config.block_size`: {block_size}"
- )
- if input_ids is not None:
- input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id)
- if position_ids is not None:
- # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings
- position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id)
- if inputs_embeds is not None:
- input_ids_padding = inputs_embeds.new_full(
- (batch_size, padding_len),
- self.config.pad_token_id,
- dtype=torch.long,
- )
- inputs_embeds_padding = self.embeddings(input_ids_padding)
- inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
-
- attention_mask = nn.functional.pad(
- attention_mask, (0, padding_len), value=False
- ) # no attention on the padding tokens
- token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0
-
- return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
-
-
-class BigBirdForPreTraining(BigBirdPreTrainedModel):
- _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
-
- def __init__(self, config):
- super().__init__(config)
-
- self.bert = BigBirdModel(config, add_pooling_layer=True)
- self.cls = BigBirdPreTrainingHeads(config)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_output_embeddings(self):
- return self.cls.predictions.decoder
-
- def set_output_embeddings(self, new_embeddings):
- self.cls.predictions.decoder = new_embeddings
-
- @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.FloatTensor] = None,
- next_sentence_label: Optional[torch.LongTensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[BigBirdForPreTrainingOutput, Tuple[torch.FloatTensor]]:
- r"""
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
- config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
- loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
- next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
- Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be
- added to masked_lm loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in
- `[0, 1]`:
-
- - 0 indicates sequence B is a continuation of sequence A,
- - 1 indicates sequence B is a random sequence.
- kwargs (`Dict[str, any]`, optional, defaults to *{}*):
- Used to hide legacy arguments that have been deprecated.
-
- Returns:
-
- Example:
-
- ```python
- >>> from transformers import AutoTokenizer, BigBirdForPreTraining
- >>> import torch
-
- >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
- >>> model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base")
-
- >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
- >>> outputs = model(**inputs)
-
- >>> prediction_logits = outputs.prediction_logits
- >>> seq_relationship_logits = outputs.seq_relationship_logits
- ```"""
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- sequence_output, pooled_output = outputs[:2]
- prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
-
- total_loss = None
- if labels is not None:
- loss_fct = CrossEntropyLoss()
- total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
-
- if next_sentence_label is not None and total_loss is not None:
- next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
- total_loss = total_loss + next_sentence_loss
-
- if not return_dict:
- output = (prediction_scores, seq_relationship_score) + outputs[2:]
- return ((total_loss,) + output) if total_loss is not None else output
-
- return BigBirdForPreTrainingOutput(
- loss=total_loss,
- prediction_logits=prediction_scores,
- seq_relationship_logits=seq_relationship_score,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
-
-@add_start_docstrings("""BigBird Model with a `language modeling` head on top.""", BIG_BIRD_START_DOCSTRING)
-class BigBirdForMaskedLM(BigBirdPreTrainedModel):
- _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
-
- def __init__(self, config):
- super().__init__(config)
-
- if config.is_decoder:
- logger.warning(
- "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for "
- "bi-directional self-attention."
- )
-
- self.bert = BigBirdModel(config)
- self.cls = BigBirdOnlyMLMHead(config)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_output_embeddings(self):
- return self.cls.predictions.decoder
-
- def set_output_embeddings(self, new_embeddings):
- self.cls.predictions.decoder = new_embeddings
-
- @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]:
- r"""
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
- config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
- loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-
- Returns:
-
- Example:
-
- ```python
- >>> import torch
- >>> from transformers import AutoTokenizer, BigBirdForMaskedLM
- >>> from datasets import load_dataset
-
- >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
- >>> model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base")
- >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT
-
- >>> # select random long article
- >>> LONG_ARTICLE_TARGET = squad_ds[81514]["context"]
- >>> # select random sentence
- >>> LONG_ARTICLE_TARGET[332:398]
- 'the highest values are very close to the theoretical maximum value'
-
- >>> # add mask_token
- >>> LONG_ARTICLE_TO_MASK = LONG_ARTICLE_TARGET.replace("maximum", "[MASK]")
- >>> inputs = tokenizer(LONG_ARTICLE_TO_MASK, return_tensors="pt")
- >>> # long article input
- >>> list(inputs["input_ids"].shape)
- [1, 919]
-
- >>> with torch.no_grad():
- ... logits = model(**inputs).logits
- >>> # retrieve index of [MASK]
- >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
- >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
- >>> tokenizer.decode(predicted_token_id)
- 'maximum'
- ```
-
- ```python
- >>> labels = tokenizer(LONG_ARTICLE_TARGET, return_tensors="pt")["input_ids"]
- >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
- >>> outputs = model(**inputs, labels=labels)
- >>> round(outputs.loss.item(), 2)
- 1.99
- ```
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- sequence_output = outputs[0]
- prediction_scores = self.cls(sequence_output)
-
- masked_lm_loss = None
- if labels is not None:
- loss_fct = CrossEntropyLoss() # -100 index = padding token
- masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
-
- if not return_dict:
- output = (prediction_scores,) + outputs[2:]
- return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
-
- return MaskedLMOutput(
- loss=masked_lm_loss,
- logits=prediction_scores,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
- def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
- input_shape = input_ids.shape
- effective_batch_size = input_shape[0]
-
- # add a dummy token
- if self.config.pad_token_id is None:
- raise ValueError("The PAD token should be defined for generation")
- attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
- dummy_token = torch.full(
- (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
- )
- input_ids = torch.cat([input_ids, dummy_token], dim=1)
-
- return {"input_ids": input_ids, "attention_mask": attention_mask}
-
-
-@add_start_docstrings(
- """BigBird Model with a `language modeling` head on top for CLM fine-tuning.""", BIG_BIRD_START_DOCSTRING
-)
-class BigBirdForCausalLM(BigBirdPreTrainedModel):
- _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
-
- def __init__(self, config):
- super().__init__(config)
-
- if not config.is_decoder:
- logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`")
-
- self.bert = BigBirdModel(config)
- self.cls = BigBirdOnlyMLMHead(config)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_output_embeddings(self):
- return self.cls.predictions.decoder
-
- def set_output_embeddings(self, new_embeddings):
- self.cls.predictions.decoder = new_embeddings
-
- @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @add_code_sample_docstrings(
- checkpoint=_CHECKPOINT_FOR_DOC,
- output_type=CausalLMOutputWithCrossAttentions,
- config_class=_CONFIG_FOR_DOC,
- )
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
- past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
- labels: Optional[torch.LongTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[CausalLMOutputWithCrossAttentions, Tuple[torch.FloatTensor]]:
- r"""
- encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
- the model is configured as a decoder.
- encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
- the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
- If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
- don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
- `decoder_input_ids` of shape `(batch_size, sequence_length)`.
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
- `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
- ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
- use_cache (`bool`, *optional*):
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
- `past_key_values`).
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- past_key_values=past_key_values,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- sequence_output = outputs[0]
- prediction_scores = self.cls(sequence_output)
-
- lm_loss = None
- if labels is not None:
- # we are doing next-token prediction; shift prediction scores and input ids by one
- shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
- labels = labels[:, 1:].contiguous()
- loss_fct = CrossEntropyLoss()
- lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
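- # e.g. for a 4-token sequence [t0, t1, t2, t3], the scores at positions 0..2 are
- # compared against targets [t1, t2, t3], so every position learns to predict the
- # token that follows it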
-
- if not return_dict:
- output = (prediction_scores,) + outputs[2:]
- return ((lm_loss,) + output) if lm_loss is not None else output
-
- return CausalLMOutputWithCrossAttentions(
- loss=lm_loss,
- logits=prediction_scores,
- past_key_values=outputs.past_key_values,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- cross_attentions=outputs.cross_attentions,
- )
-
- def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
- input_shape = input_ids.shape
-
- # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
- if attention_mask is None:
- attention_mask = input_ids.new_ones(input_shape)
-
- # cut decoder_input_ids if past is used
- if past_key_values is not None:
- input_ids = input_ids[:, -1:]
-
- return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
-
- def _reorder_cache(self, past_key_values, beam_idx):
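- # during beam search, gather each layer's cached self-attention key/value states (the
- # first two entries of layer_past) along the batch dimension so that the cache follows
- # the selected beams; any cross-attention entries (layer_past[2:]) are left untouched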
- reordered_past = ()
- for layer_past in past_key_values:
- reordered_past += (
- tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
- + layer_past[2:],
- )
- return reordered_past
-
-
-class BigBirdClassificationHead(nn.Module):
- """Head for sentence-level classification tasks."""
-
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- classifier_dropout = (
- config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
- )
- self.dropout = nn.Dropout(classifier_dropout)
- self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
-
- self.config = config
-
- def forward(self, features, **kwargs):
- x = features[:, 0, :]  # take the first token, <s> (equivalent to [CLS])
- x = self.dropout(x)
- x = self.dense(x)
- x = ACT2FN[self.config.hidden_act](x)
- x = self.dropout(x)
- x = self.out_proj(x)
- return x
-
-
-@add_start_docstrings(
- """
- BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the
- pooled output) e.g. for GLUE tasks.
- """,
- BIG_BIRD_START_DOCSTRING,
-)
-class BigBirdForSequenceClassification(BigBirdPreTrainedModel):
- def __init__(self, config):
- super().__init__(config)
- self.num_labels = config.num_labels
- self.config = config
- self.bert = BigBirdModel(config)
- self.classifier = BigBirdClassificationHead(config)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
- r"""
- labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
- Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
- config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
- `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
-
- Returns:
-
- Example:
-
- ```python
- >>> import torch
- >>> from transformers import AutoTokenizer, BigBirdForSequenceClassification
- >>> from datasets import load_dataset
-
- >>> tokenizer = AutoTokenizer.from_pretrained("l-yohai/bigbird-roberta-base-mnli")
- >>> model = BigBirdForSequenceClassification.from_pretrained("l-yohai/bigbird-roberta-base-mnli")
- >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT
-
- >>> LONG_ARTICLE = squad_ds[81514]["context"]
- >>> inputs = tokenizer(LONG_ARTICLE, return_tensors="pt")
- >>> # long input article
- >>> list(inputs["input_ids"].shape)
- [1, 919]
-
- >>> with torch.no_grad():
- ... logits = model(**inputs).logits
- >>> predicted_class_id = logits.argmax().item()
- >>> model.config.id2label[predicted_class_id]
- 'LABEL_0'
- ```
-
- ```python
- >>> num_labels = len(model.config.id2label)
- >>> model = BigBirdForSequenceClassification.from_pretrained(
- ... "l-yohai/bigbird-roberta-base-mnli", num_labels=num_labels
- ... )
- >>> labels = torch.tensor(1)
- >>> loss = model(**inputs, labels=labels).loss
- >>> round(loss.item(), 2)
- 1.13
- ```
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- sequence_output = outputs[0]
- logits = self.classifier(sequence_output)
-
- loss = None
- if labels is not None:
- if self.config.problem_type is None:
- if self.num_labels == 1:
- self.config.problem_type = "regression"
- elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
- self.config.problem_type = "single_label_classification"
- else:
- self.config.problem_type = "multi_label_classification"
-
- if self.config.problem_type == "regression":
- loss_fct = MSELoss()
- if self.num_labels == 1:
- loss = loss_fct(logits.squeeze(), labels.squeeze())
- else:
- loss = loss_fct(logits, labels)
- elif self.config.problem_type == "single_label_classification":
- loss_fct = CrossEntropyLoss()
- loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
- elif self.config.problem_type == "multi_label_classification":
- loss_fct = BCEWithLogitsLoss()
- loss = loss_fct(logits, labels)
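- # e.g. integer labels with num_labels > 1 select "single_label_classification"
- # (cross-entropy), num_labels == 1 selects "regression" (MSE), and float multi-hot
- # label vectors select "multi_label_classification" (BCE with logits)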
-
- if not return_dict:
- output = (logits,) + outputs[2:]
- return ((loss,) + output) if loss is not None else output
-
- return SequenceClassifierOutput(
- loss=loss,
- logits=logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
-
-@add_start_docstrings(
- """
- BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
- softmax) e.g. for RocStories/SWAG tasks.
- """,
- BIG_BIRD_START_DOCSTRING,
-)
-class BigBirdForMultipleChoice(BigBirdPreTrainedModel):
- def __init__(self, config):
- super().__init__(config)
-
- self.bert = BigBirdModel(config)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
- self.classifier = nn.Linear(config.hidden_size, 1)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- @add_start_docstrings_to_model_forward(
- BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
- )
- @add_code_sample_docstrings(
- checkpoint=_CHECKPOINT_FOR_DOC,
- output_type=MultipleChoiceModelOutput,
- config_class=_CONFIG_FOR_DOC,
- )
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]:
- r"""
- labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
- Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
- num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
- `input_ids` above)
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
-
- input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
- attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
- token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
- position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
- inputs_embeds = (
- inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
- if inputs_embeds is not None
- else None
- )
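- # e.g. inputs of shape (batch_size=4, num_choices=5, seq_len=128) are flattened here to
- # (20, 128), encoded in a single forward pass, and the per-choice logits are reshaped
- # back to (4, 5) below before the cross-entropy over the choice dimension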
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- pooled_output = outputs[1]
-
- pooled_output = self.dropout(pooled_output)
- logits = self.classifier(pooled_output)
- reshaped_logits = logits.view(-1, num_choices)
-
- loss = None
- if labels is not None:
- loss_fct = CrossEntropyLoss()
- loss = loss_fct(reshaped_logits, labels)
-
- if not return_dict:
- output = (reshaped_logits,) + outputs[2:]
- return ((loss,) + output) if loss is not None else output
-
- return MultipleChoiceModelOutput(
- loss=loss,
- logits=reshaped_logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
-
-@add_start_docstrings(
- """
- BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
- Named-Entity-Recognition (NER) tasks.
- """,
- BIG_BIRD_START_DOCSTRING,
-)
-class BigBirdForTokenClassification(BigBirdPreTrainedModel):
- def __init__(self, config):
- super().__init__(config)
- self.num_labels = config.num_labels
-
- self.bert = BigBirdModel(config)
- classifier_dropout = (
- config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
- )
- self.dropout = nn.Dropout(classifier_dropout)
- self.classifier = nn.Linear(config.hidden_size, config.num_labels)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @add_code_sample_docstrings(
- checkpoint=_CHECKPOINT_FOR_DOC,
- output_type=TokenClassifierOutput,
- config_class=_CONFIG_FOR_DOC,
- )
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]:
- r"""
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- sequence_output = outputs[0]
-
- sequence_output = self.dropout(sequence_output)
- logits = self.classifier(sequence_output)
-
- loss = None
- if labels is not None:
- loss_fct = CrossEntropyLoss()
- loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
-
- if not return_dict:
- output = (logits,) + outputs[2:]
- return ((loss,) + output) if loss is not None else output
-
- return TokenClassifierOutput(
- loss=loss,
- logits=logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
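The token-classification head above emits one `num_labels`-way score per token; the loss flattens logits to `(batch_size * sequence_length, num_labels)` and labels to a 1-D vector, and `CrossEntropyLoss` ignores positions labelled `-100` by default. A small sketch with hypothetical sizes:

```python
import torch
from torch import nn

batch_size, seq_len, num_labels = 2, 6, 5

logits = torch.randn(batch_size, seq_len, num_labels)        # stand-in for the classifier output
labels = torch.randint(0, num_labels, (batch_size, seq_len))
labels[:, -2:] = -100                                         # e.g. padding positions, excluded from the loss

loss = nn.CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(loss.item())
```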
-
-class BigBirdForQuestionAnsweringHead(nn.Module):
- """Head for question answering tasks."""
-
- def __init__(self, config):
- super().__init__()
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
- self.intermediate = BigBirdIntermediate(config)
- self.output = BigBirdOutput(config)
- self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-
- def forward(self, encoder_output):
- hidden_states = self.dropout(encoder_output)
- hidden_states = self.intermediate(hidden_states)
- hidden_states = self.output(hidden_states, encoder_output)
- hidden_states = self.qa_outputs(hidden_states)
- return hidden_states
-
-
-@add_start_docstrings(
- """
-    BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
-    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
- """,
- BIG_BIRD_START_DOCSTRING,
-)
-class BigBirdForQuestionAnswering(BigBirdPreTrainedModel):
- def __init__(self, config, add_pooling_layer=False):
- super().__init__(config)
-
- config.num_labels = 2
- self.num_labels = config.num_labels
- self.sep_token_id = config.sep_token_id
-
- self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer)
- self.qa_classifier = BigBirdForQuestionAnsweringHead(config)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- @replace_return_docstrings(output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
- def forward(
- self,
- input_ids: Optional[torch.LongTensor] = None,
- attention_mask: Optional[torch.FloatTensor] = None,
- question_lengths: Optional[torch.Tensor] = None,
- token_type_ids: Optional[torch.LongTensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- head_mask: Optional[torch.FloatTensor] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- start_positions: Optional[torch.LongTensor] = None,
- end_positions: Optional[torch.LongTensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[BigBirdForQuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]:
- r"""
- start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
- Labels for position (index) of the start of the labelled span for computing the token classification loss.
-            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
-            are not taken into account for computing the loss.
- end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
- Labels for position (index) of the end of the labelled span for computing the token classification loss.
-            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
-            are not taken into account for computing the loss.
-
- Returns:
-
- Example:
-
- ```python
- >>> import torch
- >>> from transformers import AutoTokenizer, BigBirdForQuestionAnswering
- >>> from datasets import load_dataset
-
- >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
- >>> model = BigBirdForQuestionAnswering.from_pretrained("google/bigbird-roberta-base")
- >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT
-
- >>> # select random article and question
- >>> LONG_ARTICLE = squad_ds[81514]["context"]
- >>> QUESTION = squad_ds[81514]["question"]
- >>> QUESTION
- 'During daytime how high can the temperatures reach?'
-
- >>> inputs = tokenizer(QUESTION, LONG_ARTICLE, return_tensors="pt")
- >>> # long article and question input
- >>> list(inputs["input_ids"].shape)
- [1, 929]
-
- >>> with torch.no_grad():
- ... outputs = model(**inputs)
-
- >>> answer_start_index = outputs.start_logits.argmax()
- >>> answer_end_index = outputs.end_logits.argmax()
- >>> predict_answer_token_ids = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
- >>> predict_answer_token = tokenizer.decode(predict_answer_token_ids)
- ```
-
- ```python
- >>> target_start_index, target_end_index = torch.tensor([130]), torch.tensor([132])
- >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
- >>> loss = outputs.loss
- ```
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)
-
- if question_lengths is None and input_ids is not None:
-            # assuming input_ids are formatted as: [CLS] question tokens [SEP] context tokens ...
- question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1
- question_lengths.unsqueeze_(1)
-
- logits_mask = None
- if question_lengths is not None:
- # setting lengths logits to `-inf`
- logits_mask = self.prepare_question_mask(question_lengths, seqlen)
- if token_type_ids is None:
- token_type_ids = torch.ones(logits_mask.size(), dtype=int, device=logits_mask.device) - logits_mask
- logits_mask = logits_mask
- logits_mask[:, 0] = False
- logits_mask.unsqueeze_(2)
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- sequence_output = outputs[0]
- logits = self.qa_classifier(sequence_output)
-
- if logits_mask is not None:
- # removing question tokens from the competition
- logits = logits - logits_mask * 1e6
-
- start_logits, end_logits = logits.split(1, dim=-1)
- start_logits = start_logits.squeeze(-1).contiguous()
- end_logits = end_logits.squeeze(-1).contiguous()
-
- total_loss = None
- if start_positions is not None and end_positions is not None:
-            # If we are on multi-GPU, the split adds a dimension; squeeze it
- if len(start_positions.size()) > 1:
- start_positions = start_positions.squeeze(-1)
- if len(end_positions.size()) > 1:
- end_positions = end_positions.squeeze(-1)
- # sometimes the start/end positions are outside our model inputs, we ignore these terms
- ignored_index = start_logits.size(1)
- start_positions = start_positions.clamp(0, ignored_index)
- end_positions = end_positions.clamp(0, ignored_index)
-
- loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
- start_loss = loss_fct(start_logits, start_positions)
- end_loss = loss_fct(end_logits, end_positions)
- total_loss = (start_loss + end_loss) / 2
-
- if not return_dict:
- output = (start_logits, end_logits) + outputs[2:]
- return ((total_loss,) + output) if total_loss is not None else output
-
- return BigBirdForQuestionAnsweringModelOutput(
- loss=total_loss,
- start_logits=start_logits,
- end_logits=end_logits,
- pooler_output=outputs.pooler_output,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
- @staticmethod
- def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):
- # q_lengths -> (bz, 1)
- mask = torch.arange(0, maxlen).to(q_lengths.device)
- mask.unsqueeze_(0) # -> (1, maxlen)
- mask = torch.where(mask < q_lengths, 1, 0)
- return mask
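`prepare_question_mask` marks every position before (and including) the first `[SEP]` as belonging to the question; the forward pass above then re-enables the first (CLS) position and subtracts a large constant from the masked positions so question tokens cannot be predicted as span boundaries. A minimal sketch with hypothetical lengths:

```python
import torch

def prepare_question_mask(q_lengths, maxlen):
    # q_lengths -> (bz, 1); positions < question length get a 1
    mask = torch.arange(0, maxlen).unsqueeze(0)   # (1, maxlen)
    return torch.where(mask < q_lengths, 1, 0)    # (bz, maxlen)

q_lengths = torch.tensor([[3], [5]])              # question length per example
seqlen = 8

logits_mask = prepare_question_mask(q_lengths, seqlen)
logits_mask[:, 0] = 0                             # keep the CLS position in play
print(logits_mask)
# tensor([[0, 1, 1, 0, 0, 0, 0, 0],
#         [0, 1, 1, 1, 1, 0, 0, 0]])

start_logits = torch.randn(2, seqlen)
start_logits = start_logits - logits_mask * 1e6   # question tokens can no longer win the argmax
```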
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv2/tokenization_layoutlmv2_fast.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv2/tokenization_layoutlmv2_fast.py
deleted file mode 100644
index bed4e133aa3c5ceec5b2277390ecfb41e56b4e1c..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv2/tokenization_layoutlmv2_fast.py
+++ /dev/null
@@ -1,817 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Fast tokenization class for LayoutLMv2. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
-and _encode_plus, in which the Rust tokenizer is used.
-"""
-
-import json
-from typing import Dict, List, Optional, Tuple, Union
-
-from tokenizers import normalizers
-
-from ...tokenization_utils_base import (
- BatchEncoding,
- EncodedInput,
- PaddingStrategy,
- PreTokenizedInput,
- TensorType,
- TextInput,
- TextInputPair,
- TruncationStrategy,
-)
-from ...tokenization_utils_fast import PreTrainedTokenizerFast
-from ...utils import add_end_docstrings, logging
-from .tokenization_layoutlmv2 import (
- LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING,
- LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
- LayoutLMv2Tokenizer,
-)
-
-
-logger = logging.get_logger(__name__)
-
-VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
-
-PRETRAINED_VOCAB_FILES_MAP = {
- "vocab_file": {
- "microsoft/layoutlmv2-base-uncased": (
- "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt"
- ),
- },
- "tokenizer_file": {
- "microsoft/layoutlmv2-base-uncased": (
- "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/tokenizer.json"
- ),
- },
-}
-
-PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
- "microsoft/layoutlmv2-base-uncased": 512,
-}
-
-PRETRAINED_INIT_CONFIGURATION = {
- "microsoft/layoutlmv2-base-uncased": {"do_lower_case": True},
-}
-
-
-class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
- r"""
- Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
-
- This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
- refer to this superclass for more information regarding those methods.
-
- Args:
- vocab_file (`str`):
- File containing the vocabulary.
- do_lower_case (`bool`, *optional*, defaults to `True`):
- Whether or not to lowercase the input when tokenizing.
- unk_token (`str`, *optional*, defaults to `"[UNK]"`):
- The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
- token instead.
- sep_token (`str`, *optional*, defaults to `"[SEP]"`):
- The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
- sequence classification or for a text and a question for question answering. It is also used as the last
- token of a sequence built with special tokens.
- pad_token (`str`, *optional*, defaults to `"[PAD]"`):
- The token used for padding, for example when batching sequences of different lengths.
- cls_token (`str`, *optional*, defaults to `"[CLS]"`):
- The classifier token which is used when doing sequence classification (classification of the whole sequence
- instead of per-token classification). It is the first token of the sequence when built with special tokens.
- mask_token (`str`, *optional*, defaults to `"[MASK]"`):
- The token used for masking values. This is the token used when training this model with masked language
- modeling. This is the token which the model will try to predict.
- cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
- The bounding box to use for the special [CLS] token.
- sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
- The bounding box to use for the special [SEP] token.
- pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
- The bounding box to use for the special [PAD] token.
- pad_token_label (`int`, *optional*, defaults to -100):
- The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
- CrossEntropyLoss.
- only_label_first_subword (`bool`, *optional*, defaults to `True`):
- Whether or not to only label the first subword, in case word labels are provided.
- tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
- Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
- issue](https://github.com/huggingface/transformers/issues/328)).
- strip_accents (`bool`, *optional*):
- Whether or not to strip all accents. If this option is not specified, then it will be determined by the
- value for `lowercase` (as in the original LayoutLMv2).
- """
-
- vocab_files_names = VOCAB_FILES_NAMES
- pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
- pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
- max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
- slow_tokenizer_class = LayoutLMv2Tokenizer
-
- def __init__(
- self,
- vocab_file=None,
- tokenizer_file=None,
- do_lower_case=True,
- unk_token="[UNK]",
- sep_token="[SEP]",
- pad_token="[PAD]",
- cls_token="[CLS]",
- mask_token="[MASK]",
- cls_token_box=[0, 0, 0, 0],
- sep_token_box=[1000, 1000, 1000, 1000],
- pad_token_box=[0, 0, 0, 0],
- pad_token_label=-100,
- only_label_first_subword=True,
- tokenize_chinese_chars=True,
- strip_accents=None,
- **kwargs,
- ):
- super().__init__(
- vocab_file,
- tokenizer_file=tokenizer_file,
- do_lower_case=do_lower_case,
- unk_token=unk_token,
- sep_token=sep_token,
- pad_token=pad_token,
- cls_token=cls_token,
- mask_token=mask_token,
- cls_token_box=cls_token_box,
- sep_token_box=sep_token_box,
- pad_token_box=pad_token_box,
- pad_token_label=pad_token_label,
- only_label_first_subword=only_label_first_subword,
- tokenize_chinese_chars=tokenize_chinese_chars,
- strip_accents=strip_accents,
- **kwargs,
- )
-
- pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
- if (
- pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
- or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
- ):
- pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
- pre_tok_state["lowercase"] = do_lower_case
- pre_tok_state["strip_accents"] = strip_accents
- self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
-
- self.do_lower_case = do_lower_case
-
- # additional properties
- self.cls_token_box = cls_token_box
- self.sep_token_box = sep_token_box
- self.pad_token_box = pad_token_box
- self.pad_token_label = pad_token_label
- self.only_label_first_subword = only_label_first_subword
-
- @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def __call__(
- self,
- text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
- text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
- boxes: Union[List[List[int]], List[List[List[int]]]] = None,
- word_labels: Optional[Union[List[int], List[List[int]]]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- """
- Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
- sequences with word-level normalized bounding boxes and optional labels.
-
- Args:
- text (`str`, `List[str]`, `List[List[str]]`):
- The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
- (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
- words).
- text_pair (`List[str]`, `List[List[str]]`):
- The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
- (pretokenized string).
- boxes (`List[List[int]]`, `List[List[List[int]]]`):
- Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
- word_labels (`List[int]`, `List[List[int]]`, *optional*):
- Word-level integer labels (for token classification tasks such as FUNSD, CORD).
- """
-
- # Input type checking for clearer error
- def _is_valid_text_input(t):
- if isinstance(t, str):
- # Strings are fine
- return True
- elif isinstance(t, (list, tuple)):
- # List are fine as long as they are...
- if len(t) == 0:
- # ... empty
- return True
- elif isinstance(t[0], str):
- # ... list of strings
- return True
- elif isinstance(t[0], (list, tuple)):
- # ... list with an empty list or with a list of strings
- return len(t[0]) == 0 or isinstance(t[0][0], str)
- else:
- return False
- else:
- return False
-
- if text_pair is not None:
- # in case text + text_pair are provided, text = questions, text_pair = words
- if not _is_valid_text_input(text):
- raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
- if not isinstance(text_pair, (list, tuple)):
- raise ValueError(
- "Words must be of type `List[str]` (single pretokenized example), "
- "or `List[List[str]]` (batch of pretokenized examples)."
- )
- else:
- # in case only text is provided => must be words
- if not isinstance(text, (list, tuple)):
- raise ValueError(
- "Words must be of type `List[str]` (single pretokenized example), "
- "or `List[List[str]]` (batch of pretokenized examples)."
- )
-
- if text_pair is not None:
- is_batched = isinstance(text, (list, tuple))
- else:
- is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
-
- words = text if text_pair is None else text_pair
- if boxes is None:
- raise ValueError("You must provide corresponding bounding boxes")
- if is_batched:
- if len(words) != len(boxes):
- raise ValueError("You must provide words and boxes for an equal amount of examples")
- for words_example, boxes_example in zip(words, boxes):
- if len(words_example) != len(boxes_example):
- raise ValueError("You must provide as many words as there are bounding boxes")
- else:
- if len(words) != len(boxes):
- raise ValueError("You must provide as many words as there are bounding boxes")
-
- if is_batched:
- if text_pair is not None and len(text) != len(text_pair):
- raise ValueError(
- f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
- f" {len(text_pair)}."
- )
- batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
- is_pair = bool(text_pair is not None)
- return self.batch_encode_plus(
- batch_text_or_text_pairs=batch_text_or_text_pairs,
- is_pair=is_pair,
- boxes=boxes,
- word_labels=word_labels,
- add_special_tokens=add_special_tokens,
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
- else:
- return self.encode_plus(
- text=text,
- text_pair=text_pair,
- boxes=boxes,
- word_labels=word_labels,
- add_special_tokens=add_special_tokens,
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
-
- @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def batch_encode_plus(
- self,
- batch_text_or_text_pairs: Union[
- List[TextInput],
- List[TextInputPair],
- List[PreTokenizedInput],
- ],
- is_pair: bool = None,
- boxes: Optional[List[List[List[int]]]] = None,
- word_labels: Optional[Union[List[int], List[List[int]]]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
- padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- pad_to_multiple_of=pad_to_multiple_of,
- verbose=verbose,
- **kwargs,
- )
-
- return self._batch_encode_plus(
- batch_text_or_text_pairs=batch_text_or_text_pairs,
- is_pair=is_pair,
- boxes=boxes,
- word_labels=word_labels,
- add_special_tokens=add_special_tokens,
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
-
- def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
- batched_input = [(text, pair)] if pair else [text]
- encodings = self._tokenizer.encode_batch(
- batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
- )
-
- return encodings[0].tokens
-
- @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def encode_plus(
- self,
- text: Union[TextInput, PreTokenizedInput],
- text_pair: Optional[PreTokenizedInput] = None,
- boxes: Optional[List[List[int]]] = None,
- word_labels: Optional[List[int]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- """
-        Tokenize and prepare for the model a sequence or a pair of sequences.
-
-        .. warning::
-            This method is deprecated, `__call__` should be used instead.
-
- Args:
- text (`str`, `List[str]`, `List[List[str]]`):
- The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
- text_pair (`List[str]` or `List[int]`, *optional*):
- Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
- list of list of strings (words of a batch of examples).
- """
-
- # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
- padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- pad_to_multiple_of=pad_to_multiple_of,
- verbose=verbose,
- **kwargs,
- )
-
- return self._encode_plus(
- text=text,
- boxes=boxes,
- text_pair=text_pair,
- word_labels=word_labels,
- add_special_tokens=add_special_tokens,
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
-
- def _batch_encode_plus(
- self,
- batch_text_or_text_pairs: Union[
- List[TextInput],
- List[TextInputPair],
- List[PreTokenizedInput],
- ],
- is_pair: bool = None,
- boxes: Optional[List[List[List[int]]]] = None,
- word_labels: Optional[List[List[int]]] = None,
- add_special_tokens: bool = True,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- return_tensors: Optional[str] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- ) -> BatchEncoding:
- if not isinstance(batch_text_or_text_pairs, list):
- raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
-
- # Set the truncation and padding strategy and restore the initial configuration
- self.set_truncation_and_padding(
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- )
-
- if is_pair:
- batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
-
- encodings = self._tokenizer.encode_batch(
- batch_text_or_text_pairs,
- add_special_tokens=add_special_tokens,
- is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs
- )
-
- # Convert encoding to dict
- # `Tokens` has type: Tuple[
- # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
- # List[EncodingFast]
- # ]
- # with nested dimensions corresponding to batch, overflows, sequence length
- tokens_and_encodings = [
- self._convert_encoding(
- encoding=encoding,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=True
- if word_labels is not None
- else return_offsets_mapping, # we use offsets to create the labels
- return_length=return_length,
- verbose=verbose,
- )
- for encoding in encodings
- ]
-
- # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
- # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
- # (we say ~ because the number of overflow varies with the example in the batch)
- #
- # To match each overflowing sample with the original sample in the batch
- # we add an overflow_to_sample_mapping array (see below)
- sanitized_tokens = {}
- for key in tokens_and_encodings[0][0].keys():
- stack = [e for item, _ in tokens_and_encodings for e in item[key]]
- sanitized_tokens[key] = stack
- sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
-
- # If returning overflowing tokens, we need to return a mapping
- # from the batch idx to the original sample
- if return_overflowing_tokens:
- overflow_to_sample_mapping = []
- for i, (toks, _) in enumerate(tokens_and_encodings):
- overflow_to_sample_mapping += [i] * len(toks["input_ids"])
- sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
-
- for input_ids in sanitized_tokens["input_ids"]:
- self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
-
- # create the token boxes
- token_boxes = []
- for batch_index in range(len(sanitized_tokens["input_ids"])):
- if return_overflowing_tokens:
- original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
- else:
- original_index = batch_index
- token_boxes_example = []
- for id, sequence_id, word_id in zip(
- sanitized_tokens["input_ids"][batch_index],
- sanitized_encodings[batch_index].sequence_ids,
- sanitized_encodings[batch_index].word_ids,
- ):
- if word_id is not None:
- if is_pair and sequence_id == 0:
- token_boxes_example.append(self.pad_token_box)
- else:
- token_boxes_example.append(boxes[original_index][word_id])
- else:
- if id == self.cls_token_id:
- token_boxes_example.append(self.cls_token_box)
- elif id == self.sep_token_id:
- token_boxes_example.append(self.sep_token_box)
- elif id == self.pad_token_id:
- token_boxes_example.append(self.pad_token_box)
- else:
- raise ValueError("Id not recognized")
- token_boxes.append(token_boxes_example)
-
- sanitized_tokens["bbox"] = token_boxes
-
- # optionally, create the labels
- if word_labels is not None:
- labels = []
- for batch_index in range(len(sanitized_tokens["input_ids"])):
- if return_overflowing_tokens:
- original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
- else:
- original_index = batch_index
- labels_example = []
- for id, offset, word_id in zip(
- sanitized_tokens["input_ids"][batch_index],
- sanitized_tokens["offset_mapping"][batch_index],
- sanitized_encodings[batch_index].word_ids,
- ):
- if word_id is not None:
- if self.only_label_first_subword:
- if offset[0] == 0:
- # Use the real label id for the first token of the word, and padding ids for the remaining tokens
- labels_example.append(word_labels[original_index][word_id])
- else:
- labels_example.append(self.pad_token_label)
- else:
- labels_example.append(word_labels[original_index][word_id])
- else:
- labels_example.append(self.pad_token_label)
- labels.append(labels_example)
-
- sanitized_tokens["labels"] = labels
- # finally, remove offsets if the user didn't want them
- if not return_offsets_mapping:
- del sanitized_tokens["offset_mapping"]
-
- return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
-
- def _encode_plus(
- self,
- text: Union[TextInput, PreTokenizedInput],
- text_pair: Optional[PreTokenizedInput] = None,
- boxes: Optional[List[List[int]]] = None,
- word_labels: Optional[List[int]] = None,
- add_special_tokens: bool = True,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- return_tensors: Optional[bool] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- # make it a batched input
- # 2 options:
- # 1) only text, in case text must be a list of str
- # 2) text + text_pair, in which case text = str and text_pair a list of str
- batched_input = [(text, text_pair)] if text_pair else [text]
- batched_boxes = [boxes]
- batched_word_labels = [word_labels] if word_labels is not None else None
- batched_output = self._batch_encode_plus(
- batched_input,
- is_pair=bool(text_pair is not None),
- boxes=batched_boxes,
- word_labels=batched_word_labels,
- add_special_tokens=add_special_tokens,
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
-
-        # If return_tensors is None, we can remove the leading batch axis
- # Overflowing tokens are returned as a batch of output so we keep them in this case
- if return_tensors is None and not return_overflowing_tokens:
- batched_output = BatchEncoding(
- {
- key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
- for key, value in batched_output.items()
- },
- batched_output.encodings,
- )
-
- self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
-
- return batched_output
-
- def _pad(
- self,
- encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
- max_length: Optional[int] = None,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- pad_to_multiple_of: Optional[int] = None,
- return_attention_mask: Optional[bool] = None,
- ) -> dict:
- """
- Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
-
- Args:
- encoded_inputs:
- Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
- max_length: maximum length of the returned list and optionally padding length (see below).
- Will truncate by taking into account the special tokens.
- padding_strategy: PaddingStrategy to use for padding.
-
-                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- - PaddingStrategy.DO_NOT_PAD: Do not pad
- The tokenizer padding sides are defined in self.padding_side:
-
- - 'left': pads on the left of the sequences
- - 'right': pads on the right of the sequences
- pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
- `>= 7.5` (Volta).
- return_attention_mask:
- (optional) Set to False to avoid returning attention mask (default: set to model specifics)
- """
- # Load from model defaults
- if return_attention_mask is None:
- return_attention_mask = "attention_mask" in self.model_input_names
-
- required_input = encoded_inputs[self.model_input_names[0]]
-
- if padding_strategy == PaddingStrategy.LONGEST:
- max_length = len(required_input)
-
- if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
- max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
-
- needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
-
- # Initialize attention mask if not present.
- if return_attention_mask and "attention_mask" not in encoded_inputs:
- encoded_inputs["attention_mask"] = [1] * len(required_input)
-
- if needs_to_be_padded:
- difference = max_length - len(required_input)
- if self.padding_side == "right":
- if return_attention_mask:
- encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
- if "token_type_ids" in encoded_inputs:
- encoded_inputs["token_type_ids"] = (
- encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
- )
- if "bbox" in encoded_inputs:
- encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
- if "labels" in encoded_inputs:
- encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
- if "special_tokens_mask" in encoded_inputs:
- encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
- encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
- elif self.padding_side == "left":
- if return_attention_mask:
- encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
- if "token_type_ids" in encoded_inputs:
- encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
- "token_type_ids"
- ]
- if "bbox" in encoded_inputs:
- encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
- if "labels" in encoded_inputs:
- encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
- if "special_tokens_mask" in encoded_inputs:
- encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
- encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
- else:
- raise ValueError("Invalid padding strategy:" + str(self.padding_side))
-
- return encoded_inputs
-
- def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
- """
- Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
- adding special tokens. A BERT sequence has the following format:
-
- - single sequence: `[CLS] X [SEP]`
- - pair of sequences: `[CLS] A [SEP] B [SEP]`
-
- Args:
- token_ids_0 (`List[int]`):
- List of IDs to which the special tokens will be added.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
-
- Returns:
- `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
- """
- output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
-
- if token_ids_1:
- output += token_ids_1 + [self.sep_token_id]
-
- return output
-
- def create_token_type_ids_from_sequences(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- """
- Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
-        pair mask has the following format::
-
-            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
-            | first sequence    | second sequence |
-
-        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
-
- Args:
- token_ids_0 (`List[int]`):
- List of IDs.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
-
- Returns:
- `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
- """
- sep = [self.sep_token_id]
- cls = [self.cls_token_id]
- if token_ids_1 is None:
- return len(cls + token_ids_0 + sep) * [0]
- return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
-
- def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
- files = self._tokenizer.model.save(save_directory, name=filename_prefix)
- return tuple(files)
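A minimal usage sketch of the fast tokenizer defined above, using the upstream `transformers` package where the same class ships: words arrive pre-tokenized, each word carries a 0-1000 normalized bounding box, and optional word-level labels are expanded to token-level labels (continuation sub-words receive `pad_token_label`, i.e. -100). The checkpoint name matches the vocabulary map above; exact shapes depend on the padding settings chosen.

```python
from transformers import LayoutLMv2TokenizerFast

tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")

words = ["Invoice", "number", "12345"]
boxes = [[48, 84, 156, 98], [160, 84, 232, 98], [236, 84, 302, 98]]  # normalized to 0-1000
word_labels = [1, 1, 2]

encoding = tokenizer(
    words,
    boxes=boxes,
    word_labels=word_labels,
    padding="max_length",
    max_length=16,
    truncation=True,
    return_tensors="pt",
)

print(encoding["input_ids"].shape)  # torch.Size([1, 16])
print(encoding["bbox"].shape)       # torch.Size([1, 16, 4])
print(encoding["labels"])           # first sub-word keeps the word label, the rest are -100
```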
diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/dev/run_inference_tests.sh b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/dev/run_inference_tests.sh
deleted file mode 100644
index bc9dcc56f06f79fc5efa42c04ffdc07c2787e3ac..0000000000000000000000000000000000000000
--- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/dev/run_inference_tests.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash -e
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-BIN="python tools/train_net.py"
-OUTPUT="inference_test_output"
-NUM_GPUS=2
-
-CFG_LIST=( "${@:1}" )
-
-if [ ${#CFG_LIST[@]} -eq 0 ]; then
- CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml )
-fi
-
-echo "========================================================================"
-echo "Configs to run:"
-echo "${CFG_LIST[@]}"
-echo "========================================================================"
-
-
-for cfg in "${CFG_LIST[@]}"; do
- echo "========================================================================"
- echo "Running $cfg ..."
- echo "========================================================================"
- $BIN \
- --eval-only \
- --num-gpus $NUM_GPUS \
- --config-file "$cfg" \
- OUTPUT_DIR $OUTPUT
- rm -rf $OUTPUT
-done
-
-
-echo "========================================================================"
-echo "Running demo.py ..."
-echo "========================================================================"
-DEMO_BIN="python demo/demo.py"
-COCO_DIR=datasets/coco/val2014
-mkdir -pv $OUTPUT
-
-set -v
-
-$DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \
- --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT
-rm -rf $OUTPUT
diff --git a/spaces/yuan2023/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/controlnet/controlnet_hed.py b/spaces/yuan2023/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/controlnet/controlnet_hed.py
deleted file mode 100644
index d85742c8cad946a4d25ac4a4493abcd9b88979c9..0000000000000000000000000000000000000000
--- a/spaces/yuan2023/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/controlnet/controlnet_hed.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import gradio as gr
-import torch
-from controlnet_aux import HEDdetector
-from diffusers import (
- ControlNetModel,
- StableDiffusionControlNetPipeline,
- UniPCMultistepScheduler,
-)
-from PIL import Image
-
-stable_model_list = [
- "runwayml/stable-diffusion-v1-5",
- "stabilityai/stable-diffusion-2-1",
-]
-
-controlnet_hed_model_list = [
- "lllyasviel/sd-controlnet-hed",
- "thibaud/controlnet-sd21-hed-diffusers",
-]
-
-stable_prompt_list = ["a photo of a man.", "a photo of a girl."]
-
-stable_negative_prompt_list = ["bad, ugly", "deformed"]
-
-data_list = [
- "data/test.png",
-]
-
-
-def controlnet_hed(image_path: str, controlnet_hed_model_path: str):
- hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
-
- image = Image.open(image_path)
- image = hed(image)
-
- controlnet = ControlNetModel.from_pretrained(
- controlnet_hed_model_path, torch_dtype=torch.float16
- )
- return controlnet, image
-
-
-def stable_diffusion_controlnet_hed(
- image_path: str,
- stable_model_path: str,
- controlnet_hed_model_path: str,
- prompt: str,
- negative_prompt: str,
- guidance_scale: int,
- num_inference_step: int,
-):
-
- controlnet, image = controlnet_hed(
- image_path=image_path,
- controlnet_hed_model_path=controlnet_hed_model_path,
- )
-
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
- pretrained_model_name_or_path=stable_model_path,
- controlnet=controlnet,
- safety_checker=None,
- torch_dtype=torch.float16,
- )
-
- pipe.to("cuda")
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
- pipe.enable_xformers_memory_efficient_attention()
-
- output = pipe(
- prompt=prompt,
- image=image,
- negative_prompt=negative_prompt,
- num_inference_steps=num_inference_step,
- guidance_scale=guidance_scale,
- ).images
-
- return output[0]
-
-
-def stable_diffusion_controlnet_hed_app():
- with gr.Blocks():
- with gr.Row():
- with gr.Column():
- controlnet_hed_image_file = gr.Image(
- type="filepath", label="Image"
- )
-
- controlnet_hed_stable_model_id = gr.Dropdown(
- choices=stable_model_list,
- value=stable_model_list[0],
- label="Stable Model Id",
- )
-
- controlnet_hed_model_id = gr.Dropdown(
- choices=controlnet_hed_model_list,
- value=controlnet_hed_model_list[1],
- label="ControlNet Model Id",
- )
-
- controlnet_hed_prompt = gr.Textbox(
- lines=1, value=stable_prompt_list[0], label="Prompt"
- )
-
- controlnet_hed_negative_prompt = gr.Textbox(
- lines=1,
- value=stable_negative_prompt_list[0],
- label="Negative Prompt",
- )
-
- with gr.Accordion("Advanced Options", open=False):
- controlnet_hed_guidance_scale = gr.Slider(
- minimum=0.1,
- maximum=15,
- step=0.1,
- value=7.5,
- label="Guidance Scale",
- )
-
- controlnet_hed_num_inference_step = gr.Slider(
- minimum=1,
- maximum=100,
- step=1,
- value=50,
- label="Num Inference Step",
- )
-
- controlnet_hed_predict = gr.Button(value="Generator")
-
- with gr.Column():
- output_image = gr.Image(label="Output")
-
- gr.Examples(
- fn=stable_diffusion_controlnet_hed,
- examples=[
- [
- data_list[0],
- stable_model_list[0],
- controlnet_hed_model_list[0],
- stable_prompt_list[0],
- stable_negative_prompt_list[0],
- 7.5,
- 50,
- ]
- ],
- inputs=[
- controlnet_hed_image_file,
- controlnet_hed_stable_model_id,
- controlnet_hed_model_id,
- controlnet_hed_prompt,
- controlnet_hed_negative_prompt,
- controlnet_hed_guidance_scale,
- controlnet_hed_num_inference_step,
- ],
- outputs=[output_image],
- cache_examples=False,
- label="ControlNet HED Example",
- )
-
- controlnet_hed_predict.click(
- fn=stable_diffusion_controlnet_hed,
- inputs=[
- controlnet_hed_image_file,
- controlnet_hed_stable_model_id,
- controlnet_hed_model_id,
- controlnet_hed_prompt,
- controlnet_hed_negative_prompt,
- controlnet_hed_guidance_scale,
- controlnet_hed_num_inference_step,
- ],
- outputs=[output_image],
- )
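The helper above wires a HED edge detector into a Stable Diffusion ControlNet pipeline and is normally driven from the Gradio UI. Below is a minimal sketch of calling it directly, mirroring the values in its `gr.Examples` block; the import path is an assumption based on this file's location in the Space, and a CUDA GPU is required since the pipeline is moved to `"cuda"` in fp16.

```python
from diffusion_webui.controlnet.controlnet_hed import stable_diffusion_controlnet_hed

result = stable_diffusion_controlnet_hed(
    image_path="data/test.png",                                # sample image listed in data_list
    stable_model_path="runwayml/stable-diffusion-v1-5",
    controlnet_hed_model_path="lllyasviel/sd-controlnet-hed",
    prompt="a photo of a man.",
    negative_prompt="bad, ugly",
    guidance_scale=7.5,
    num_inference_step=50,
)
result.save("controlnet_hed_result.png")                       # the function returns a PIL image
```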
diff --git a/spaces/zachriek/chatgpt-clone/app.py b/spaces/zachriek/chatgpt-clone/app.py
deleted file mode 100644
index 231f6e952ab746c61d870f1b99aac87386214081..0000000000000000000000000000000000000000
--- a/spaces/zachriek/chatgpt-clone/app.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import os
-import openai
-import gradio as gr
-from dotenv import load_dotenv
-
-load_dotenv()
-
-openai.api_key = os.getenv("API_KEY")
-
-start_sequence = "\nAI: "
-restart_sequence = "\nHuman: "
-
-prompt = "The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\n\nHuman: Hello, who are you?\nAI: I am an AI created by OpenAI. How can I help you today?\nHuman: "
-
-
-def openai_create(prompt):
- response = openai.Completion.create(
- model="text-davinci-003",
- prompt=prompt,
- temperature=0.9,
- max_tokens=150,
- top_p=1,
- frequency_penalty=0,
- presence_penalty=0,
- stop=[" Human:", " AI:"]
- )
- return response.choices[0].text
-
-
-def chatgpt_clone(input, history):
- history = history or []
- s = list(sum(history, ()))
- s.append(input)
- inp = ' '.join(s)
- output = openai_create(inp)
- history.append((input, output))
- return history, history
-
-
-block = gr.Blocks()
-
-
-with block:
- gr.Markdown("""Build Your Own ChatGPT with OpenAI API & Gradio
- """)
- chatbot = gr.Chatbot().style(color_map=("#CE6400", "#0b0f19"))
- message = gr.Textbox(placeholder=prompt)
- state = gr.State()
- submit = gr.Button("SEND", variant="primary")
- submit.click(chatgpt_clone, inputs=[
- message, state], outputs=[chatbot, state])
-
-block.launch()
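The chat handler above keeps the whole conversation as a list of `(user, assistant)` tuples in a Gradio `State`, flattens it into a single prompt string on every turn, and returns the updated list twice (once for the chatbot display, once for the state). A small sketch of that state flow with the OpenAI call stubbed out:

```python
def fake_openai_create(prompt):
    # stand-in for openai_create(); just echoes the latest user message
    return "(assistant reply to: " + prompt.split(" ")[-1] + ")"

def chatgpt_clone(message, history):
    history = history or []
    flat = list(sum(history, ()))        # flatten [(u1, a1), (u2, a2), ...] into one token stream
    flat.append(message)
    output = fake_openai_create(" ".join(flat))
    history.append((message, output))
    return history, history

state = None
for turn in ["Hello", "What can you do?"]:
    chat, state = chatgpt_clone(turn, state)

print(chat)   # [('Hello', '(assistant reply to: Hello)'), ('What can you do?', ...)]
```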
diff --git a/spaces/zanderchase/chat-your-data-chef/ingest_data.py b/spaces/zanderchase/chat-your-data-chef/ingest_data.py
deleted file mode 100644
index 2cb861e7477b3333e27d63e6ca17b50c98ee54a3..0000000000000000000000000000000000000000
--- a/spaces/zanderchase/chat-your-data-chef/ingest_data.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.document_loaders import CollegeConfidentialLoader
-from langchain.vectorstores.faiss import FAISS
-from langchain.embeddings import OpenAIEmbeddings
-import pickle
-from bs4 import BeautifulSoup
-from selenium import webdriver
-from selenium.webdriver.common.by import By
-from selenium.webdriver.common.keys import Keys
-import time
-# Scrape college data links:
-browser = webdriver.Chrome()
-
-browser.get("https://www.collegeconfidential.com/colleges/")
-time.sleep(1)
-
-elem = browser.find_element(By.TAG_NAME, "body")
-
-no_of_pagedowns = 5
-
-while no_of_pagedowns:
- elem.send_keys(Keys.PAGE_DOWN)
- time.sleep(14) #10
- no_of_pagedowns-=1
-
-html = browser.page_source
-print(html)
-soup = BeautifulSoup(html, "html.parser")
-schools = soup.find_all("div", {"class": "l-row l-gx-3 l-gx-xl-4 l-gy-4"})[0]
-print("------")
-print(schools)
-raw_documents = []
-i = 1
-for s in schools.find_all("a", {"class": "u-margin-bottom-xxs"}, href=True):
- college_link = s['href']
- print(college_link)
- print(i)
- i += 1
- # Load Data
- loader = CollegeConfidentialLoader("https://www.collegeconfidential.com" + college_link)
- data = loader.load()[0]
- raw_documents.append(data)
- if i > 2:
- break
-print(raw_documents)
-
-
-
-
-
-# Split text
-text_splitter = RecursiveCharacterTextSplitter()
-documents = text_splitter.split_documents(raw_documents)
-
-print("YOOOO")
-print(documents)
-
-# Load Data to vectorstore
-embeddings = OpenAIEmbeddings()
-vectorstore = FAISS.from_documents(documents, embeddings)
-
-query = "What is the average ACT at UChicago?"
-
-docs = vectorstore.similarity_search(query)
-
-print("HEYY")
-print(docs)
-
-# Save vectorstore
-with open("vectorstore.pkl", "wb") as f:
- pickle.dump(vectorstore, f)
\ No newline at end of file
diff --git a/spaces/zfj41/webui/README.md b/spaces/zfj41/webui/README.md
deleted file mode 100644
index 013d12c9f3a56698056ae1bdbbfb0ec009805237..0000000000000000000000000000000000000000
--- a/spaces/zfj41/webui/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Stable Diffusion Web UI
-emoji: 🚧
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.9
-app_file: app.py
-pinned: false
-duplicated_from: camenduru/webui
----
-
-## Stable Diffusion Web UI
-[https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
-
-## Documentation
-[https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki)
-
-## Models License
-https://huggingface.co/spaces/CompVis/stable-diffusion-license
\ No newline at end of file
diff --git a/spaces/zhang-wei-jian/docker/node_modules/keygrip/HISTORY.md b/spaces/zhang-wei-jian/docker/node_modules/keygrip/HISTORY.md
deleted file mode 100644
index 0f4cc31a4b3525baa4bc7de3f448121f1b5321f9..0000000000000000000000000000000000000000
--- a/spaces/zhang-wei-jian/docker/node_modules/keygrip/HISTORY.md
+++ /dev/null
@@ -1,25 +0,0 @@
-1.1.0 / 2019-05-07
-==================
-
- * Use `tsscmp` module for timing-safe signature verification
-
-1.0.3 / 2018-09-12
-==================
-
- * perf: enable strict mode
-
-1.0.2 / 2017-08-26
-==================
-
- * perf: improve comparison speed
-
-1.0.1 / 2014-05-07
-==================
-
- * Readme changes
- * Update repository for organization move
-
-1.0.0 / 2013-12-21
-==================
-
- * Remove default key generation and associated expectations
diff --git a/spaces/zhangs2022/ChuanhuChatGPT/modules/shared.py b/spaces/zhangs2022/ChuanhuChatGPT/modules/shared.py
deleted file mode 100644
index a9e72580aa7ae48f907e923a09099513570a9ad8..0000000000000000000000000000000000000000
--- a/spaces/zhangs2022/ChuanhuChatGPT/modules/shared.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST
-import os
-import queue
-
-class State:
- interrupted = False
- multi_api_key = False
- completion_url = COMPLETION_URL
- balance_api_url = BALANCE_API_URL
- usage_api_url = USAGE_API_URL
-
- def interrupt(self):
- self.interrupted = True
-
- def recover(self):
- self.interrupted = False
-
- def set_api_host(self, api_host):
- self.completion_url = f"https://{api_host}/v1/chat/completions"
- self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants"
- self.usage_api_url = f"https://{api_host}/dashboard/billing/usage"
- os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1"
-
- def reset_api_host(self):
- self.completion_url = COMPLETION_URL
- self.balance_api_url = BALANCE_API_URL
- self.usage_api_url = USAGE_API_URL
- os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1"
- return API_HOST
-
- def reset_all(self):
- self.interrupted = False
- self.completion_url = COMPLETION_URL
-
- def set_api_key_queue(self, api_key_list):
- self.multi_api_key = True
- self.api_key_queue = queue.Queue()
- for api_key in api_key_list:
- self.api_key_queue.put(api_key)
-
- def switching_api_key(self, func):
- if not hasattr(self, "api_key_queue"):
- return func
-
- def wrapped(*args, **kwargs):
- api_key = self.api_key_queue.get()
- args[0].api_key = api_key
- ret = func(*args, **kwargs)
- self.api_key_queue.put(api_key)
- return ret
-
- return wrapped
-
-
-state = State()
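The `State` singleton above supports round-robin rotation over several API keys: `set_api_key_queue` fills a queue, and `switching_api_key` wraps a call so that each invocation pops a key, assigns it to the client object passed as the first argument, and returns the key to the queue afterwards. A minimal sketch with fake keys and no network access; it assumes the module is importable as `modules.shared`, matching the file path in this Space.

```python
from modules.shared import state

class FakeClient:
    api_key = None

def do_request(client, prompt):
    # stand-in for a real completion call; just reports which key was used
    return f"{client.api_key} -> {prompt}"

state.set_api_key_queue(["sk-key-1", "sk-key-2"])
rotating_request = state.switching_api_key(do_request)

client = FakeClient()
print(rotating_request(client, "hello"))   # served with sk-key-1, key returned to the queue
print(rotating_request(client, "again"))   # served with sk-key-2, keys keep rotating
```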
diff --git a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/pages/api/create.ts b/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/pages/api/create.ts
deleted file mode 100644
index 508fa97ef609cbb215a61085711638e116235ebe..0000000000000000000000000000000000000000
--- a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/pages/api/create.ts
+++ /dev/null
@@ -1,31 +0,0 @@
-'use server'
-
-import { NextApiRequest, NextApiResponse } from 'next'
-import { fetch, debug } from '@/lib/isomorphic'
-import { createHeaders } from '@/lib/utils'
-
-// const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create'
-const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create';
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- try {
- const headers = createHeaders(req.cookies)
-
- res.writeHead(200, {
- 'Content-Type': 'application/json',
- })
-
- debug('headers', headers)
- const response = await fetch(API_ENDPOINT, { method: 'GET', headers })
- .then((res) => res.text())
-
- res.end(response)
- } catch (e) {
- return res.end(JSON.stringify({
- result: {
- value: 'UnauthorizedRequest',
- message: `${e}`
- }
- }))
- }
-}
diff --git a/spaces/zlc99/M4Singer/modules/parallel_wavegan/models/parallel_wavegan.py b/spaces/zlc99/M4Singer/modules/parallel_wavegan/models/parallel_wavegan.py
deleted file mode 100644
index c63b59f67aa48342179415c1d1beac68574a5498..0000000000000000000000000000000000000000
--- a/spaces/zlc99/M4Singer/modules/parallel_wavegan/models/parallel_wavegan.py
+++ /dev/null
@@ -1,434 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2019 Tomoki Hayashi
-# MIT License (https://opensource.org/licenses/MIT)
-
-"""Parallel WaveGAN Modules."""
-
-import logging
-import math
-
-import torch
-from torch import nn
-
-from modules.parallel_wavegan.layers import Conv1d
-from modules.parallel_wavegan.layers import Conv1d1x1
-from modules.parallel_wavegan.layers import ResidualBlock
-from modules.parallel_wavegan.layers import upsample
-from modules.parallel_wavegan import models
-
-
-class ParallelWaveGANGenerator(torch.nn.Module):
- """Parallel WaveGAN Generator module."""
-
- def __init__(self,
- in_channels=1,
- out_channels=1,
- kernel_size=3,
- layers=30,
- stacks=3,
- residual_channels=64,
- gate_channels=128,
- skip_channels=64,
- aux_channels=80,
- aux_context_window=2,
- dropout=0.0,
- bias=True,
- use_weight_norm=True,
- use_causal_conv=False,
- upsample_conditional_features=True,
- upsample_net="ConvInUpsampleNetwork",
- upsample_params={"upsample_scales": [4, 4, 4, 4]},
- use_pitch_embed=False,
- ):
- """Initialize Parallel WaveGAN Generator module.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- kernel_size (int): Kernel size of dilated convolution.
- layers (int): Number of residual block layers.
- stacks (int): Number of stacks i.e., dilation cycles.
- residual_channels (int): Number of channels in residual conv.
- gate_channels (int): Number of channels in gated conv.
- skip_channels (int): Number of channels in skip conv.
- aux_channels (int): Number of channels for auxiliary feature conv.
- aux_context_window (int): Context window size for auxiliary feature.
- dropout (float): Dropout rate. 0.0 means no dropout applied.
- bias (bool): Whether to use bias parameter in conv layer.
- use_weight_norm (bool): Whether to use weight norm.
- If set to true, it will be applied to all of the conv layers.
- use_causal_conv (bool): Whether to use causal structure.
- upsample_conditional_features (bool): Whether to use upsampling network.
- upsample_net (str): Upsampling network architecture.
- upsample_params (dict): Upsampling network parameters.
-
- """
- super(ParallelWaveGANGenerator, self).__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.aux_channels = aux_channels
- self.layers = layers
- self.stacks = stacks
- self.kernel_size = kernel_size
-
- # check the number of layers and stacks
- assert layers % stacks == 0
- layers_per_stack = layers // stacks
-
- # define first convolution
- self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True)
-
- # define conv + upsampling network
- if upsample_conditional_features:
- upsample_params.update({
- "use_causal_conv": use_causal_conv,
- })
- if upsample_net == "MelGANGenerator":
- assert aux_context_window == 0
- upsample_params.update({
- "use_weight_norm": False, # not to apply twice
- "use_final_nonlinear_activation": False,
- })
- self.upsample_net = getattr(models, upsample_net)(**upsample_params)
- else:
- if upsample_net == "ConvInUpsampleNetwork":
- upsample_params.update({
- "aux_channels": aux_channels,
- "aux_context_window": aux_context_window,
- })
- self.upsample_net = getattr(upsample, upsample_net)(**upsample_params)
- else:
- self.upsample_net = None
-
- # define residual blocks
- self.conv_layers = torch.nn.ModuleList()
- for layer in range(layers):
- dilation = 2 ** (layer % layers_per_stack)
- conv = ResidualBlock(
- kernel_size=kernel_size,
- residual_channels=residual_channels,
- gate_channels=gate_channels,
- skip_channels=skip_channels,
- aux_channels=aux_channels,
- dilation=dilation,
- dropout=dropout,
- bias=bias,
- use_causal_conv=use_causal_conv,
- )
- self.conv_layers += [conv]
-
- # define output layers
- self.last_conv_layers = torch.nn.ModuleList([
- torch.nn.ReLU(inplace=True),
- Conv1d1x1(skip_channels, skip_channels, bias=True),
- torch.nn.ReLU(inplace=True),
- Conv1d1x1(skip_channels, out_channels, bias=True),
- ])
-
- self.use_pitch_embed = use_pitch_embed
- if use_pitch_embed:
- self.pitch_embed = nn.Embedding(300, aux_channels, 0)
- self.c_proj = nn.Linear(2 * aux_channels, aux_channels)
-
- # apply weight norm
- if use_weight_norm:
- self.apply_weight_norm()
-
- def forward(self, x, c=None, pitch=None, **kwargs):
- """Calculate forward propagation.
-
- Args:
- x (Tensor): Input noise signal (B, C_in, T).
-            c (Tensor): Local conditioning auxiliary features (B, C, T').
- pitch (Tensor): Local conditioning pitch (B, T').
-
- Returns:
- Tensor: Output tensor (B, C_out, T)
-
- """
- # perform upsampling
- if c is not None and self.upsample_net is not None:
- if self.use_pitch_embed:
- p = self.pitch_embed(pitch)
- c = self.c_proj(torch.cat([c.transpose(1, 2), p], -1)).transpose(1, 2)
- c = self.upsample_net(c)
- assert c.size(-1) == x.size(-1), (c.size(-1), x.size(-1))
-
- # encode to hidden representation
- x = self.first_conv(x)
- skips = 0
- for f in self.conv_layers:
- x, h = f(x, c)
- skips += h
- skips *= math.sqrt(1.0 / len(self.conv_layers))
-
- # apply final layers
- x = skips
- for f in self.last_conv_layers:
- x = f(x)
-
- return x
-
- def remove_weight_norm(self):
- """Remove weight normalization module from all of the layers."""
- def _remove_weight_norm(m):
- try:
- logging.debug(f"Weight norm is removed from {m}.")
- torch.nn.utils.remove_weight_norm(m)
- except ValueError: # this module didn't have weight norm
- return
-
- self.apply(_remove_weight_norm)
-
- def apply_weight_norm(self):
-        """Apply weight normalization module to all of the layers."""
- def _apply_weight_norm(m):
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
- torch.nn.utils.weight_norm(m)
- logging.debug(f"Weight norm is applied to {m}.")
-
- self.apply(_apply_weight_norm)
-
- @staticmethod
- def _get_receptive_field_size(layers, stacks, kernel_size,
- dilation=lambda x: 2 ** x):
- assert layers % stacks == 0
- layers_per_cycle = layers // stacks
- dilations = [dilation(i % layers_per_cycle) for i in range(layers)]
- return (kernel_size - 1) * sum(dilations) + 1
-
- @property
- def receptive_field_size(self):
- """Return receptive field size."""
- return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size)
-
-
-class ParallelWaveGANDiscriminator(torch.nn.Module):
- """Parallel WaveGAN Discriminator module."""
-
- def __init__(self,
- in_channels=1,
- out_channels=1,
- kernel_size=3,
- layers=10,
- conv_channels=64,
- dilation_factor=1,
- nonlinear_activation="LeakyReLU",
- nonlinear_activation_params={"negative_slope": 0.2},
- bias=True,
- use_weight_norm=True,
- ):
- """Initialize Parallel WaveGAN Discriminator module.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
-            kernel_size (int): Kernel size of dilated convolution.
- layers (int): Number of conv layers.
-            conv_channels (int): Number of channels in conv layers.
- dilation_factor (int): Dilation factor. For example, if dilation_factor = 2,
- the dilation will be 2, 4, 8, ..., and so on.
- nonlinear_activation (str): Nonlinear function after each conv.
-            nonlinear_activation_params (dict): Nonlinear function parameters.
- bias (bool): Whether to use bias parameter in conv.
-            use_weight_norm (bool): Whether to use weight norm.
- If set to true, it will be applied to all of the conv layers.
-
- """
- super(ParallelWaveGANDiscriminator, self).__init__()
-        assert (kernel_size - 1) % 2 == 0, "Kernel size must be odd."
- assert dilation_factor > 0, "Dilation factor must be > 0."
- self.conv_layers = torch.nn.ModuleList()
- conv_in_channels = in_channels
- for i in range(layers - 1):
- if i == 0:
- dilation = 1
- else:
- dilation = i if dilation_factor == 1 else dilation_factor ** i
- conv_in_channels = conv_channels
- padding = (kernel_size - 1) // 2 * dilation
- conv_layer = [
- Conv1d(conv_in_channels, conv_channels,
- kernel_size=kernel_size, padding=padding,
- dilation=dilation, bias=bias),
- getattr(torch.nn, nonlinear_activation)(inplace=True, **nonlinear_activation_params)
- ]
- self.conv_layers += conv_layer
- padding = (kernel_size - 1) // 2
- last_conv_layer = Conv1d(
- conv_in_channels, out_channels,
- kernel_size=kernel_size, padding=padding, bias=bias)
- self.conv_layers += [last_conv_layer]
-
- # apply weight norm
- if use_weight_norm:
- self.apply_weight_norm()
-
- def forward(self, x):
- """Calculate forward propagation.
-
- Args:
- x (Tensor): Input noise signal (B, 1, T).
-
- Returns:
- Tensor: Output tensor (B, 1, T)
-
- """
- for f in self.conv_layers:
- x = f(x)
- return x
-
- def apply_weight_norm(self):
-        """Apply weight normalization module to all of the layers."""
- def _apply_weight_norm(m):
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
- torch.nn.utils.weight_norm(m)
- logging.debug(f"Weight norm is applied to {m}.")
-
- self.apply(_apply_weight_norm)
-
- def remove_weight_norm(self):
- """Remove weight normalization module from all of the layers."""
- def _remove_weight_norm(m):
- try:
- logging.debug(f"Weight norm is removed from {m}.")
- torch.nn.utils.remove_weight_norm(m)
- except ValueError: # this module didn't have weight norm
- return
-
- self.apply(_remove_weight_norm)
-
-
-class ResidualParallelWaveGANDiscriminator(torch.nn.Module):
-    """Residual Parallel WaveGAN Discriminator module."""
-
- def __init__(self,
- in_channels=1,
- out_channels=1,
- kernel_size=3,
- layers=30,
- stacks=3,
- residual_channels=64,
- gate_channels=128,
- skip_channels=64,
- dropout=0.0,
- bias=True,
- use_weight_norm=True,
- use_causal_conv=False,
- nonlinear_activation="LeakyReLU",
- nonlinear_activation_params={"negative_slope": 0.2},
- ):
- """Initialize Parallel WaveGAN Discriminator module.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- kernel_size (int): Kernel size of dilated convolution.
- layers (int): Number of residual block layers.
- stacks (int): Number of stacks i.e., dilation cycles.
- residual_channels (int): Number of channels in residual conv.
- gate_channels (int): Number of channels in gated conv.
- skip_channels (int): Number of channels in skip conv.
- dropout (float): Dropout rate. 0.0 means no dropout applied.
- bias (bool): Whether to use bias parameter in conv.
- use_weight_norm (bool): Whether to use weight norm.
- If set to true, it will be applied to all of the conv layers.
- use_causal_conv (bool): Whether to use causal structure.
-            nonlinear_activation_params (dict): Nonlinear function parameters.
-
- """
- super(ResidualParallelWaveGANDiscriminator, self).__init__()
-        assert (kernel_size - 1) % 2 == 0, "Kernel size must be odd."
-
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.layers = layers
- self.stacks = stacks
- self.kernel_size = kernel_size
-
- # check the number of layers and stacks
- assert layers % stacks == 0
- layers_per_stack = layers // stacks
-
- # define first convolution
- self.first_conv = torch.nn.Sequential(
- Conv1d1x1(in_channels, residual_channels, bias=True),
- getattr(torch.nn, nonlinear_activation)(
- inplace=True, **nonlinear_activation_params),
- )
-
- # define residual blocks
- self.conv_layers = torch.nn.ModuleList()
- for layer in range(layers):
- dilation = 2 ** (layer % layers_per_stack)
- conv = ResidualBlock(
- kernel_size=kernel_size,
- residual_channels=residual_channels,
- gate_channels=gate_channels,
- skip_channels=skip_channels,
- aux_channels=-1,
- dilation=dilation,
- dropout=dropout,
- bias=bias,
- use_causal_conv=use_causal_conv,
- )
- self.conv_layers += [conv]
-
- # define output layers
- self.last_conv_layers = torch.nn.ModuleList([
- getattr(torch.nn, nonlinear_activation)(
- inplace=True, **nonlinear_activation_params),
- Conv1d1x1(skip_channels, skip_channels, bias=True),
- getattr(torch.nn, nonlinear_activation)(
- inplace=True, **nonlinear_activation_params),
- Conv1d1x1(skip_channels, out_channels, bias=True),
- ])
-
- # apply weight norm
- if use_weight_norm:
- self.apply_weight_norm()
-
- def forward(self, x):
- """Calculate forward propagation.
-
- Args:
- x (Tensor): Input noise signal (B, 1, T).
-
- Returns:
- Tensor: Output tensor (B, 1, T)
-
- """
- x = self.first_conv(x)
-
- skips = 0
- for f in self.conv_layers:
- x, h = f(x, None)
- skips += h
- skips *= math.sqrt(1.0 / len(self.conv_layers))
-
- # apply final layers
- x = skips
- for f in self.last_conv_layers:
- x = f(x)
- return x
-
- def apply_weight_norm(self):
-        """Apply weight normalization module to all of the layers."""
- def _apply_weight_norm(m):
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
- torch.nn.utils.weight_norm(m)
- logging.debug(f"Weight norm is applied to {m}.")
-
- self.apply(_apply_weight_norm)
-
- def remove_weight_norm(self):
- """Remove weight normalization module from all of the layers."""
- def _remove_weight_norm(m):
- try:
- logging.debug(f"Weight norm is removed from {m}.")
- torch.nn.utils.remove_weight_norm(m)
- except ValueError: # this module didn't have weight norm
- return
-
- self.apply(_remove_weight_norm)
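
As a standalone sanity check of the receptive-field arithmetic in _get_receptive_field_size above (this sketch is not part of the deleted module):

    def receptive_field_size(layers=30, stacks=3, kernel_size=3):
        layers_per_cycle = layers // stacks
        dilations = [2 ** (i % layers_per_cycle) for i in range(layers)]
        return (kernel_size - 1) * sum(dilations) + 1

    # With the default generator config the dilations cycle through 1, 2, 4, ..., 512
    # three times, so sum(dilations) = 3 * 1023 = 3069 and the receptive field is
    # 2 * 3069 + 1 = 6139 samples.
    print(receptive_field_size())  # 6139
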
diff --git a/spaces/zomehwh/vits-models-pcr/app.py b/spaces/zomehwh/vits-models-pcr/app.py
deleted file mode 100644
index c6a285285e90b8da370f2b41b88a6d2c46cb13c1..0000000000000000000000000000000000000000
--- a/spaces/zomehwh/vits-models-pcr/app.py
+++ /dev/null
@@ -1,287 +0,0 @@
-# coding=utf-8
-import os
-import re
-import argparse
-import utils
-import commons
-import json
-import torch
-import gradio as gr
-from models import SynthesizerTrn
-from text import text_to_sequence, _clean_text
-from torch import no_grad, LongTensor
-import gradio.processing_utils as gr_processing_utils
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
-
-hps_ms = utils.get_hparams_from_file(r'config/config.json')
-
-audio_postprocess_ori = gr.Audio.postprocess
-
-def audio_postprocess(self, y):
- data = audio_postprocess_ori(self, y)
- if data is None:
- return None
- return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
-
-
-gr.Audio.postprocess = audio_postprocess
-
-def get_text(text, hps, is_symbol):
- text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
- if hps.data.add_blank:
- text_norm = commons.intersperse(text_norm, 0)
- text_norm = LongTensor(text_norm)
- return text_norm, clean_text
-
-def create_tts_fn(net_g_ms, speaker_id):
- def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
- text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
- if limitation:
-            text_len = len(re.sub(r"\[([A-Z]{2})\]", "", text))
- max_len = 100
- if is_symbol:
- max_len *= 3
- if text_len > max_len:
- return "Error: Text is too long", None
- if not is_symbol:
- if language == 0:
- text = f"[ZH]{text}[ZH]"
- elif language == 1:
- text = f"[JA]{text}[JA]"
- else:
- text = f"{text}"
- stn_tst, clean_text = get_text(text, hps_ms, is_symbol)
- with no_grad():
- x_tst = stn_tst.unsqueeze(0).to(device)
- x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
- sid = LongTensor([speaker_id]).to(device)
- audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
- length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
-
- return "Success", (22050, audio)
- return tts_fn
-
-def create_to_symbol_fn(hps):
- def to_symbol_fn(is_symbol_input, input_text, temp_lang):
- if temp_lang == 0:
- clean_text = f'[ZH]{input_text}[ZH]'
- elif temp_lang == 1:
- clean_text = f'[JA]{input_text}[JA]'
- else:
- clean_text = input_text
- return _clean_text(clean_text, hps.data.text_cleaners) if is_symbol_input else ''
-
- return to_symbol_fn
-def change_lang(language):
- if language == 0:
- return 0.6, 0.668, 1.2
- elif language == 1:
- return 0.6, 0.668, 1
- else:
- return 0.6, 0.668, 1
-
-download_audio_js = """
-() =>{{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio");
- let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea");
- if (audio == undefined)
- return;
- text = text.value;
- if (text == undefined)
- text = Math.floor(Math.random()*100000000);
- audio = audio.src;
- let oA = document.createElement("a");
- oA.download = text.substr(0, 20)+'.wav';
- oA.href = audio;
- document.body.appendChild(oA);
- oA.click();
- oA.remove();
-}}
-"""
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--device', type=str, default='cpu')
- parser.add_argument('--api', action="store_true", default=False)
- parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
- args = parser.parse_args()
- device = torch.device(args.device)
- categories = ["Princess Connect! Re:Dive"]
- others = {
- "Blue Archive": "https://huggingface.co/spaces/sayashi/vits-models",
- "Genshin Impact": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3",
- "Honkai Impact 3rd": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3",
- "Overwatch 2": "https://huggingface.co/spaces/sayashi/vits-models-ow2"
- }
- models = []
- with open("pretrained_models/info.json", "r", encoding="utf-8") as f:
- models_info = json.load(f)
- for i, info in models_info.items():
- if info['title'].split("-")[0] not in categories or not info['enable']:
- continue
- sid = info['sid']
- name_en = info['name_en']
- name_zh = info['name_zh']
- title = info['title']
- cover = f"pretrained_models/{i}/{info['cover']}"
- example = info['example']
- language = info['language']
- net_g_ms = SynthesizerTrn(
- len(hps_ms.symbols),
- hps_ms.data.filter_length // 2 + 1,
- hps_ms.train.segment_size // hps_ms.data.hop_length,
- n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0,
- **hps_ms.model)
- utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None)
- _ = net_g_ms.eval().to(device)
- models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms)))
- with gr.Blocks() as app:
- gr.Markdown(
- "# vits-models\n"
- "## Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n"
- "## 请不要生成会对个人以及组织造成侵害的内容\n"
- "\n\n"
-        "[Open In Colab](https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)\n\n"
-        "[Duplicate this Space](https://huggingface.co/spaces/sayashi/vits-models?duplicate=true)\n\n"
-        "[Finetuning code on GitHub](https://github.com/SayaSS/vits-finetuning)"
- )
-
- with gr.Tabs():
- for category in categories:
- with gr.TabItem(category):
- with gr.TabItem("EN"):
- for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
- if title.split("-")[0] != category:
- continue
- with gr.TabItem(name_en):
- with gr.Row():
- gr.Markdown(
-                                        '<div align="center">'
-                                        f'<strong>{title}</strong>'
-                                        f'<img src="file/{cover}">' if cover else ""
-                                        '</div>'
- )
- with gr.Row():
- with gr.Column():
-                                        input_text = gr.Textbox(label="Text (100 characters limit)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}")
- lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"],
- type="index", value=language)
- with gr.Accordion(label="Advanced Options", open=False):
- symbol_input = gr.Checkbox(value=False, label="Symbol input")
- symbol_list = gr.Dataset(label="Symbol list", components=[input_text],
- samples=[[x] for x in hps_ms.symbols])
- symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
- btn = gr.Button(value="Generate", variant="primary")
- with gr.Row():
- ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
- nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
- ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
- with gr.Column():
- o1 = gr.Textbox(label="Output Message")
- o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}")
- download = gr.Button("Download Audio")
- btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2], api_name=f"tts-{name_en}")
- download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}"))
- lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
- symbol_input.change(
- to_symbol_fn,
- [symbol_input, input_text, lang],
- [input_text]
- )
- symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
- _js=f"""
- (i,symbols) => {{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea");
- let startPos = text_input.selectionStart;
- let endPos = text_input.selectionEnd;
- let oldTxt = text_input.value;
- let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
- text_input.value = result;
- let x = window.scrollX, y = window.scrollY;
- text_input.focus();
- text_input.selectionStart = startPos + symbols[i].length;
- text_input.selectionEnd = startPos + symbols[i].length;
- text_input.blur();
- window.scrollTo(x, y);
- return text_input.value;
- }}""")
- with gr.TabItem("中文"):
- for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
- if title.split("-")[0] != category:
- continue
- with gr.TabItem(name_zh):
- with gr.Row():
- gr.Markdown(
-                                        '<div align="center">'
-                                        f'<strong>{title}</strong>'
-                                        f'<img src="file/{cover}">' if cover else ""
-                                        '</div>'
- )
- with gr.Row():
- with gr.Column():
- input_text = gr.Textbox(label="文本 (100字上限)" if limitation else "文本", lines=5, value=example, elem_id=f"input-text-zh-{name_zh}")
- lang = gr.Dropdown(label="语言", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
- type="index", value="中文"if language == "Chinese" else "日语")
- with gr.Accordion(label="高级选项", open=False):
- symbol_input = gr.Checkbox(value=False, label="符号输入")
- symbol_list = gr.Dataset(label="符号列表", components=[input_text],
- samples=[[x] for x in hps_ms.symbols])
- symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
- btn = gr.Button(value="生成", variant="primary")
- with gr.Row():
- ns = gr.Slider(label="控制感情变化程度", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
- nsw = gr.Slider(label="控制音素发音长度", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
- ls = gr.Slider(label="控制整体语速", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
- with gr.Column():
- o1 = gr.Textbox(label="输出信息")
- o2 = gr.Audio(label="输出音频", elem_id=f"tts-audio-zh-{name_zh}")
- download = gr.Button("下载音频")
- btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2])
- download.click(None, [], [], _js=download_audio_js.format(audio_id=f"zh-{name_zh}"))
- lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
- symbol_input.change(
- to_symbol_fn,
- [symbol_input, input_text, lang],
- [input_text]
- )
- symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
- _js=f"""
- (i,symbols) => {{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let text_input = root.querySelector("#input-text-zh-{name_zh}").querySelector("textarea");
- let startPos = text_input.selectionStart;
- let endPos = text_input.selectionEnd;
- let oldTxt = text_input.value;
- let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
- text_input.value = result;
- let x = window.scrollX, y = window.scrollY;
- text_input.focus();
- text_input.selectionStart = startPos + symbols[i].length;
- text_input.selectionEnd = startPos + symbols[i].length;
- text_input.blur();
- window.scrollTo(x, y);
- return text_input.value;
- }}""")
- for category, link in others.items():
- with gr.TabItem(category):
- gr.Markdown(
- f'''
-                          <div align="center">
-                            <a href="{link}">Click to Go</a>
-                          </div>
- '''
- )
- app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
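
get_text above calls commons.intersperse to place a blank token id between symbol ids when add_blank is set. The module itself is not shown here, so the following is only a sketch of how that helper is usually defined in VITS-style code bases:

    def intersperse(lst, item):
        # [5, 12, 7] with item 0 becomes [0, 5, 0, 12, 0, 7, 0]
        result = [item] * (len(lst) * 2 + 1)
        result[1::2] = lst
        return result

    print(intersperse([5, 12, 7], 0))  # [0, 5, 0, 12, 0, 7, 0]
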
diff --git a/spaces/zxc314/vits-uma-genshin-honkai/Docker/Dockerfile b/spaces/zxc314/vits-uma-genshin-honkai/Docker/Dockerfile
deleted file mode 100644
index 4d39cdf02a2ec151686cc1d61234bf723068fed8..0000000000000000000000000000000000000000
--- a/spaces/zxc314/vits-uma-genshin-honkai/Docker/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM python:3.9-bullseye
-VOLUME ["/app"]
-WORKDIR /app
-# Set apt to Chinese mirror
-RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
-RUN apt-get update && apt-get -y install cmake git
-RUN git clone https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai
-WORKDIR /app/vits-uma-genshin-honkai
-RUN sed -i "s/\.launch()/\.launch(server_name=\"0.0.0.0\")/" /app/vits-uma-genshin-honkai/app.py
-ADD vits.sh /app/vits.sh
-EXPOSE 7860
-ENTRYPOINT [ "/app/vits.sh" ]
\ No newline at end of file
diff --git a/spaces/zzz666/ChuanhuChatGPT/assets/Kelpy-Codos.js b/spaces/zzz666/ChuanhuChatGPT/assets/Kelpy-Codos.js
deleted file mode 100644
index cfbaeedb4f371dfb5fe157db545b364046fca3e1..0000000000000000000000000000000000000000
--- a/spaces/zzz666/ChuanhuChatGPT/assets/Kelpy-Codos.js
+++ /dev/null
@@ -1,76 +0,0 @@
-// ==UserScript==
-// @name Kelpy Codos
-// @namespace https://github.com/Keldos-Li/Kelpy-Codos
-// @version 1.0.5
-// @author Keldos; https://keldos.me/
-// @description Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially.
-// Based on Chuanhu ChatGPT version: ac04408 (2023-3-22)
-// @license GPL-3.0
-// @grant none
-// ==/UserScript==
-
-(function () {
- 'use strict';
-
- function addCopyButton(pre) {
- var code = pre.querySelector('code');
- if (!code) {
-            return; // do not add a copy button if no <code> element is found
- }
- var firstChild = code.firstChild;
- if (!firstChild) {
-            return; // do not add a copy button if the <code> element has no child nodes
- }
- var button = document.createElement('button');
-        button.textContent = '\uD83D\uDCCE'; // use the 📎 symbol as the "copy" button label
- button.style.position = 'relative';
- button.style.float = 'right';
-        button.style.fontSize = '1em'; // optional: adjust the button size
-        button.style.background = 'none'; // optional: remove the background color
-        button.style.border = 'none'; // optional: remove the border
-        button.style.cursor = 'pointer'; // optional: show a pointer cursor
- button.addEventListener('click', function () {
- var range = document.createRange();
- range.selectNodeContents(code);
-            range.setStartBefore(firstChild); // set the range to start before the first child node
- var selection = window.getSelection();
- selection.removeAllRanges();
- selection.addRange(range);
-
- try {
- var success = document.execCommand('copy');
- if (success) {
- button.textContent = '\u2714';
- setTimeout(function () {
-                        button.textContent = '\uD83D\uDCCE'; // restore the "copy" label
- }, 2000);
- } else {
- button.textContent = '\u2716';
- }
- } catch (e) {
- console.error(e);
- button.textContent = '\u2716';
- }
-
- selection.removeAllRanges();
- });
-        code.insertBefore(button, firstChild); // insert the button before the code element's first child
- }
-
- function handleNewElements(mutationsList, observer) {
- for (var mutation of mutationsList) {
- if (mutation.type === 'childList') {
- for (var node of mutation.addedNodes) {
- if (node.nodeName === 'PRE') {
- addCopyButton(node);
- }
- }
- }
- }
- }
-
- var observer = new MutationObserver(handleNewElements);
- observer.observe(document.documentElement, { childList: true, subtree: true });
-
- document.querySelectorAll('pre').forEach(addCopyButton);
-})();