hlky (HF staff) committed · verified
Commit 86c1f29 · 1 Parent(s): 60bc3d0

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+images/fix-fp16.png filter=lfs diff=lfs merge=lfs -text
+images/fix-fp32.png filter=lfs diff=lfs merge=lfs -text
+images/orig-fp32.png filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.18.0.dev0",
+  "_name_or_path": ".",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "in_channels": 3,
+  "latent_channels": 4,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 512,
+  "scaling_factor": 0.13025,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ],
+  "force_upcast": false
+}
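
The config above describes an SDXL-style `AutoencoderKL` (`scaling_factor` 0.13025) with `force_upcast` set to `false`, which signals to diffusers pipelines that this VAE can decode in fp16 without being upcast to fp32. A minimal loading sketch, assuming `REPO_ID` stands in for wherever this folder is hosted:

import torch
from diffusers import AutoencoderKL

# Load the VAE directly in fp16; force_upcast=false in config.json means
# pipelines will not upcast it to fp32 before decoding.
vae = AutoencoderKL.from_pretrained("REPO_ID", torch_dtype=torch.float16).to("cuda").eval()

latents = torch.randn(1, 4, 128, 128, device="cuda", dtype=torch.float16)
with torch.no_grad():
    image = vae.decode(latents / vae.config.scaling_factor, return_dict=False)[0]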
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c
+size 334643238
handler.py ADDED
@@ -0,0 +1,88 @@
+from typing import Any, Dict, List, Tuple
+
+import base64
+import torch
+
+from diffusers import AutoencoderKL
+from safetensors.torch import _tobytes
+
+# Serialize a tensor to (base64 data, shape, dtype name) for JSON transport.
+def prepare_tensor(tensor: torch.Tensor) -> Tuple[str, List[int], str]:
+    tensor_data = base64.b64encode(_tobytes(tensor, "inputs")).decode("utf-8")
+    shape = list(tensor.shape)
+    dtype = str(tensor.dtype).split(".")[-1]
+    return tensor_data, shape, dtype
+
+
+# Inverse of prepare_tensor: rebuild a torch.Tensor from base64-encoded bytes.
+def unpack_tensor(tensor_data: str, shape: List[int], dtype: str) -> torch.Tensor:
+    raw = base64.b64decode(tensor_data.encode("utf-8"))
+    DTYPE_MAP = {
+        "float16": torch.float16,
+        "float32": torch.float32,
+        "bfloat16": torch.bfloat16,
+    }
+    torch_dtype = DTYPE_MAP.get(dtype)
+    if torch_dtype is None:
+        raise ValueError(f"Unsupported dtype: {dtype}")
+    return torch.frombuffer(bytearray(raw), dtype=torch_dtype).reshape(shape)
+
+class EndpointHandler:
+    def __init__(self, path=""):
+        self.device = "cuda"
+        self.dtype = torch.float16
+        self.vae = AutoencoderKL.from_pretrained(path, torch_dtype=self.dtype).to(self.device, self.dtype).eval()
+
+    @torch.no_grad()
+    def __call__(self, data: Any) -> Dict[str, Any]:
+        """
+        Args:
+            data (:obj:`dict`):
+                Contains the base64-encoded latents under `inputs` and the inference parameters (`shape`, `dtype`).
+        """
+        tensor_data = data["inputs"]
+        parameters = data.get("parameters", {})
+        if "shape" not in parameters:
+            raise ValueError("Expected `shape` in parameters.")
+        if "dtype" not in parameters:
+            raise ValueError("Expected `dtype` in parameters.")
+
+        shape = parameters.get("shape")
+        dtype = parameters.get("dtype")
+
+        tensor = unpack_tensor(tensor_data, shape, dtype)
+
+        tensor = tensor.to(self.device, self.dtype)
+
+        # unscale/denormalize the latents
+        # denormalize with the mean and std if available and not None
+        has_latents_mean = (
+            hasattr(self.vae.config, "latents_mean")
+            and self.vae.config.latents_mean is not None
+        )
+        has_latents_std = (
+            hasattr(self.vae.config, "latents_std")
+            and self.vae.config.latents_std is not None
+        )
+        if has_latents_mean and has_latents_std:
+            latents_mean = (
+                torch.tensor(self.vae.config.latents_mean)
+                .view(1, 4, 1, 1)
+                .to(tensor.device, tensor.dtype)
+            )
+            latents_std = (
+                torch.tensor(self.vae.config.latents_std)
+                .view(1, 4, 1, 1)
+                .to(tensor.device, tensor.dtype)
+            )
+            tensor = (
+                tensor * latents_std / self.vae.config.scaling_factor + latents_mean
+            )
+        else:
+            tensor = tensor / self.vae.config.scaling_factor
+
+        image = self.vae.decode(tensor, return_dict=False)[0]
+
+        tensor_data, shape, dtype = prepare_tensor(image)
+
+        return {"tensor": tensor_data, "shape": shape, "dtype": dtype}
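
For reference, a minimal client-side sketch of calling this handler. `ENDPOINT_URL` and `HF_TOKEN` are placeholders, and `encode_tensor` is a hypothetical helper that mirrors the wire format of `prepare_tensor`/`unpack_tensor` above:

import base64

import requests
import torch

ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"  # placeholder
HF_TOKEN = "hf_..."  # placeholder

def encode_tensor(tensor: torch.Tensor):
    # Same layout the handler expects: raw bytes, base64-encoded, plus shape and dtype.
    data = base64.b64encode(tensor.contiguous().numpy().tobytes()).decode("utf-8")
    return data, list(tensor.shape), str(tensor.dtype).split(".")[-1]

latents = torch.randn(1, 4, 128, 128, dtype=torch.float16)  # SDXL latents for a 1024x1024 image
tensor_data, shape, dtype = encode_tensor(latents)

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
    json={"inputs": tensor_data, "parameters": {"shape": shape, "dtype": dtype}},
)
out = response.json()

# Rebuild the decoded image tensor, mirroring unpack_tensor in handler.py.
image = torch.frombuffer(
    bytearray(base64.b64decode(out["tensor"])), dtype=getattr(torch, out["dtype"])
).reshape(out["shape"])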
requirements.txt ADDED
@@ -0,0 +1,2 @@
+huggingface_hub
+diffusers