hlky (HF staff) committed
Commit 39c3dbe · verified · 1 parent: 96b9106

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ images/fix-fp16.png filter=lfs diff=lfs merge=lfs -text
+ images/fix-fp32.png filter=lfs diff=lfs merge=lfs -text
+ images/orig-fp32.png filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.18.0.dev0",
+   "_name_or_path": ".",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 512,
+   "scaling_factor": 0.13025,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ],
+   "force_upcast": false
+ }
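For context (not part of the commit), a minimal sketch of loading the VAE this config describes with diffusers. The path "." mirrors _name_or_path in config.json and is an assumption; substitute the actual repo id or checkout path. With force_upcast set to false, the float16 upcasting branch in handler.py is skipped and decoding stays in half precision, and scaling_factor (0.13025) is the value the handler divides latents by before decoding.

import torch
from diffusers import AutoencoderKL

# "." assumes a local checkout of this repo; replace with the actual repo id.
vae = AutoencoderKL.from_pretrained(".", torch_dtype=torch.float16)

print(vae.config.scaling_factor)                      # 0.13025
print(vae.config.force_upcast)                        # False
print(2 ** (len(vae.config.block_out_channels) - 1))  # 8, the VAE scale factor handler.py computes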
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c
+ size 334643238
handler.py ADDED
@@ -0,0 +1,85 @@
+ from typing import Dict, List, Any
+ import torch
+ from base64 import b64decode
+ from diffusers import AutoencoderKL
+ from diffusers.image_processor import VaeImageProcessor
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         self.device = "cuda"
+         self.dtype = torch.float16
+         self.vae = AutoencoderKL.from_pretrained(path, torch_dtype=self.dtype).to(self.device, self.dtype).eval()
+
+         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+
+     @torch.no_grad()
+     def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
+         """
+         Args:
+             data (:obj:):
+                 includes the input data and the parameters for the inference.
+         """
+         tensor = data["inputs"]
+         tensor = b64decode(tensor.encode("utf-8"))
+         parameters = data.get("parameters", {})
+         if "shape" not in parameters:
+             raise ValueError("Expected `shape` in parameters.")
+         if "dtype" not in parameters:
+             raise ValueError("Expected `dtype` in parameters.")
+
+         DTYPE_MAP = {
+             "float16": torch.float16,
+             "float32": torch.float32,
+             "bfloat16": torch.bfloat16,
+         }
+
+         shape = parameters.get("shape")
+         dtype = DTYPE_MAP.get(parameters.get("dtype"))
+         tensor = torch.frombuffer(bytearray(tensor), dtype=dtype).reshape(shape)
+
+         needs_upcasting = (
+             self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+         )
+         if needs_upcasting:
+             self.vae = self.vae.to(torch.float32)
+             tensor = tensor.to(self.device, torch.float32)
+         else:
+             tensor = tensor.to(self.device, self.dtype)
+
+         # unscale/denormalize the latents
+         # denormalize with the mean and std if available and not None
+         has_latents_mean = (
+             hasattr(self.vae.config, "latents_mean")
+             and self.vae.config.latents_mean is not None
+         )
+         has_latents_std = (
+             hasattr(self.vae.config, "latents_std")
+             and self.vae.config.latents_std is not None
+         )
+         if has_latents_mean and has_latents_std:
+             latents_mean = (
+                 torch.tensor(self.vae.config.latents_mean)
+                 .view(1, 4, 1, 1)
+                 .to(tensor.device, tensor.dtype)
+             )
+             latents_std = (
+                 torch.tensor(self.vae.config.latents_std)
+                 .view(1, 4, 1, 1)
+                 .to(tensor.device, tensor.dtype)
+             )
+             tensor = (
+                 tensor * latents_std / self.vae.config.scaling_factor + latents_mean
+             )
+         else:
+             tensor = tensor / self.vae.config.scaling_factor
+
+         with torch.no_grad():
+             image = self.vae.decode(tensor, return_dict=False)[0]
+
+         if needs_upcasting:
+             self.vae.to(dtype=torch.float16)
+
+         image = self.image_processor.postprocess(image, output_type="pil")
+
+         return image[0]
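For reference (not part of the commit), a client-side sketch of the request format handler.py expects: raw latent bytes, base64-encoded, plus "shape" and "dtype" parameters. The endpoint URL and token below are placeholders, and the latent shape (1, 4, 64, 64) assumes a 512x512 output, i.e. the sample_size from config.json divided by the VAE scale factor of 8.

from base64 import b64encode

import requests
import torch

# Random latents standing in for the output of a diffusion model's denoising loop.
latents = torch.randn(1, 4, 64, 64, dtype=torch.float16)

payload = {
    "inputs": b64encode(latents.numpy().tobytes()).decode("utf-8"),
    "parameters": {"shape": list(latents.shape), "dtype": "float16"},
}

response = requests.post(
    "https://<endpoint-url>",                        # placeholder: the deployed endpoint
    headers={"Authorization": "Bearer <hf-token>"},  # placeholder token
    json=payload,
)

The handler returns image_processor.postprocess(image, output_type="pil")[0], a single PIL image; what arrives in the response body depends on how the endpoint runtime serializes that return value.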
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ huggingface_hub
+ diffusers