hlky (HF staff) committed (verified)
Commit acfaf90 · Parent(s): f4307cc

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ images/fix-fp16.png filter=lfs diff=lfs merge=lfs -text
+ images/fix-fp32.png filter=lfs diff=lfs merge=lfs -text
+ images/orig-fp32.png filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "AutoencoderKLHunyuanVideo",
+   "_diffusers_version": "0.32.0.dev0",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "HunyuanVideoDownBlock3D",
+     "HunyuanVideoDownBlock3D",
+     "HunyuanVideoDownBlock3D",
+     "HunyuanVideoDownBlock3D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 16,
+   "layers_per_block": 2,
+   "mid_block_add_attention": true,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "scaling_factor": 0.476986,
+   "spatial_compression_ratio": 8,
+   "temporal_compression_ratio": 4,
+   "up_block_types": [
+     "HunyuanVideoUpBlock3D",
+     "HunyuanVideoUpBlock3D",
+     "HunyuanVideoUpBlock3D",
+     "HunyuanVideoUpBlock3D"
+   ]
+ }
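The config describes a HunyuanVideo VAE with 16 latent channels, 8x spatial and 4x temporal compression, and a scaling factor of 0.476986. A minimal sketch of instantiating it with diffusers follows; the repo id is a placeholder for illustration, not something stated in this commit.

import torch
from diffusers import AutoencoderKLHunyuanVideo

# Placeholder repo id; substitute the actual repository name.
vae = AutoencoderKLHunyuanVideo.from_pretrained(
    "user/hunyuanvideo-vae",  # hypothetical
    torch_dtype=torch.float16,
).to("cuda").eval()

# With spatial_compression_ratio=8 and temporal_compression_ratio=4, a video
# batch of shape (B, 3, T, H, W) maps to latents of roughly (B, 16, T/4, H/8, W/8).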
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c68a6295f9034a88225fbafb1f3258291a08d57a1fdb938233fa57b1b8f4883
+ size 985943868
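The checkpoint is committed as a Git LFS pointer; the ~986 MB safetensors payload is resolved at download time. A short sketch of fetching the file and listing its tensor keys, again with a placeholder repo id.

from huggingface_hub import hf_hub_download
from safetensors import safe_open

# Placeholder repo id; substitute the actual repository name.
path = hf_hub_download(
    repo_id="user/hunyuanvideo-vae",  # hypothetical
    filename="diffusion_pytorch_model.safetensors",
)
with safe_open(path, framework="pt", device="cpu") as f:
    print(f"{len(f.keys())} tensors in the VAE state dict")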
handler.py ADDED
@@ -0,0 +1,69 @@
+ from typing import Any, Dict, List, Tuple
+
+ import base64
+ import torch
+
+ from diffusers import AutoencoderKLHunyuanVideo
+ from safetensors.torch import _tobytes
+
+
+ def prepare_tensor(tensor: torch.Tensor) -> Tuple[str, List[int], str]:
+     tensor_data = base64.b64encode(_tobytes(tensor, "inputs")).decode("utf-8")
+     shape = list(tensor.shape)
+     dtype = str(tensor.dtype).split(".")[-1]
+     return tensor_data, shape, dtype
+
+
+ def unpack_tensor(tensor_data: str, shape: List[int], dtype: str) -> torch.Tensor:
+     tensor = base64.b64decode(tensor_data.encode("utf-8"))
+     DTYPE_MAP = {
+         "float16": torch.float16,
+         "float32": torch.float32,
+         "bfloat16": torch.bfloat16,
+     }
+     torch_dtype = DTYPE_MAP.get(dtype)
+     tensor = torch.frombuffer(bytearray(tensor), dtype=torch_dtype).reshape(shape)
+     return tensor
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         self.device = "cuda"
+         self.dtype = torch.float16
+         self.vae = (
+             AutoencoderKLHunyuanVideo.from_pretrained(
+                 path, subfolder="vae", torch_dtype=self.dtype
+             )
+             .to(self.device, self.dtype)
+             .eval()
+         )
+
+     @torch.no_grad()
+     def __call__(self, data: Any) -> Dict[str, Any]:
+         """
+         Args:
+             data (:obj:):
+                 includes the input data and the parameters for the inference.
+         """
+         tensor_data = data["inputs"]
+         parameters = data.get("parameters", {})
+         if "shape" not in parameters:
+             raise ValueError("Expected `shape` in parameters.")
+         if "dtype" not in parameters:
+             raise ValueError("Expected `dtype` in parameters.")
+
+         shape = parameters.get("shape")
+         dtype = parameters.get("dtype")
+
+         tensor = unpack_tensor(tensor_data, shape, dtype)
+
+         tensor = tensor.to(self.device, self.dtype)
+
+         tensor = tensor / self.vae.config.scaling_factor
+
+         with torch.no_grad():
+             frames = self.vae.decode(tensor, return_dict=False)[0]
+
+         tensor_data, shape, dtype = prepare_tensor(frames)
+
+         return {"tensor": tensor_data, "shape": shape, "dtype": dtype}
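handler.py implements a decode-only Inference Endpoints handler: the request carries base64-encoded latents plus their shape and dtype, and the response returns the decoded frames packed the same way. A client-side sketch of the round trip, assuming a placeholder endpoint URL and token, an example latent shape, and the requests library (not part of this repo's requirements).

import base64
import json

import requests
import torch

API_URL = "https://<endpoint-name>.endpoints.huggingface.cloud"  # placeholder
HEADERS = {
    "Authorization": "Bearer <HF_TOKEN>",  # placeholder
    "Content-Type": "application/json",
}

# Example latents only; real shapes follow (B, 16, T', H/8, W/8) per the config.
latents = torch.randn(1, 16, 5, 40, 64, dtype=torch.float16)

payload = {
    "inputs": base64.b64encode(latents.numpy().tobytes()).decode("utf-8"),
    "parameters": {"shape": list(latents.shape), "dtype": "float16"},
}
resp = requests.post(API_URL, headers=HEADERS, data=json.dumps(payload)).json()

# Mirror unpack_tensor on the client to recover the decoded frames.
frames = torch.frombuffer(
    bytearray(base64.b64decode(resp["tensor"])), dtype=torch.float16
).reshape(resp["shape"])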
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ huggingface_hub
+ diffusers
+ imageio
+ imageio-ffmpeg
+ opencv-python