hlky HF staff committed on
Commit 0c28674 · verified · 1 Parent(s): 2400eeb

Update handler.py

Files changed (1):
  1. handler.py +31 -44
handler.py CHANGED
@@ -1,70 +1,57 @@
-from typing import Dict, List, Any
+from typing import cast, Union
 
+import PIL.Image
 import torch
 
 from diffusers import AutoencoderKLHunyuanVideo
 from diffusers.video_processor import VideoProcessor
-from safetensors.torch import _tobytes
-
-
-def unpack_tensor(tensor_data: bytes, shape: List[int], dtype: str) -> torch.Tensor:
-    tensor = tensor_data
-    DTYPE_MAP = {
-        "float16": torch.float16,
-        "float32": torch.float32,
-        "bfloat16": torch.bfloat16,
-    }
-    torch_dtype = DTYPE_MAP.get(dtype)
-    tensor = torch.frombuffer(bytearray(tensor), dtype=torch_dtype).reshape(shape)
-    return tensor
-
+from diffusers.utils import export_to_video
 
 class EndpointHandler:
     def __init__(self, path=""):
         self.device = "cuda"
         self.dtype = torch.float16
-        self.vae = (
-            AutoencoderKLHunyuanVideo.from_pretrained(
-                path, subfolder="vae", torch_dtype=self.dtype
-            )
-            .to(self.device, self.dtype)
-            .eval()
-        )
+        self.vae = cast(AutoencoderKLHunyuanVideo, AutoencoderKLHunyuanVideo.from_pretrained(path, torch_dtype=self.dtype).to(self.device, self.dtype).eval())
 
-        self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio
+        self.vae_scale_factor = self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio
         self.video_processor = VideoProcessor(
             vae_scale_factor=self.vae_scale_factor_spatial
         )
 
     @torch.no_grad()
-    def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
+    def __call__(self, data) -> Union[torch.Tensor, bytes]:
         """
         Args:
             data (:obj:):
                 includes the input data and the parameters for the inference.
         """
-        tensor_data = data["inputs"]
-        parameters = data.get("parameters", {})
-        if "shape" not in parameters:
-            raise ValueError("Expected `shape` in parameters.")
-        if "dtype" not in parameters:
-            raise ValueError("Expected `dtype` in parameters.")
-
-        shape = parameters.get("shape")
-        dtype = parameters.get("dtype")
-
-        tensor = unpack_tensor(tensor_data, shape, dtype)
+        tensor = cast(torch.Tensor, data["inputs"])
+        parameters = cast(dict, data.get("parameters", {}))
+        do_scaling = cast(bool, parameters.get("do_scaling", True))
+        output_type = cast(str, parameters.get("output_type", "pil"))
+        partial_postprocess = cast(bool, parameters.get("partial_postprocess", False))
+        if partial_postprocess and output_type != "pt":
+            output_type = "pt"
 
         tensor = tensor.to(self.device, self.dtype)
 
-        tensor = tensor / self.vae.config.scaling_factor
+        if do_scaling:
+            tensor = tensor / self.vae.config.scaling_factor
 
         with torch.no_grad():
-            frames = self.vae.decode(tensor, return_dict=False)[0]
-
-        frames = frames[0].permute(1, 0, 2, 3)
-        frames = torch.stack([(frame * 0.5 + 0.5).clamp(0, 1) for frame in frames])
-        frames = frames.permute(0, 2, 3, 1).contiguous().float()
-        frames = (frames * 255).round().to(torch.uint8)
-
-        return _tobytes(frames, "frames")
+            frames = cast(torch.Tensor, self.vae.decode(tensor, return_dict=False)[0])
+
+        if partial_postprocess:
+            frames = frames[0].permute(1, 0, 2, 3)
+            frames = torch.stack([(frame * 0.5 + 0.5).clamp(0, 1) for frame in frames])
+            frames = frames.permute(0, 2, 3, 1).contiguous().float()
+            frames = (frames * 255).round().to(torch.uint8)
+        elif output_type == "pil" or output_type == "pt":
+            frames = cast(torch.Tensor, self.video_processor.postprocess_video(frames, output_type="pt")[0])
+        elif output_type == "mp4":
+            frames = cast(list[PIL.Image.Image], self.video_processor.postprocess_video(frames, output_type="pil")[0])
+            path = export_to_video(frames, fps=15)
+            with open(path, "rb") as f:
+                frames = f.read()
+
+        return frames
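
For reference, a minimal sketch of how a client might drive the updated handler in-process. The model path, the latent shape, and the output filename are illustrative assumptions, not part of the commit; the handler expects "inputs" to already be a torch.Tensor of HunyuanVideo latents.

import torch
from handler import EndpointHandler

# Assumed: `path` points at a folder holding the HunyuanVideo VAE weights at its
# root (the commit drops the subfolder="vae" argument). Hypothetical path.
handler = EndpointHandler(path="hunyuanvideo-vae")

# Illustrative latent of shape [batch, channels, frames, height, width];
# these dimensions are example values, not from the commit.
latents = torch.randn(1, 16, 9, 40, 64, dtype=torch.float16)

# partial_postprocess=True forces output_type="pt" and returns a uint8
# tensor shaped [frames, height, width, 3], ready to serialize.
frames = handler({
    "inputs": latents,
    "parameters": {"do_scaling": True, "partial_postprocess": True},
})

# output_type="mp4" instead encodes the frames via export_to_video
# and returns the raw bytes of the encoded file.
video = handler({
    "inputs": latents,
    "parameters": {"output_type": "mp4"},
})
with open("sample.mp4", "wb") as f:
    f.write(video)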