Commit 3d760ea
Parent: dc4d943

the latest version based on inference.py

Files changed:
- handler.py: +65 -130
- inference.py: +415 -0
handler.py
CHANGED
@@ -1,142 +1,77 @@
 import os
-import torch
 import base64
-from PIL import Image
-from io import BytesIO
-from typing import Dict, Any
-from transformers import LlamaTokenizer, GenerationConfig
-from robohusky.model.modeling_husky_embody2 import HuskyForConditionalGeneration
-from decord import VideoReader, cpu
-import torchvision.transforms as T
-from torchvision.transforms.functional import InterpolationMode
 import tempfile
+from inference import Chat, get_conv_template
+import torch

-
-
-
-
+def save_base64_to_tempfile(base64_str, suffix):
+    header_removed = base64_str
+    # strip a possible data:image/...;base64, prefix
+    if ',' in base64_str:
+        header_removed = base64_str.split(',', 1)[1]

-
-
-
-
-
-            model_path, torch_dtype=torch.float16 if self.device == "cuda" else torch.float32
-        ).to(self.device).eval()
-
-        self.gen_config = GenerationConfig(
-            bos_token_id=1,
-            do_sample=False,
-            # temperature=0.7,
-            max_new_tokens=10240
+    data = base64.b64decode(header_removed)
+    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
+    tmp.write(data)
+    tmp.close()
+    return tmp.name

+class EndpointHandler:
+    def __init__(self, model_path: str):
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.chat = Chat(
+            model_path=model_path,
+            device=device,
+            num_gpus=1,
+            max_new_tokens=1024,
+            load_8bit=False,
         )
+        self.vision_feature = None
+        self.modal_type = "text"
+        self.chat.conv = get_conv_template("husky").copy()
+
+    def __call__(self, data: dict) -> dict:
+        # reset conversation if specified
+        if data.get("clear_history"):
+            self.chat.conv = get_conv_template("husky").copy()
+            self.vision_feature = None
+            self.modal_type = "text"
+
+        prompt = data.get("inputs", "")
+        image_input = data.get("image", None)
+        video_input = data.get("video", None)
+
+        # check whether the image input is a file path or a base64 string
+        if image_input:
+            if os.path.exists(image_input):
+                # use the path directly
+                self.vision_feature = self.chat.get_image_embedding(image_input)
+            else:
+                # base64 string: save it to a temp file, then process
+                tmp_path = save_base64_to_tempfile(image_input, suffix=".jpg")
+                self.vision_feature = self.chat.get_image_embedding(tmp_path)
+                os.unlink(tmp_path)  # delete the temp file
+            self.modal_type = "image"
+            self.chat.conv = get_conv_template("husky").copy()
+
+        elif video_input:
+            if os.path.exists(video_input):
+                self.vision_feature = self.chat.get_video_embedding(video_input)
+            else:
+                tmp_path = save_base64_to_tempfile(video_input, suffix=".mp4")
+                self.vision_feature = self.chat.get_video_embedding(tmp_path)
+                os.unlink(tmp_path)
+            self.modal_type = "video"
+            self.chat.conv = get_conv_template("husky").copy()

-    def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
-        inputs = self.preprocess(data)
-        prediction = self.inference(inputs)
-        return self.postprocess(prediction)
-
-    def preprocess(self, request: Dict[str, Any]) -> Dict[str, Any]:
-        prompt = request["inputs"]
-        image_b64 = request.get("image", None)
-        video_b64 = request.get("video", None)
-
-        pixel_values = None
-
-        if image_b64:
-            image_bytes = base64.b64decode(image_b64)
-            pixel_values = self._load_image(image_bytes).unsqueeze(0)  # [1, 3, 224, 224]
-            if self.device == "cuda":
-                pixel_values = pixel_values.half()
-            pixel_values = pixel_values.to(self.device)
-            prompt = prompt.replace("<image>", DEFAULT_IMG_START_TOKEN + DEFAULT_IMG_END_TOKEN)
-
-        elif video_b64:
-            video_bytes = base64.b64decode(video_b64)
-            pixel_values = self._load_video(video_bytes)
-            if self.device == "cuda":
-                pixel_values = pixel_values.half()
-            pixel_values = pixel_values.to(self.device)
-            prompt = prompt.replace("<video>", DEFAULT_VIDEO_START_TOKEN + DEFAULT_VIDEO_END_TOKEN)
-
-        return {
-            "prompt": prompt,
-            "pixel_values": pixel_values
-        }
-
-    def inference(self, inputs: Dict[str, Any]) -> str:
-        prompt = inputs["prompt"]
-        pixel_values = inputs["pixel_values"]
-
-        model_inputs = self.tokenizer([prompt], return_tensors="pt")
-        model_inputs.pop("token_type_ids", None)
-        model_inputs = {k: v.to(self.device) for k, v in model_inputs.items()}
-
-        if pixel_values is not None:
-            output = self.model.generate(
-                **model_inputs,
-                pixel_values=pixel_values,
-                max_new_tokens=self.gen_config.max_new_tokens,  # 👈 passed in explicitly
-                generation_config=self.gen_config,
-                return_dict_in_generate=True,
-                output_scores=True
-            )
         else:
-
-
-                generation_config=self.gen_config,
-                return_dict_in_generate=True,
-                output_scores=True
-            )
-        # 🧠 print debug info
-        generated_ids = output.sequences[0]
-        print("📍 generated token ids:", generated_ids.tolist())
-        raw_text = self.tokenizer.decode(generated_ids, skip_special_tokens=False)
-        clean_text = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
-        print("🧾 output with special tokens:", raw_text)
-        print("✅ output without special tokens:", clean_text)
+            self.modal_type = "text"
+            self.vision_feature = None

-
-
-        return {"output": output.strip()}
+        conversations = self.chat.ask(prompt, self.chat.conv, modal_type=self.modal_type)
+        output = self.chat.answer(conversations, self.vision_feature, modal_type=self.modal_type)

-
-
-        crop_pct = 224 / 256
-        size = int(224 / crop_pct)
-        transform = T.Compose([
-            T.Resize(size, interpolation=InterpolationMode.BICUBIC),
-            T.CenterCrop(224),
-            T.ToTensor(),
-            T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
-        ])
-        return transform(image)
+        # update the conversation history
+        self.chat.conv.messages[-1][1] = output.strip()

-
-        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
-            tmpfile.write(video_bytes)
-            video_path = tmpfile.name
-
-        vr = VideoReader(video_path, ctx=cpu(0))
-        total_frames = len(vr)
-        indices = self.get_index(total_frames, num_segments)
-        frames = [Image.fromarray(vr[i].asnumpy()) for i in indices]
-
-        transform = T.Compose([
-            T.Resize(224, interpolation=InterpolationMode.BICUBIC),
-            T.CenterCrop(224),
-            T.ToTensor(),
-            T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
-        ])
-        processed = [transform(frame) for frame in frames]  # each: [3, 224, 224]
-        video_tensor = torch.stack(processed, dim=0)  # [T, 3, 224, 224]
-        video_tensor = video_tensor.permute(1, 0, 2, 3)  # [3, T, 224, 224]
-        video_tensor = video_tensor.unsqueeze(0)  # [1, 3, T, 224, 224] ✅
-        return video_tensor
-
-    def get_index(self, num_frames: int, num_segments: int):
-        if num_frames < num_segments:
-            return list(range(num_frames)) + [num_frames - 1] * (num_segments - num_frames)
-        interval = num_frames / num_segments
-        return [int(interval * i) for i in range(num_segments)]
+        return {"output": output.strip()}
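For reference, a minimal sketch of how the rewritten handler could be exercised locally. The checkpoint directory and sample image below are placeholder assumptions, not part of this commit:

import base64

from handler import EndpointHandler

# Hypothetical checkpoint location; substitute a real Husky model directory.
handler = EndpointHandler(model_path="./")

# Text-only request.
print(handler({"inputs": "Introduce yourself."}))

# Image request: "image" may be a readable file path or a base64 string;
# base64 payloads are decoded into a temporary file before embedding.
with open("sample.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")
print(handler({"inputs": "What is shown in this picture?", "image": image_b64}))

# Start a fresh conversation for an unrelated question.
print(handler({"inputs": "Hello again.", "clear_history": True}))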
inference.py
ADDED
@@ -0,0 +1,415 @@
+"""
+srun -p INTERN2 --job-name='husky_multi_test' --gres=gpu:1 --cpus-per-task=8 --quotatype="auto" python -u demo/inference_new.py
+"""
+
+import abc
+from typing import Optional
+
+import os
+import requests
+from PIL import Image
+from io import BytesIO
+
+import torch
+import torchvision.transforms as T
+from peft import PeftModel
+from torchvision.transforms.functional import InterpolationMode
+
+from transformers import (
+    LlamaTokenizer,
+    GenerationConfig,
+    StoppingCriteria,
+    StoppingCriteriaList,
+)
+
+from robohusky.model.modeling_husky_embody2 import HuskyForConditionalGeneration
+
+from robohusky.conversation import (
+    conv_templates,
+    get_conv_template,
+)
+
+from robohusky.video_transformers import (
+    GroupNormalize,
+    GroupScale,
+    GroupCenterCrop,
+    Stack,
+    ToTorchFormatTensor,
+    get_index,
+)
+
+from robohusky.compression import compress_module
+from decord import VideoReader, cpu
+
+# import deepspeed
+
+IGNORE_INDEX = -100
+DEFAULT_UNK_TOKEN = "<unk>"
+DEFAULT_IMG_START_TOKEN = "<img>"
+DEFAULT_IMG_END_TOKEN = "</img>"
+
+DEFAULT_VIDEO_START_TOKEN = "<vid>"
+DEFAULT_VIDEO_END_TOKEN = "</vid>"
+
+def get_gpu_memory(max_gpus=None):
+    gpu_memory = []
+    num_gpus = (
+        torch.cuda.device_count()
+        if max_gpus is None
+        else min(max_gpus, torch.cuda.device_count())
+    )
+
+    for gpu_id in range(num_gpus):
+        with torch.cuda.device(gpu_id):
+            device = torch.cuda.current_device()
+            gpu_properties = torch.cuda.get_device_properties(device)
+            total_memory = gpu_properties.total_memory / (1024 ** 3)
+            allocated_memory = torch.cuda.memory_allocated() / (1024 ** 3)
+            available_memory = total_memory - allocated_memory
+            gpu_memory.append(available_memory)
+    return gpu_memory
+
+def load_model(
+        model_path, device, num_gpus, max_gpu_memory=None, load_8bit=False, lora_weights=None
+):
+    if device == "cpu":
+        kwargs = {}
+    elif device == "cuda":
+        kwargs = {"torch_dtype": torch.float16}
+        if num_gpus == "auto":
+            kwargs["device_map"] = "auto"
+        else:
+            num_gpus = int(num_gpus)
+            if num_gpus != 1:
+                kwargs["device_map"] = "auto"
+                if max_gpu_memory is None:
+                    kwargs[
+                        "device_map"
+                    ] = "sequential"  # this is important when the GPUs do not have the same VRAM sizes
+                    available_gpu_memory = get_gpu_memory(num_gpus)
+                    kwargs["max_memory"] = {
+                        i: str(int(available_gpu_memory[i] * 0.85)) + "GiB"
+                        for i in range(num_gpus)
+                    }
+                else:
+                    kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)}
+    else:
+        raise ValueError(f"Invalid device: {device}")
+
+    tokenizer = LlamaTokenizer.from_pretrained(
+        model_path, use_fast=False)
+
+    if lora_weights is None:
+        model = HuskyForConditionalGeneration.from_pretrained(
+            model_path, low_cpu_mem_usage=True, **kwargs
+        )
+    else:
+        kwargs["device_map"] = "auto"
+        model = HuskyForConditionalGeneration.from_pretrained(
+            model_path, low_cpu_mem_usage=True, **kwargs
+        )
+        model.language_model = PeftModel.from_pretrained(
+            model.language_model,
+            lora_weights,
+            **kwargs
+        )
+
+    if load_8bit:
+        compress_module(model, device)
+
+    if (device == "cuda" and num_gpus == 1) or device == "mps":
+        model.to(device)
+
+    model = model.eval()
+    return model, tokenizer
+
+def load_image(image_file, input_size=224):
+    if image_file.startswith('http') or image_file.startswith('https'):
+        response = requests.get(image_file)
+        image = Image.open(BytesIO(response.content)).convert('RGB')
+    else:
+        image = Image.open(image_file).convert('RGB')
+
+    crop_pct = 224 / 256
+    size = int(input_size / crop_pct)
+    transform = T.Compose([
+        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
+        T.Resize(size, interpolation=InterpolationMode.BICUBIC),
+        T.CenterCrop(input_size),
+        T.ToTensor(),
+        T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
+    ])
+    image = transform(image)
+    return image
+
+def load_video(video_path, num_segments=8):
+    vr = VideoReader(video_path, ctx=cpu(0))
+    num_frames = len(vr)
+    frame_indices = get_index(num_frames, num_segments)
+
+    # transform
+    crop_size = 224
+    scale_size = 224
+    input_mean = [0.48145466, 0.4578275, 0.40821073]
+    input_std = [0.26862954, 0.26130258, 0.27577711]
+
+    transform = T.Compose([
+        GroupScale(int(scale_size), interpolation=InterpolationMode.BICUBIC),
+        GroupCenterCrop(crop_size),
+        Stack(),
+        ToTorchFormatTensor(),
+        GroupNormalize(input_mean, input_std)
+    ])
+
+    images_group = list()
+    for frame_index in frame_indices:
+        img = Image.fromarray(vr[frame_index].asnumpy())
+        images_group.append(img)
+    video = transform(images_group)
+    return video
+
+class StoppingCriteriaSub(StoppingCriteria):
+
+    def __init__(self, stops, encounters=1):
+        super().__init__()
+        self.stops = stops
+
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
+        for stop in self.stops:
+            if torch.all((stop == input_ids[0][-len(stop):])).item():
+                return True
+
+        return False
+
+@torch.inference_mode()
+def generate_stream(
+        model, tokenizer, image_processor, params, device
+):
+    prompt = params["prompt"]
+    images = params.get("images", None)
+    videos = params.get("videos", None)
+    temperature = float(params.get("temperature", 0.7))
+    max_new_tokens = int(params.get("max_new_tokens", 1024))
+
+    num_queries = model.config.num_query_tokens
+
+    stop_words = ["Human: ", "Assistant: ", "###", "\n\n"]
+    stop_words_ids = [tokenizer(stop_word, return_tensors='pt')['input_ids'].squeeze() for stop_word in stop_words]
+    stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
+
+    generation_config = GenerationConfig(
+        bos_token_id=1,
+        do_sample=True,
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        stopping_criteria=stopping_criteria
+    )
+
+    pixel_values = None
+    if images is not None:
+        pixel_values = load_image(images).to(device)  # only support one image
+        image_query = DEFAULT_IMG_START_TOKEN + DEFAULT_IMG_END_TOKEN
+        prompt = prompt.replace("<image>", image_query)
+
+    elif videos is not None:
+        pixel_values = load_video(videos).to(device)
+        video_query = DEFAULT_VIDEO_START_TOKEN + DEFAULT_VIDEO_END_TOKEN
+        prompt = prompt.replace("<video>", video_query)
+
+    model_inputs = tokenizer([prompt], return_tensors="pt")
+    model_inputs.pop("token_type_ids", None)
+
+    if pixel_values is not None:
+        model_inputs["pixel_values"] = pixel_values
+
+        generation_output = model.generate(
+            **model_inputs,
+            generation_config=generation_config,
+            return_dict_in_generate=True,
+            output_scores=True
+        )
+    else:
+        generation_output = model.language_model.generate(
+            **model_inputs,
+            generation_config=generation_config,
+            return_dict_in_generate=True,
+            output_scores=True
+        )
+
+    preds = generation_output.sequences
+    outputs = tokenizer.batch_decode(preds, skip_special_tokens=True)
+    return outputs
+
+class Chat:
+    def __init__(
+            self,
+            model_path,
+            device,
+            num_gpus=1,
+            load_8bit=False,
+            temperature=0.7,
+            max_new_tokens=512,
+            lora_path=None,
+    ):
+        model, tokenizer = load_model(
+            model_path, device, num_gpus, load_8bit=load_8bit, lora_weights=lora_path
+        )
+
+        self.model = model
+        # self.model.language_model = deepspeed.init_inference(
+        #     self.model.language_model, mp_size=1, dtype=torch.float16, checkpoint=None, replace_with_kernel_inject=True)
+        self.tokenizer = tokenizer
+        num_queries = model.config.num_query_tokens
+
+        self.device = device
+        self.dtype = model.dtype
+
+        stop_words = ["Human: ", "Assistant: ", "###", "\n\n"]
+        stop_words_ids = [tokenizer(stop_word, return_tensors='pt')['input_ids'].squeeze() for stop_word in stop_words]
+        stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
+
+        self.conv = get_conv_template("husky")
+
+        self.image_query = DEFAULT_IMG_START_TOKEN + DEFAULT_IMG_END_TOKEN
+        self.video_query = DEFAULT_VIDEO_START_TOKEN + DEFAULT_VIDEO_END_TOKEN
+
+        self.generation_config = GenerationConfig(
+            bos_token_id=1,
+            do_sample=True,
+            top_k=20,
+            top_p=0.9,
+            temperature=temperature,
+            max_new_tokens=max_new_tokens,
+            stopping_criteria=stopping_criteria
+        )
+
+    def ask(self, text, conv, modal_type="image"):
+        assert modal_type in ["text", "image", "video"]
+        conversations = []
+
+        if len(conv.messages) > 0 or modal_type == "text":
+            conv.append_message(conv.roles[0], text)
+        elif modal_type == "image":
+            conv.append_message(conv.roles[0], self.image_query + "\n" + text)
+        else:
+            conv.append_message(conv.roles[0], self.video_query + "\n" + text)
+
+        conv.append_message(conv.roles[1], None)
+        conversations.append(conv.get_prompt())
+        return conversations
+
+    @torch.no_grad()
+    def get_image_embedding(self, image_file):
+        pixel_values = load_image(image_file)
+        pixel_values = pixel_values.unsqueeze(0).to(self.device, dtype=self.dtype)
+        language_model_inputs = self.model.extract_feature(pixel_values)
+        return language_model_inputs
+
+    @torch.no_grad()
+    def get_video_embedding(self, video_file):
+        pixel_values = load_video(video_file)
+        TC, H, W = pixel_values.shape
+        pixel_values = pixel_values.reshape(TC // 3, 3, H, W).transpose(0, 1)  # [C, T, H, W]
+        pixel_values = pixel_values.unsqueeze(0).to(self.device, dtype=self.dtype)
+        assert len(pixel_values.shape) == 5
+        language_model_inputs = self.model.extract_feature(pixel_values)
+        return language_model_inputs
+
+    @torch.no_grad()
+    def answer(self, conversations, language_model_inputs, modal_type="image"):
+        model_inputs = self.tokenizer(
+            conversations,
+            return_tensors="pt",
+        )
+        model_inputs.pop("token_type_ids", None)
+
+        input_ids = model_inputs["input_ids"].to(self.device)
+        attention_mask = model_inputs["attention_mask"].to(self.device)
+
+        if modal_type == "text":
+            generation_output = self.model.language_model.generate(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                generation_config=self.generation_config,
+                return_dict_in_generate=True,
+                output_scores=True
+            )
+        else:
+            pixel_values = model_inputs.pop("pixel_values", None)
+            if pixel_values is not None:
+                pixel_values = pixel_values.to(self.device)
+
+            generation_output = self.model.generate(
+                pixel_values=pixel_values,
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                language_model_inputs=language_model_inputs,
+                generation_config=self.generation_config,
+                return_dict_in_generate=True,
+                output_scores=True
+            )
+
+        preds = generation_output.sequences
+        outputs = self.tokenizer.batch_decode(preds, skip_special_tokens=True)[0]
+
+        if modal_type == "text":
+            skip_echo_len = len(conversations[0]) - conversations[0].count("</s>") * 3
+            outputs = outputs[skip_echo_len:].strip()
+
+        return outputs
+
+if __name__ == '__main__':
+    # model_path = "/mnt/petrelfs/zhangqinglong/Documents/Husky/work_dirs/husky_v3/EmbodiedGPT/pretrain_0727"
+    model_path = "./"
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    chat = Chat(model_path, device=device, num_gpus=1, max_new_tokens=1024, load_8bit=False)
+
+    vision_feature = None
+    image_state = False
+    video_state = False
+
+    while True:
+        query = input("\n")
+        if query.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
+            if os.path.exists(query):
+                print("received.")
+                vision_feature = chat.get_image_embedding(query)
+                chat.conv = get_conv_template("husky").copy()
+                image_state = True
+                continue
+        if query.lower().endswith(('.mp4', '.mkv', '.avi', '.wmv', '.iso', ".webm")):
+            if os.path.exists(query):
+                print("received.")
+                vision_feature = chat.get_video_embedding(query)
+                chat.conv = get_conv_template("husky").copy()
+                video_state = True
+                continue
+
+        if query == "stop":
+            break
+        if query == "clear" or query == "" or query == "\n":
+            chat.conv = get_conv_template("husky").copy()
+            image_state = False
+            video_state = False
+            os.system("clear")
+            print("Welcome to the husky-13b-zh model. Type a message to chat, 'clear' to reset the conversation history, or 'stop' to exit.")
+            continue
+
+        if image_state:
+            modal_type = "image"
+        elif video_state:
+            modal_type = "video"
+        else:
+            modal_type = "text"
+
+        # image_test = "assets/husky.jpg"
+        # image_test = "assets/yoga.mp4"
+        # video_test = "assets/pretty_girl.mp4"
+        # video_test = "assets/stock-footage-billiards-concentrated-young-woman-playing-in-club.webm"
+        # video_test = "assets/stock-footage-kherson-ukraine-may-open-free-rock-music-festival-crowd-partying-at-a-rock-concert.webm"
+        conversations = chat.ask(text=query, conv=chat.conv, modal_type=modal_type)
+        outputs = chat.answer(conversations, vision_feature, modal_type=modal_type)
+        # NOTE: strip is important to align with the training data.
+        chat.conv.messages[-1][1] = outputs.strip()
+
+        print(f"Husky: \n{outputs}")
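A note on the stopping logic: StoppingCriteriaSub ends generation as soon as the tail of the generated sequence matches any stop word's token ids. A small self-contained sketch of that tail-matching rule, using made-up token ids instead of real tokenizer output:

import torch

# Toy stop sequences; in inference.py these come from tokenizer("Human: "), etc.
stops = [torch.tensor([42, 7]), torch.tensor([99])]

def ends_with_stop(input_ids: torch.LongTensor) -> bool:
    # Mirrors StoppingCriteriaSub.__call__: compare the last len(stop) tokens
    # of the first sequence in the batch against each stop pattern.
    return any(torch.all(stop == input_ids[0][-len(stop):]).item() for stop in stops)

print(ends_with_stop(torch.tensor([[5, 42, 7]])))  # True: ends with [42, 7]
print(ends_with_stop(torch.tensor([[5, 7, 42]])))  # False: order matters
print(ends_with_stop(torch.tensor([[99]])))        # True: single-token stop

One caveat, which depends on the transformers version in use: stopping_criteria is normally an argument of generate() rather than a GenerationConfig field, so passing it through GenerationConfig as done here may be stored but ignored; if the stop words appear not to take effect, passing stopping_criteria=... to generate() directly is worth trying.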