import numpy as np
import torch
from PIL import Image

from longva.model.builder import load_pretrained_model
from longva.mm_utils import tokenizer_image_token
from longva.constants import IMAGE_TOKEN_INDEX


def load_images(images_path):
    """Load a list of image files and stack them into a single NumPy array."""
    images = []
    for image_path in images_path:
        # Open the image and force RGB so every frame has the same channel layout.
        image = Image.open(image_path).convert("RGB")
        images.append(np.array(image))
    # Stack all frames into one (N, H, W, C) array.
    return np.stack(images, axis=0)


class LongVA(object):
    def __init__(self, gpu=1, model_path="lmms-lab/LongVA-7B"):
        self.model_path = model_path
        self.device = torch.device(f"cuda:{gpu}" if torch.cuda.is_available() else "cpu")
        self.device_map = {"": f"cuda:{gpu}"}
        self.gen_kwargs = {
            "do_sample": True,
            "temperature": 0.5,
            "top_p": None,
            "num_beams": 1,
            "use_cache": True,
            "max_new_tokens": 1024,
        }
        self.tokenizer, self.model, self.image_processor, _ = load_pretrained_model(
            model_path, None, "llava_qwen", device_map=self.device_map
        )

    def inference(self, images_path, qa):
        # The <image> placeholder is required: tokenizer_image_token splits the
        # prompt on it and inserts IMAGE_TOKEN_INDEX there, which is how the
        # visual tokens get spliced into the text sequence.
        prompt = (
            "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
            "<|im_start|>user\n<image>\n"
            "This question is about the main topic discussed in the video. "
            f"Question: {qa['question']} "
            f"Choices: A) {qa['choice_a']} B) {qa['choice_b']} "
            f"C) {qa['choice_c']} D) {qa['choice_d']}. "
            "Respond with a single capital letter (A, B, C, or D) only. "
            "No explanation. No punctuation. Just the letter.<|im_end|>\n"
            "<|im_start|>assistant\n"
        )
        input_ids = (
            tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
            .unsqueeze(0)
            .to(self.model.device)
        )
        images = load_images(images_path)
        images_tensor = self.image_processor.preprocess(images, return_tensors="pt")[
            "pixel_values"
        ].to(self.model.device, dtype=torch.float16)
        with torch.inference_mode():
            output_ids = self.model.generate(
                input_ids, images=[images_tensor], modalities=["video"], **self.gen_kwargs
            )
        return self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
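

# Example usage: a minimal sketch showing the expected inputs. The frame paths
# and the QA dict contents below are hypothetical; images_path must be a list
# of pre-extracted video frames, and qa must carry the question plus four
# choices under the keys used by inference().
if __name__ == "__main__":
    model = LongVA(gpu=0)
    qa = {
        "question": "What sport is being played?",
        "choice_a": "Basketball",
        "choice_b": "Tennis",
        "choice_c": "Soccer",
        "choice_d": "Baseball",
    }
    # Hypothetical frame files, ordered in time (e.g. every 8th frame).
    frames = [f"frames/frame_{i:04d}.jpg" for i in range(0, 64, 8)]
    answer = model.inference(frames, qa)
    print(answer)  # expected output: a single letter, e.g. "B"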