|
|
|
from llava.model.builder import load_pretrained_model
from llava.mm_utils import tokenizer_image_token
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from llava.conversation import conv_templates

import copy
import warnings

import numpy as np
import torch
from decord import VideoReader, cpu
from PIL import Image

# Suppress the warnings the HF/LLaVA stack emits while loading checkpoints.
warnings.filterwarnings("ignore")
|
|
|
def load_images(images_path):
    """Load image files and stack them into a single (N, H, W, C) uint8 array."""
    images = []
    for image_path in images_path:
        image = Image.open(image_path).convert("RGB")
        images.append(np.array(image))
    # np.stack requires every frame to have the same resolution.
    return np.stack(images, axis=0)
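
# The decord import suggests frames may also be sampled straight from a video
# file. A minimal sketch of uniform frame sampling under that assumption;
# `sample_video_frames` and `num_frames` are illustrative names, not part of
# the original pipeline.
def sample_video_frames(video_path, num_frames=32):
    """Uniformly sample `num_frames` frames as an (N, H, W, C) uint8 array."""
    vr = VideoReader(video_path, ctx=cpu(0))
    indices = np.linspace(0, len(vr) - 1, num_frames).astype(int)
    # Same layout as load_images(), so the result can feed the image processor.
    return vr.get_batch(indices).asnumpy()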
|
|
|
class LLaVA_Video:
    def __init__(self, gpu=1, model_path="lmms-lab/LLaVA-Video-7B-Qwen2"):
        self.model_name = "llava_qwen"
        self.device = torch.device(f"cuda:{gpu}" if torch.cuda.is_available() else "cpu")
        self.device_map = {"": f"cuda:{gpu}"}
        # load_pretrained_model returns (tokenizer, model, image_processor, context_len).
        self.tokenizer, self.model, self.image_processor, self.max_length = load_pretrained_model(
            model_path,
            None,  # model_base is only needed for LoRA/delta checkpoints
            self.model_name,
            torch_dtype="bfloat16",
            device_map=self.device_map,
        )
        self.model.eval()
|
|
|
    def inference(self, images_path, qa):
        """Answer a multiple-choice question about a video given its frame paths."""
        # Preprocess all frames in one batch; the model expects a list containing
        # one (num_frames, C, H, W) tensor per video.
        frames = load_images(images_path)
        frames = self.image_processor.preprocess(frames, return_tensors="pt")["pixel_values"]
        images = [frames.to(self.device, dtype=torch.bfloat16)]

        conv_template = "qwen_1_5"
        question = (
            DEFAULT_IMAGE_TOKEN
            + "\nThis question is about the main topic discussed in the video. "
            + f"Question: {qa['question']} "
            + f"Choices: A) {qa['choice_a']} B) {qa['choice_b']} C) {qa['choice_c']} D) {qa['choice_d']}. "
            + "Respond with a single capital letter (A, B, C, or D) only. "
            + "No explanation. No punctuation. Just the letter."
        )

        conv = copy.deepcopy(conv_templates[conv_template])
        conv.append_message(conv.roles[0], question)
        conv.append_message(conv.roles[1], None)  # leave the assistant turn open
        prompt_question = conv.get_prompt()

        # tokenizer_image_token replaces the <image> placeholder with IMAGE_TOKEN_INDEX.
        input_ids = tokenizer_image_token(
            prompt_question, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
        ).unsqueeze(0).to(self.device)
|
        output_ids = self.model.generate(
            input_ids,
            images=images,
            modalities=["video"],
            do_sample=False,  # greedy decoding for a deterministic single-letter answer
            temperature=0,
            max_new_tokens=4096,
        )
        return self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
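
# Hedged usage sketch: the constructor defaults above are real, but the frame
# paths and QA dict here are illustrative placeholders, not from the original
# pipeline.
if __name__ == "__main__":
    model = LLaVA_Video(gpu=0)
    qa = {
        "question": "What is the video mainly about?",
        "choice_a": "Cooking a meal",
        "choice_b": "A sports match",
        "choice_c": "A travel vlog",
        "choice_d": "A music performance",
    }
    frame_paths = [f"frames/frame_{i:04d}.jpg" for i in range(32)]  # hypothetical paths
    print(model.inference(frame_paths, qa))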
|