import os
import uuid
import base64
from typing import List
import time
import psutil
import ollama
import uvicorn
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse

# Optional GPU metrics via NVML. The two failure modes are caught separately:
# pynvml.NVMLError can only be referenced once the import has succeeded.
try:
    import pynvml

    pynvml.nvmlInit()
    GPU_METRICS_AVAILABLE = True
except ImportError:
    GPU_METRICS_AVAILABLE = False
except pynvml.NVMLError:
    GPU_METRICS_AVAILABLE = False

from video_processor import extract_frames, FrameSamplingMethod, encode_frames_to_base64

import logging
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="qwen2.5vl-int4:latest")
args = parser.parse_args()

os.makedirs(f'logs/{args.model_name}', exist_ok=True)

app = FastAPI(title="Qwen2.5-VL Video Inference Service")

TEMP_VIDEO_DIR = "temp_videos"
os.makedirs(TEMP_VIDEO_DIR, exist_ok=True)

log_filename = f"logs/{args.model_name}/{time.strftime('%Y%m%d_%H%M%S')}.log"
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    filename=log_filename,
    filemode='a',
)

@app.post("/video-inference/")
async def video_inference(
    prompt: str = Form(...),
    video_file: UploadFile = File(...),
    sampling_method: FrameSamplingMethod = Form(FrameSamplingMethod.CONTENT_AWARE),
    sampling_rate: int = Form(5),
):
    """
    Accept a video and a text prompt, run inference, and return the result.

    - prompt: the user's question.
    - video_file: the uploaded video file.
    - sampling_method: frame sampling method ('uniform' or 'content_aware').
    - sampling_rate: sampling rate or threshold for the chosen method.

    An example client call is sketched in the comment at the end of this file.
    """

    request_start_time = time.time()
    request_id = str(uuid.uuid4())
    logging.info(
        f"[{request_id}] Received new video inference request. "
        f"Prompt: '{prompt}', Video: '{video_file.filename}'"
    )

    # Reject uploads that do not declare a video content type; the first check also
    # covers requests that send no Content-Type header at all.
    if not video_file.content_type or not video_file.content_type.startswith("video/"):
        logging.error(
            f"[{request_id}] Uploaded file '{video_file.filename}' is not a video. "
            f"Content-Type: {video_file.content_type}"
        )
        raise HTTPException(status_code=400, detail="Uploaded file is not a video.")

    file_extension = os.path.splitext(video_file.filename)[1]
    temp_video_path = os.path.join(TEMP_VIDEO_DIR, f"{request_id}{file_extension}")

    try:
        # Persist the upload to a temporary file so the frame extractor can read it from disk.
        with open(temp_video_path, "wb") as buffer:
            content = await video_file.read()
            buffer.write(content)
        logging.info(f"[{request_id}] Video saved to temporary file: {temp_video_path}")

        logging.info(
            f"[{request_id}] Extracting frames using method: {sampling_method.value}, "
            f"rate/threshold: {sampling_rate}"
        )
        frames = extract_frames(temp_video_path, sampling_method, sampling_rate)
        if not frames:
            logging.error(f"[{request_id}] Could not extract any frames from the video: {temp_video_path}")
            raise HTTPException(status_code=400, detail="Could not extract any frames from the video.")

        logging.info(f"[{request_id}] Extracted {len(frames)} frames successfully.")

        base64_frames = encode_frames_to_base64(frames)
        logging.info(f"[{request_id}] Encoded {len(base64_frames)} frames to Base64.")

        # Instruction prompt sent to the model (in Chinese): "Please analyse the following
        # sequence of image frames extracted from the video in chronological order and
        # answer the user's question based on these frames."
        final_prompt = (
            "请分析以下从视频中按时间顺序提取的图像帧序列。"
            "根据这些帧回答用户的问题。\n\n"
            f"用户问题: \"{prompt}\""
        )

        try:
            logging.info(f"[{request_id}] Sending request to Ollama model '{args.model_name}'...")

            # Prime psutil's CPU counters so the readings taken after inference
            # measure the interval spanning the Ollama call.
            psutil.cpu_percent(interval=None)
            psutil.cpu_percent(interval=None, percpu=True)

            ollama_start_time = time.time()
            response = ollama.chat(
                model=args.model_name,
                messages=[
                    {
                        'role': 'user',
                        'content': final_prompt,
                        'images': base64_frames,
                    }
                ],
            )
            ollama_end_time = time.time()

            cpu_usage = psutil.cpu_percent(interval=None)
            cpu_core_utilization = psutil.cpu_percent(interval=None, percpu=True)

            logging.info(f"[{request_id}] Received response from Ollama successfully.")

            total_request_processing_time = time.time() - request_start_time
            ollama_total_latency = ollama_end_time - ollama_start_time

            # Ollama reports all durations in nanoseconds.
            eval_count = response.get('eval_count', 0)
            eval_duration_ns = response.get('eval_duration', 1)
            tokens_per_second = eval_count / (eval_duration_ns / 1e9) if eval_duration_ns > 0 else 0

            load_duration_ns = response.get('load_duration', 0)
            prompt_eval_duration_ns = response.get('prompt_eval_duration', 0)
            # Approximate time to first token as the prompt evaluation time; model load
            # time is reported separately in the metrics below.
            first_token_latency = prompt_eval_duration_ns / 1e9

            cpu_freq_info = psutil.cpu_freq()
            cpu_freq = cpu_freq_info.current if cpu_freq_info else 'N/A'

            gpu_metrics_log = "Not available (pynvml not installed or NVIDIA driver issue)"
            if GPU_METRICS_AVAILABLE:
                try:
                    handle = pynvml.nvmlDeviceGetHandleByIndex(0)
                    utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
                    memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                    gpu_metrics_log = (
                        f"GPU Utilization: {utilization.gpu}%, "
                        f"Memory Used: {memory_info.used / (1024**2):.2f}/{memory_info.total / (1024**2):.2f} MB"
                    )
                except pynvml.NVMLError as e:
                    gpu_metrics_log = f"Could not retrieve GPU metrics: {e}"

            log_message = f"""
[{request_id}] --- Performance & System Metrics ---
[Request Info]
- Prompt: "{prompt}"
- Model: {response.get('model')}
[Latency & Throughput]
- Tokens/Second: {tokens_per_second:.2f}
- Latency (Model Load): {load_duration_ns / 1e9:.4f} s
- Latency (First Token): {first_token_latency:.4f} s
- Latency (Ollama Total): {ollama_total_latency:.4f} s
- Batch Processing Latency (Total Request Time): {total_request_processing_time:.4f} s
- Throughput (for this request): {1 / total_request_processing_time if total_request_processing_time > 0 else float('inf'):.2f} req/s
[Token Usage]
- Prompt Tokens: {response.get('prompt_eval_count', 'N/A')}
- Response Tokens: {eval_count}
[System Usage at Completion]
- CPU Usage: {cpu_usage}%
- CPU Core Utilization (per core, %): {cpu_core_utilization}
- CPU Frequency: {cpu_freq} MHz
- GPU: {gpu_metrics_log}
[Response]
- {response['message']['content']}
----------------------------------------------------"""
            logging.info(log_message)

            return JSONResponse(content={"response": response['message']['content']})

        except Exception as ollama_error:
            logging.error(f"[{request_id}] Ollama inference failed: {str(ollama_error)}", exc_info=True)
            raise HTTPException(status_code=503, detail=f"Ollama inference failed: {str(ollama_error)}")

    except HTTPException:
        # Let deliberate HTTP errors (e.g. the 400 and 503 raised above) propagate with
        # their original status codes instead of being wrapped in a generic 500.
        raise
    except Exception as e:
        logging.error(f"[{request_id}] An error occurred during processing: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"An error occurred during processing: {str(e)}")
    finally:
        # Always remove the temporary video file, whether or not inference succeeded.
        if os.path.exists(temp_video_path):
            os.remove(temp_video_path)
            logging.info(f"[{request_id}] Cleaned up temporary file: {temp_video_path}")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8008)
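
# Example client call, a minimal sketch for manual testing: the file name, prompt, and
# sampling values below are illustrative, and the URL assumes the service is running
# locally on the port configured above.
#
#   import requests
#
#   with open("sample.mp4", "rb") as f:
#       r = requests.post(
#           "http://localhost:8008/video-inference/",
#           data={
#               "prompt": "What happens in this video?",
#               "sampling_method": "content_aware",
#               "sampling_rate": 5,
#           },
#           files={"video_file": ("sample.mp4", f, "video/mp4")},
#       )
#   print(r.json()["response"])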