File size: 1,731 Bytes
3bd162c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import base64
import io
import urllib.request
from typing import Any, Dict

import torch
from PIL import Image
from transformers import AutoModelForZeroShotObjectDetection, AutoProcessor

class EndpointHandler():
    """Inference Endpoints handler for zero-shot object detection.

    Wraps a Grounding-DINO-style model: the request supplies an image plus
    free-text queries, and the handler returns grounded detections.
    """

    def __init__(self, path=""):
        # Load the model and processor once at startup; prefer GPU if present.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = AutoModelForZeroShotObjectDetection.from_pretrained(path).to(self.device)
        self.model.eval()  # inference only — disable dropout/batch-norm updates
        self.processor = AutoProcessor.from_pretrained(path)

    def _load_image(self, image_data):
        """Resolve the payload's 'image' field into a PIL Image.

        Accepts, in order of detection: a non-string (assumed to already be a
        PIL image), an http(s) URL, a base64 string (optionally with a
        ``data:image/...;base64,`` prefix), or — as a last resort — a local
        file path (the original behavior).
        """
        if not isinstance(image_data, str):
            # Assume a PIL.Image (or compatible) object was passed directly.
            return image_data
        if image_data.startswith(("http://", "https://")):
            with urllib.request.urlopen(image_data) as response:
                return Image.open(io.BytesIO(response.read()))
        # Strip an optional data-URI prefix before attempting base64 decode.
        payload = image_data.split(",", 1)[-1] if image_data.startswith("data:") else image_data
        try:
            return Image.open(io.BytesIO(base64.b64decode(payload, validate=True)))
        except (ValueError, OSError):
            # Not valid base64 (binascii.Error subclasses ValueError) or not a
            # decodable image: fall back to treating the string as a file path.
            return Image.open(image_data)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Run zero-shot detection on one request.

        Expects ``data["image"]`` (base64 string, URL, file path, or PIL
        image) and ``data["text"]`` (a query string, or a list of phrases).
        Returns ``{"detections": [...]}`` with JSON-serializable values, or
        ``{"error": ...}`` when required fields are missing.
        """
        # Validate required fields before touching the model.
        if "image" not in data or "text" not in data:
            return {"error": "Payload must contain 'image' (base64 or URL) and 'text' (queries)."}

        # BUG FIX: the original passed base64/URL strings straight to
        # Image.open(), which only understands file paths, contradicting the
        # error message above. _load_image handles all advertised forms.
        image = self._load_image(data["image"])

        # Grounding-DINO-style models expect lowercase queries joined with
        # ". " and each terminated by a period.
        text_queries = data["text"]
        if isinstance(text_queries, list):
            text_queries = ". ".join([t.lower().strip() + "." for t in text_queries])

        # Tokenize/normalize inputs and move them to the model's device.
        inputs = self.processor(images=image, text=text_queries, return_tensors="pt").to(self.device)

        # Inference without gradient tracking.
        with torch.no_grad():
            outputs = self.model(**inputs)

        # Map raw logits/boxes back to the original image size. PIL's .size is
        # (W, H) while target_sizes expects (H, W) — hence the [::-1].
        results = self.processor.post_process_grounded_object_detection(
            outputs,
            inputs.input_ids,
            box_threshold=0.4,
            text_threshold=0.3,
            target_sizes=[image.size[::-1]]
        )

        # Tensors are not JSON-serializable; convert them so the endpoint can
        # return the response body as JSON.
        detections = [
            {key: (value.tolist() if torch.is_tensor(value) else value)
             for key, value in result.items()}
            for result in results
        ]
        return {"detections": detections}