Commit: Update modeling.py
File changed: modeling.py (+6 −5)
--- a/modeling.py
+++ b/modeling.py
@@ -8,7 +8,7 @@ import torch
 from .dino_wrapper2 import DinoWrapper
 from .transformer import TriplaneTransformer
 from .synthesizer_part import TriplaneSynthesizer
-from .processor import LRMImageProcessor
+# from .processor import LRMImageProcessor

 class CameraEmbedder(nn.Module):
     def __init__(self, raw_dim: int, embed_dim: int):
@@ -69,12 +69,13 @@ class LRMGenerator(PreTrainedModel):
             triplane_dim=config.triplane_dim, samples_per_ray=config.rendering_samples_per_ray,
         )

-    def forward(self, image):
+    def forward(self, image, camera):
         # we use image processor directly in the forward pass
-        [removed line — content not recoverable from this rendering]
-        [removed line — content not recoverable from this rendering]
-        [removed line — content not recoverable from this rendering]
+        #TODO: we should have the following:
+        # processor = AutoProcessor.from_pretrained("jadechoghari/vfusion3d")
+        # processed_image, source_camera = processor(image)
+        #
         assert image.shape[0] == camera.shape[0], "Batch size mismatch"
         N = image.shape[0]

[NOTE(review): reconstructed from a garbled split-diff page rendering. The three
removed lines in the second hunk displayed with empty content in the original
rendering, so only placeholders are shown; the +6/−5 change count matches this
reconstruction. Presumably they contained the old LRMImageProcessor usage that
the new `camera` parameter replaces — verify against the repository history.]