lambertxiao committed on
Commit 746c807 · verified · 1 Parent(s): aefbd72

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -1,35 +1 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
  *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
De_DiffusionV2_Image.py ADDED
@@ -0,0 +1,263 @@
1
+ from dataclasses import dataclass
2
+ from typing import Optional, Tuple
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ import numpy as np
7
+ from transformers.utils import ModelOutput
8
+ from transformers.modeling_utils import PreTrainedModel
9
+ import torchvision.transforms as transforms
10
+ import os
11
+ from safetensors.torch import load_file
12
+
13
+ from .build_unfreeze import load_sd_model, load_Florence2_model
14
+ from .utils import initiate_time_steps, normalize
15
+
16
+
17
+ class MLP(nn.Module):
18
+ def __init__(self, input_dim, output_dim):
19
+ super(MLP, self).__init__()
20
+ self.layers = nn.Sequential(
21
+ nn.Linear(input_dim, output_dim),
22
+ nn.GELU(),
23
+ nn.Linear(output_dim, output_dim),
24
+ )
25
+
26
+ def forward(self, x):
27
+ return self.layers(x)
28
+
29
+ @dataclass
30
+ class SDOutput(ModelOutput):
31
+ loss: Optional[torch.FloatTensor] = None
32
+
33
+ class SDModel(PreTrainedModel):
34
+ def __init__(
35
+ self,
36
+ config = None,
37
+ training_args = None,
38
+ ):
39
+ super().__init__(config)
40
+ self.training_args = training_args
41
+ if self.training_args.fp32:
42
+ self._dtype = torch.float32
43
+ else:
44
+ self._dtype = torch.bfloat16
45
+ # Change device to _device to avoid conflict with nn.Module
46
+ self._device = torch.device(config.device if hasattr(config, 'device') else "cuda" if torch.cuda.is_available() else "cpu")
47
+
48
+ self.vae, self.tokenizer, self.text_encoder, self.unet, self.scheduler = load_sd_model(training_args)
49
+ torch.cuda.empty_cache()
50
+ self.unet.eval()
51
+ self.text_encoder.eval()
52
+ self.model, self.processor = load_Florence2_model(training_args)
53
+ self.config = config
54
+ # Move models to appropriate device
55
+ self.unet = self.unet.to(self._dtype).to(self._device)
56
+ self.text_encoder = self.text_encoder.to(self._dtype).to_empty(device=self._device)
57
+ self.model = self.model.to(self._dtype).to_empty(device=self._device)
58
+ self.vae = self.vae.to(torch.float32).to_empty(device=self._device)
59
+
60
+ self.batch_size = self.training_args.batch_size
61
+
62
+ hidden_dim = 1024
63
+ self.language_proj = nn.Sequential(
64
+ nn.Linear(1024, hidden_dim, dtype=self._dtype),
65
+ nn.GELU(),
66
+ nn.Linear(hidden_dim, 1024, dtype=self._dtype)
67
+ ).to_empty(device=self._device)
68
+ for param in self.language_proj.parameters():
69
+ param.requires_grad = True
70
+ # Add learnable queries for decoder
71
+ self.num_queries = self.training_args.learnable_token_length  # number of learnable query tokens, e.g. 77 or 128
72
+ self.query_embed = nn.Parameter(torch.randn(1, self.num_queries, 1024, dtype=self._dtype))
73
+ self.query_embed.requires_grad = True
74
+
75
+ self.unet.enable_gradient_checkpointing()
76
+
77
+ def _unet_pred_noise(self, x_start, t, noise, context):
78
+ # Convert timesteps to long tensor
79
+ t = t.to(dtype=torch.long)
80
+
81
+ # Ensure consistent dtype for all tensors
82
+ dtype = self.unet.dtype
83
+ x_start = x_start.to(dtype)
84
+ noise = noise.to(dtype)
85
+ context = context.to(dtype)
86
+
87
+ # Add noise
88
+ nt = t.shape[0]
89
+ noised_latent = self.scheduler.add_noise(x_start, noise, t)
90
+
91
+ # Get prediction
92
+ pred_noise = self.unet(
93
+ noised_latent,
94
+ t,
95
+ encoder_hidden_states=context.expand(nt, -1, -1)
96
+ ).sample
97
+
98
+ return pred_noise
99
+
100
+ def generate_images(self, images):
101
+ batch_size = self.training_args.eval_batch_size
102
+ prompt = ["<MORE_DETAILED_CAPTION>"] * batch_size
103
+ inputs = self.processor(text=prompt, images=images, return_tensors="pt").to(self._device).to(self._dtype)
104
+ # Get embeddings
105
+ if inputs["input_ids"] is not None:
106
+ inputs_embeds = self.model.language_model.get_input_embeddings()(inputs["input_ids"]).to(self._dtype)
107
+ if inputs["pixel_values"] is not None:
108
+ image_features = self.model._encode_image(inputs["pixel_values"]).to(self._dtype)
109
+ inputs_embeds, attention_mask = self.model._merge_input_ids_with_image_features(image_features, inputs_embeds)
110
+ if inputs_embeds is not None:
111
+ attention_mask = attention_mask.to(inputs_embeds.dtype)
112
+ encoder_outputs = self.model.language_model.model.encoder(
113
+ inputs_embeds=inputs_embeds,
114
+ attention_mask=attention_mask,
115
+ output_hidden_states=True,
116
+ return_dict=True
117
+ )
118
+
119
+ # Prepare decoder inputs
120
+ decoder_input_embeds = self.query_embed.expand(batch_size, -1, -1) # [batch_size, 128, 1024]
121
+ decoder_attention_mask = torch.ones(
122
+ (batch_size, self.num_queries),
123
+ dtype=self._dtype,
124
+ device=self._device
125
+ )
126
+
127
+ encoder_hidden_states = encoder_outputs.last_hidden_state.to(self._dtype)
128
+ decoder_input_embeds = decoder_input_embeds.to(self._dtype)
129
+ attention_mask = attention_mask.to(self._dtype)
130
+
131
+ # Run decoder
132
+ decoder_outputs = self.model.language_model.model.decoder(
133
+ inputs_embeds=decoder_input_embeds,
134
+ attention_mask=decoder_attention_mask,
135
+ encoder_hidden_states=encoder_hidden_states,
136
+ encoder_attention_mask=attention_mask,
137
+ output_hidden_states=True,
138
+ return_dict=True
139
+ )
140
+
141
+ last_decoder_hidden_state = decoder_outputs.last_hidden_state # [batch_size, 128, 1024]
142
+ conditional_context = self.language_proj(last_decoder_hidden_state)
143
+
144
+ un_token = self.tokenizer("", padding="max_length", truncation=True, max_length=77, return_tensors="pt").input_ids.to(self._device)
145
+ un_context_embeddings = self.text_encoder(un_token).last_hidden_state
146
+ un_context_embeddings = un_context_embeddings.expand(batch_size, -1, -1)
147
+ if self.training_args.use_text_encoder:
148
+ context_embeddings = self.text_encoder(
149
+ inputs_embeds=conditional_context.to(self._dtype)
150
+ ).last_hidden_state # 1, 77 , 1024
151
+
152
+ latent_shape = (batch_size, 4, self.training_args.image_size // 8, self.training_args.image_size // 8)
153
+ latents = torch.randn(latent_shape, device=self._device, dtype=self._dtype)
154
+
155
+ scheduler = self.scheduler
156
+ scheduler.set_timesteps(self.training_args.num_inference_steps)
157
+ with torch.no_grad():
158
+ for t in scheduler.timesteps:
159
+ latent_model_input = torch.cat([latents, latents], dim=0)
160
+ latent_model_input = scheduler.scale_model_input(latent_model_input, t)
161
+
162
+ combined_embeddings = torch.cat([un_context_embeddings, context_embeddings], dim=0).to(self._dtype)
163
+ noise_pred = self.unet(
164
+ latent_model_input, t, encoder_hidden_states=combined_embeddings
165
+ )[0]
166
+
167
+ noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2, dim=0)
168
+ noise_pred = noise_pred_uncond + self.training_args.guidance_scale * (noise_pred_cond - noise_pred_uncond)
169
+
170
+ latents = scheduler.step(noise_pred, t, latents)[0]
171
+ # Decode latents with VAE
172
+
173
+ scaled_latents = latents / 0.18215
174
+ with torch.no_grad():
175
+ decoded_latents = self.vae.decode(scaled_latents.to(torch.float32))[0]
176
+
177
+ return decoded_latents
178
+
179
+ def get_conditional_context(self, images, batch_size=None):
180
+ if batch_size is None:
181
+ batch_size = self.batch_size
182
+ prompt = ["<MORE_DETAILED_CAPTION>"] * batch_size
183
+ inputs = self.processor(text=prompt, images=images, return_tensors="pt").to(self._device).to(self._dtype)
184
+ # Get embeddings
185
+ if inputs["input_ids"] is not None:
186
+ inputs_embeds = self.model.language_model.get_input_embeddings()(inputs["input_ids"]).to(self._dtype)
187
+ if inputs["pixel_values"] is not None:
188
+ image_features = self.model._encode_image(inputs["pixel_values"]).to(self._dtype)
189
+ inputs_embeds, attention_mask = self.model._merge_input_ids_with_image_features(image_features, inputs_embeds)
190
+ if inputs_embeds is not None:
191
+ attention_mask = attention_mask.to(inputs_embeds.dtype)
192
+ encoder_outputs = self.model.language_model.model.encoder(
193
+ inputs_embeds=inputs_embeds,
194
+ attention_mask=attention_mask,
195
+ output_hidden_states=True,
196
+ return_dict=True
197
+ )
198
+
199
+ # Prepare decoder inputs
200
+ decoder_input_embeds = self.query_embed.expand(batch_size, -1, -1) # [batch_size, 128, 1024]
201
+ decoder_attention_mask = torch.ones(
202
+ (batch_size, self.num_queries),
203
+ dtype=self._dtype,
204
+ device=self._device
205
+ )
206
+
207
+ encoder_hidden_states = encoder_outputs.last_hidden_state.to(self._dtype)
208
+ decoder_input_embeds = decoder_input_embeds.to(self._dtype)
209
+ attention_mask = attention_mask.to(self._dtype)
210
+
211
+ # Run decoder
212
+ decoder_outputs = self.model.language_model.model.decoder(
213
+ inputs_embeds=decoder_input_embeds,
214
+ attention_mask=decoder_attention_mask,
215
+ encoder_hidden_states=encoder_hidden_states,
216
+ encoder_attention_mask=attention_mask,
217
+ output_hidden_states=True,
218
+ return_dict=True
219
+ )
220
+
221
+ last_decoder_hidden_state = decoder_outputs.last_hidden_state # [batch_size, 128, 1024]
222
+ return last_decoder_hidden_state
223
+
224
+ def forward(
225
+ self,
226
+ image=None,
227
+ filename=None,
228
+ **kwargs,
229
+ ) -> SDOutput:
230
+ images_for_language_model = image
231
+ normalize_images = normalize(image, rescale=True)
232
+ x0 = self.vae.encode(normalize_images.to(torch.float32)).latent_dist.sample()
233
+ latent = x0 * 0.18215
234
+
235
+ # prepare_total_timesteps
236
+ total_timestep = self.scheduler.num_train_timesteps
237
+
238
+ # Initiate timesteps and noise
239
+ timesteps = initiate_time_steps(0, total_timestep, self.batch_size, self.config).long()
240
+ timesteps = timesteps.to(self._device)
241
+ c, h, w = latent.shape[1:]
242
+ if not self.config.tta.use_same_noise_among_timesteps:
243
+ noise = torch.randn((self.batch_size, c, h, w), device=self._device, dtype=self._dtype)
244
+ else:
245
+ noise = torch.randn((1, c, h, w), device=self._device, dtype=self._dtype)
246
+ noise = noise.repeat(self.batch_size, 1, 1, 1)
247
+
248
+ # prepare context for language model
249
+ conditional_context = self.get_conditional_context(images_for_language_model)
250
+ conditional_context = self.language_proj(conditional_context) # [b, 159, 1024]
251
+
252
+ if self.training_args.use_text_encoder:
253
+ text_encoder_output = self.text_encoder(input_ids=None, inputs_embeds=conditional_context.to(self._dtype))
254
+ pred_noise = self._unet_pred_noise(x_start=latent, t=timesteps, noise=noise, context=text_encoder_output.last_hidden_state.to(self._dtype)).to(self._dtype)
255
+ else:
256
+ pred_noise = self._unet_pred_noise(x_start=latent, t=timesteps, noise=noise, context=conditional_context.to(self._dtype)).to(self._dtype)
257
+ # Compute diffusion loss
258
+ if self.config.tta.loss == "l1":
259
+ loss = torch.nn.functional.l1_loss(pred_noise, noise)
260
+ else:
261
+ loss = torch.nn.functional.mse_loss(pred_noise, noise)
262
+
263
+ return SDOutput(loss=loss)
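
A hedged usage sketch for the file above: it shows how SDModel's forward pass is driven to obtain the diffusion loss. It assumes the folder is importable as a package (the file uses relative imports; "vlv_repo" below is a placeholder name) and that SDConfig from sd_config.py, which is not part of this commit, supplies the config.tta.* fields read in forward(). The Namespace fields mirror the attributes the class actually reads (they match create_model_args in De_DiffusionV2_stage2.py).

import argparse
import torch
from vlv_repo.sd_config import SDConfig                 # sd_config.py is not shown in this diff
from vlv_repo.De_DiffusionV2_Image import SDModel       # "vlv_repo" is a placeholder package name

training_args = argparse.Namespace(
    fp32=True, batch_size=1, eval_batch_size=1,
    learnable_token_length=77, use_text_encoder=True,
    image_size=768, num_inference_steps=50, guidance_scale=2.0,
    unfreeze_florence2_all=False,
    unfreeze_florence2_language_model=False,
    unfreeze_florence2_language_model_decoder=False,
)
model = SDModel(SDConfig(), training_args)

# Placeholder batch; the exact pixel range expected depends on utils.normalize,
# which is also outside this diff.
images = torch.rand(1, 3, 768, 768, device=model._device)
out = model(image=images)   # SDOutput carrying only the diffusion loss
out.loss.backward()         # gradients flow into language_proj and query_embed only
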
De_DiffusionV2_stage2.py ADDED
@@ -0,0 +1,418 @@
1
+ import sys
2
+ import os
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
4
+
5
+ from dataclasses import dataclass
6
+ from typing import Optional, Tuple, Dict, Any, Union
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ import numpy as np
11
+ from transformers.utils import ModelOutput
12
+ from transformers import PreTrainedModel, PretrainedConfig
13
+ from transformers import AutoModelForCausalLM, AutoTokenizer
14
+ from transformers import AutoConfig
15
+ from safetensors.torch import load_file
16
+ import torchvision.transforms as transforms
17
+
18
+ from .De_DiffusionV2_Image import SDModel
19
+ from .sd_config import SDConfig
20
+ import argparse
21
+
22
+ def handle_module_prefix(state_dict):
23
+ """Handle 'module.' prefix in state dict keys."""
24
+ if any(k.startswith('module.') for k in state_dict.keys()):
25
+ return {k.replace('module.', ''): v for k, v in state_dict.items()}
26
+ return state_dict
27
+
28
+ def create_model_args(args):
29
+ """Create model arguments needed by SDModel."""
30
+ model_args = argparse.Namespace()
31
+ model_args.use_text_encoder = args.use_text_encoder
32
+ model_args.batch_size = args.batch_size
33
+ model_args.eval_batch_size = args.batch_size
34
+ model_args.distributed_strategy = 'none'
35
+ model_args.fp32 = args.fp32
36
+ model_args.learnable_token_length = args.learnable_token_length
37
+ model_args.num_inference_steps = args.num_inference_steps
38
+ model_args.image_size = args.image_size
39
+ model_args.guidance_scale = args.guidance_scale
40
+ model_args.unfreeze_florence2_all = False
41
+ model_args.unfreeze_florence2_language_model = False
42
+ model_args.unfreeze_florence2_language_model_decoder = False
43
+ return model_args
44
+
45
+ def load_model_checkpoint(model, model_path, device):
46
+ """Load model checkpoint."""
47
+ checkpoint = torch.load(model_path, map_location="cpu")
48
+
49
+ # Handle different checkpoint formats
50
+ if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
51
+ state_dict = checkpoint['model_state_dict']
52
+ else:
53
+ state_dict = checkpoint
54
+
55
+ state_dict = handle_module_prefix(state_dict)
56
+ model.load_state_dict(state_dict, strict=False)
57
+
58
+ return model
59
+
60
+ def initialize_diffusion_model(args):
61
+ """Initialize the diffusion model."""
62
+ config = SDConfig()
63
+ diffusion_model_args = create_model_args(args)
64
+ diffusion_model = SDModel(config, diffusion_model_args)
65
+ _dtype = torch.float32 if diffusion_model_args.fp32 else torch.bfloat16
66
+
67
+ # Delete components that aren't needed for inference
68
+ if hasattr(diffusion_model, 'vae'):
69
+ del diffusion_model.vae
70
+ if hasattr(diffusion_model, 'unet'):
71
+ del diffusion_model.unet
72
+
73
+ # Clear CUDA cache
74
+ torch.cuda.empty_cache()
75
+
76
+ diffusion_model = diffusion_model.to(_dtype)
77
+
78
+ # Freeze parameters that shouldn't be trained
79
+ for param in diffusion_model.language_proj.parameters():
80
+ param.requires_grad = False
81
+ diffusion_model.query_embed.requires_grad = False
82
+
83
+ return diffusion_model
84
+
85
+ class VLV_Config(PretrainedConfig):
86
+ model_type = "VLV_decoder"
87
+
88
+ def __init__(self, hidden_size=128, **kwargs):
89
+ super().__init__(**kwargs)
90
+ pass
91
+
92
+ class VLV_MODEL(PreTrainedModel):
93
+ config_class = VLV_Config
94
+ model_type = "VLV_decoder"
95
+
96
+ def __init__(self, config):
97
+ super().__init__(config)
98
+ """Load the CLIPDecoder model."""
99
+ # Initialize the diffusion model first
100
+ device = "cuda"
101
+ de_diffusion_model = initialize_diffusion_model(config)
102
+ clip_decoder_model = CLIPDecoder(
103
+ language_model=config.qwen_model,
104
+ qwen2_config=config.qwen2_config,
105
+ VLV_model=de_diffusion_model,
106
+ device=device,
107
+ bf16=config.mixed_precision
108
+ )
109
+
110
+ # Load the trained weights
111
+ # clip_decoder_model = load_model_checkpoint(clip_decoder_model, config.clip_decoder_checkpoint, device)
112
+
113
+ # Set to evaluation mode
114
+ clip_decoder_model.eval()
115
+
116
+ self.model = clip_decoder_model
117
+ self.max_new_tokens = config.max_length
118
+ self.num_beams = config.num_beams
119
+ self.transform = self.get_transform(config.image_size)
120
+
121
+ def get_transform(self, image_size):
122
+ """Transformation pipeline for input images."""
123
+ return transforms.Compose([
124
+ transforms.Resize(image_size),
125
+ transforms.CenterCrop((image_size, image_size)),
126
+ transforms.PILToTensor(),
127
+ ])
128
+
129
+ def forward(self, valid_images, max_length):
130
+ valid_images = [self.transform(img) for img in valid_images]
131
+ if hasattr(self.model, 'module'):
132
+ outputs = self.model.module.generate(
133
+ valid_images,
134
+ max_new_tokens=max_length,
135
+ num_beams=self.num_beams,
136
+ early_stopping=True
137
+ )
138
+ else:
139
+ outputs = self.model.generate(
140
+ valid_images,
141
+ max_new_tokens=max_length,
142
+ num_beams=self.num_beams,
143
+ early_stopping=True
144
+ )
145
+ return outputs
146
+
147
+
148
+ class MLP(nn.Module):
149
+ def __init__(self, input_dim, output_dim):
150
+ super(MLP, self).__init__()
151
+ self.layers = nn.Sequential(
152
+ nn.Linear(input_dim, output_dim),
153
+ nn.GELU(),
154
+ nn.Linear(output_dim, output_dim),
155
+ )
156
+
157
+ def forward(self, x):
158
+ return self.layers(x)
159
+
160
+
161
+ @dataclass
162
+ class CLIPDecoderOutput(ModelOutput):
163
+ """
164
+ Output class for the CLIP Decoder model.
165
+ """
166
+ last_hidden_state: Optional[torch.FloatTensor] = None
167
+ generated_ids: Optional[torch.LongTensor] = None
168
+ generated_text: Optional[list] = None
169
+
170
+
171
+ class CLIPDecoder(nn.Module):
172
+
173
+
174
+ def __init__(
175
+ self,
176
+ language_model: str,
177
+ qwen2_config: dict,
178
+ VLV_model: SDModel,
179
+ device: torch.device,
180
+ bf16: Union[bool, str] = False,
181
+ args: argparse.Namespace = None
182
+ ):
183
+ """
184
+ Initialize the CLIP Decoder model.
185
+
186
+ Args:
187
+ language_model: Path to the language model
188
+ VLV_model: The VLV model instance
189
+ device: The device to run the model on
190
+ bf16: Precision flag; the string "bf16" selects bfloat16, anything else keeps float32
191
+ """
192
+ super(CLIPDecoder, self).__init__()
193
+
194
+ self._dtype = torch.bfloat16 if bf16 == "bf16" else torch.float32
195
+ self.qwen2_tokenizer = AutoTokenizer.from_pretrained(language_model)
196
+ self.qwen2_config = AutoConfig.from_pretrained(language_model)
197
+ self.qwen2_model = AutoModelForCausalLM.from_config(self.qwen2_config)
198
+ self.qwen2_model.save_pretrained("QWEN_new_2", safe_serialization=False)
199
+ # self.qwen2_tokenizer = AutoTokenizer.from_pretrained(language_model)
200
+ # self.qwen2_model = AutoModelForCausalLM.from_pretrained(language_model,torch_dtype=self._dtype,device_map=None,low_cpu_mem_usage=True)
201
+ self.VLV_model = VLV_model # fp32 in this case
202
+ self.device = device
203
+ self.mlp = MLP(input_dim=1024, output_dim=self.qwen2_model.config.hidden_size)
204
+ self.ignore_token_id = -100
205
+
206
+ # self.qwen2_model = self.qwen2_model.to_empty(device=self.device)
207
+ # self.VLV_model = self.VLV_model.to_empty(device=self.device)
208
+ # self.mlp = self.mlp.to_empty(device=self.device)
209
+ # self.qwen2_model.gradient_checkpointing_enable()
210
+ # after you load qwen2_model
211
+ # for p in self.qwen_embed_tokens.parameters():
212
+ # p.requires_grad = False
213
+
214
+
215
+ def get_conditional_context(self, images, batch_size):
216
+ """
217
+ Get conditional context from images using the diffusion model.
218
+
219
+ Args:
220
+ images: Input images
221
+ batch_size: Batch size
222
+
223
+ Returns:
224
+ Decoder hidden states from the diffusion model
225
+ """
226
+ prompt = ["<MORE_DETAILED_CAPTION>"] * batch_size
227
+ inputs = self.VLV_model.processor(text=prompt, images=images, return_tensors="pt").to(self.device).to(torch.float32)
228
+
229
+ self.VLV_model = self.VLV_model.to(inputs["input_ids"].device)
230
+ self.qwen2_model = self.qwen2_model.to(inputs["input_ids"].device)
231
+ self.mlp = self.mlp.to(inputs["input_ids"].device)
232
+ self.VLV_model.model.language_model.model = self.VLV_model.model.language_model.model.to(inputs["input_ids"].device)
233
+
234
+ if inputs["input_ids"] is not None:
235
+ inputs_embeds = self.VLV_model.model.language_model.get_input_embeddings()(inputs["input_ids"]).to(self.device)
236
+
237
+ if inputs["pixel_values"] is not None:
238
+ image_features = self.VLV_model.model._encode_image(inputs["pixel_values"]).to(self.device)
239
+ inputs_embeds, attention_mask = self.VLV_model.model._merge_input_ids_with_image_features(
240
+ image_features, inputs_embeds
241
+ )
242
+
243
+ if inputs_embeds is not None:
244
+ attention_mask = attention_mask.to(inputs_embeds.dtype)
245
+
246
+ encoder_outputs = self.VLV_model.model.language_model.model.encoder(
247
+ inputs_embeds=inputs_embeds,
248
+ attention_mask=attention_mask,
249
+ output_hidden_states=True,
250
+ return_dict=True
251
+ )
252
+
253
+ decoder_inputs_embeds = self.VLV_model.query_embed.expand(batch_size, -1, -1)
254
+ decoder_attention_mask = torch.ones(
255
+ (batch_size, self.VLV_model.num_queries),
256
+ dtype=torch.float32,
257
+ device=self.device
258
+ )
259
+
260
+ encoder_hidden_states = encoder_outputs.last_hidden_state.to(torch.float32)
261
+ decoder_input_embeds = decoder_inputs_embeds.to(torch.float32)
262
+ attention_mask = attention_mask.to(torch.float32)
263
+
264
+ decoder_outputs = self.VLV_model.model.language_model.model.decoder(
265
+ inputs_embeds=decoder_input_embeds,
266
+ attention_mask=decoder_attention_mask,
267
+ encoder_hidden_states=encoder_hidden_states,
268
+ encoder_attention_mask=attention_mask,
269
+ output_hidden_states=True,
270
+ return_dict=True
271
+ )
272
+
273
+ return decoder_outputs.last_hidden_state
274
+
275
+ def process_image(self, images, batch_size):
276
+ """
277
+ Process images to get clip text embeddings.
278
+
279
+ Args:
280
+ images: Input images
281
+ batch_size: Batch size
282
+
283
+ Returns:
284
+ Processed clip text embeddings and attention mask
285
+ """
286
+ decoder_hidden_states = self.get_conditional_context(images, batch_size)
287
+ context_embeds = self.VLV_model.language_proj(decoder_hidden_states)
288
+ clip_text_embeds = self.VLV_model.text_encoder(inputs_embeds=context_embeds).last_hidden_state
289
+ # clip_text_embeds = clip_text_embeds.to(self._dtype)
290
+ clip_text_embeds = self.mlp(clip_text_embeds)
291
+ clip_text_embeds_attention_mask = torch.ones(
292
+ (batch_size, self.VLV_model.num_queries),
293
+ dtype=torch.long,
294
+ device=self.device
295
+ )
296
+
297
+ return clip_text_embeds, clip_text_embeds_attention_mask
298
+
299
+ def prepare_generation_inputs(self, clip_text_embeds, clip_text_attention_mask=None):
300
+ """
301
+ Prepare inputs for text generation.
302
+
303
+ Args:
304
+ clip_text_embeds: Processed clip text embeddings
305
+ clip_text_attention_mask: Attention mask for clip text embeddings
306
+
307
+ Returns:
308
+ Dictionary of generation inputs
309
+ """
310
+ if clip_text_attention_mask is None:
311
+ clip_text_attention_mask = torch.ones(
312
+ (clip_text_embeds.shape[0], clip_text_embeds.shape[1]),
313
+ dtype=torch.long,
314
+ device=clip_text_embeds.device
315
+ )
316
+
317
+ return {
318
+ "inputs_embeds": clip_text_embeds,
319
+ "attention_mask": clip_text_attention_mask
320
+ }
321
+
322
+ def generate(self, images, max_new_tokens=300, num_beams=4, early_stopping=True):
323
+ """
324
+ Generate text from images.
325
+
326
+ Args:
327
+ images: Input images
328
+ max_new_tokens: Maximum number of tokens to generate
329
+ num_beams: Number of beams for beam search
330
+ early_stopping: Whether to stop early in beam search
331
+
332
+ Returns:
333
+ CLIPDecoderOutput with generated ids and text
334
+ """
335
+ batch_size = len(images)
336
+ clip_text_embeds, clip_text_attention_mask = self.process_image(images, batch_size)
337
+ generation_inputs = self.prepare_generation_inputs(clip_text_embeds, clip_text_attention_mask)
338
+
339
+ generation_inputs["inputs_embeds"] = generation_inputs["inputs_embeds"].to(torch.bfloat16)
340
+ generation_inputs["attention_mask"] = generation_inputs["attention_mask"].to(torch.bfloat16)
341
+
342
+ generated_ids = self.qwen2_model.generate(
343
+ inputs_embeds=generation_inputs["inputs_embeds"],
344
+ attention_mask=generation_inputs["attention_mask"],
345
+ max_new_tokens=max_new_tokens,
346
+ num_beams=num_beams,
347
+ early_stopping=early_stopping
348
+ )
349
+
350
+ generated_text = self.qwen2_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
351
+
352
+ return CLIPDecoderOutput(
353
+ generated_ids=generated_ids,
354
+ generated_text=generated_text
355
+ )
356
+
357
+ def forward(self, images, captions=None):
358
+ """
359
+ Forward pass for training.
360
+
361
+ Args:
362
+ images: Input images
363
+ captions: Target captions (optional, for training)
364
+
365
+ Returns:
366
+ CLIPDecoderOutput with loss and logits
367
+ """
368
+ batch_size = images.shape[0]
369
+
370
+ # Process images
371
+ clip_text_embeds, clip_text_attention_mask = self.process_image(images, batch_size)
372
+
373
+ # If no captions provided, return embeddings for generation
374
+ if captions is None:
375
+ return CLIPDecoderOutput(
376
+ last_hidden_state=clip_text_embeds
377
+ )
378
+ assert len(captions) == batch_size
379
+ # Process captions for training
380
+ qwen_input_ids = self.qwen2_tokenizer(
381
+ text=captions,
382
+ truncation=True,
383
+ return_tensors="pt",
384
+ padding="max_length",
385
+ max_length=300,
386
+ return_token_type_ids=False,
387
+ ).input_ids
388
+ assert len(captions) == batch_size
389
+ qwen_attention_mask = qwen_input_ids.ne(self.qwen2_tokenizer.pad_token_id).to(torch.long).to(self.device)
390
+
391
+ # Prepare labels for training
392
+ labels = qwen_input_ids
393
+ labels[labels == self.qwen2_tokenizer.pad_token_id] = self.ignore_token_id
394
+ labels = labels.to(self.device)
395
+ # Get embeddings for captions to create the full input sequence
396
+ labels_for_embeddings = labels.clone()
397
+ labels_for_embeddings[labels_for_embeddings == self.ignore_token_id] = self.qwen2_tokenizer.pad_token_id
398
+ clip_text_embeds_qwen = self.qwen2_model.get_input_embeddings()(labels_for_embeddings)
399
+
400
+ # Concatenate the embeddings and prepare attention mask
401
+ inputs_embeds = torch.cat((clip_text_embeds, clip_text_embeds_qwen), dim=1)
402
+ clip_seq_len = clip_text_embeds.shape[1]
403
+ clip_ignore_labels = torch.full((labels.shape[0], clip_seq_len), self.ignore_token_id).to(labels)
404
+ combined_labels = torch.cat((clip_ignore_labels, labels), dim=1)
405
+
406
+ attention_mask = torch.cat((
407
+ clip_text_attention_mask,
408
+ qwen_attention_mask
409
+ ), dim=1)
410
+
411
+ # Forward through language model
412
+ outputs = self.qwen2_model(
413
+ inputs_embeds=inputs_embeds,
414
+ labels=combined_labels,
415
+ attention_mask=attention_mask,
416
+ use_cache=False
417
+ )
418
+ return outputs
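
A hedged end-to-end sketch for the stage-2 captioner above, loading it through the auto_map declared in config.json further down. The repo id is a placeholder; note that VLV_MODEL.__init__ also reads config.max_length and config.num_beams, which do not appear among the keys shown in config.json, so they are patched in here with the defaults of CLIPDecoder.generate.

import torch
from PIL import Image
from transformers import AutoConfig, AutoModel

repo = "lambertxiao/VLV-captioner"   # placeholder repo id for this upload
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
config.max_length = getattr(config, "max_length", 300)   # defaults mirror CLIPDecoder.generate
config.num_beams = getattr(config, "num_beams", 4)
model = AutoModel.from_pretrained(repo, config=config, trust_remote_code=True)

image = Image.open("example.jpg").convert("RGB")
with torch.no_grad():
    out = model([image], max_length=config.max_length)   # VLV_MODEL.forward(valid_images, max_length)
print(out.generated_text[0])
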
build_unfreeze.py ADDED
@@ -0,0 +1,81 @@
+ import torch
+ from diffusers import AutoencoderKL, UNet2DConditionModel, DDPMScheduler, EulerDiscreteScheduler
+ from transformers import CLIPTokenizer, CLIPImageProcessor, CLIPTextConfig
+ from .modeling_clip import CustomCLIPTextModel
+ from .processing_florence2 import Florence2Processor
+ from .modeling_florence2 import Florence2ForConditionalGeneration
+ from .configuration_florence2 import Florence2Config
+
+ from diffusers import StableDiffusionPipeline
+ from transformers import AutoProcessor, AutoModelForCausalLM
+
+
+ def load_sd_model(training_args):
+     """Load the Stable Diffusion 2.1 components (all frozen)."""
+
+     repo_id = "stabilityai/stable-diffusion-2-1"
+
+     text_encoder = CustomCLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder")
+     tokenizer = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer")
+     vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae", revision=None)
+     scheduler = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler")
+     unet = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet", revision=None)
+
+     for m in [vae, text_encoder, unet]:
+         for param in m.parameters():
+             param.requires_grad = False
+
+     return (vae, tokenizer, text_encoder, unet, scheduler)
+
+
+ def load_Florence2_model(training_args):
+     config = Florence2Config.from_pretrained("microsoft/Florence-2-large")
+     config.vision_config.model_type = "davit"
+     config._attn_implementation = "eager"
+     # Build the Florence-2 model from its config (weights are randomly initialised here)
+     model = Florence2ForConditionalGeneration(config=config)
+     processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
+
+     # Select which Florence-2 parameters are trainable; everything else stays frozen
+     if training_args.unfreeze_florence2_all:
+         for param in model.parameters():
+             param.requires_grad = True
+     elif training_args.unfreeze_florence2_language_model:
+         for param in model.parameters():
+             param.requires_grad = False
+         for param in model.language_model.parameters():
+             param.requires_grad = True
+         for param in model.language_model.lm_head.parameters():
+             param.requires_grad = False
+
+         model.language_model.lm_head.weight = torch.nn.Parameter(
+             model.language_model.lm_head.weight.detach().clone())
+
+         for p in model.language_model.lm_head.parameters():
+             p.requires_grad = False
+
+     elif training_args.unfreeze_florence2_language_model_decoder:
+         # Create a separate embedding layer for the decoder
+         original_embeddings = model.language_model.model.shared
+         new_decoder_embeddings = torch.nn.Embedding(
+             num_embeddings=original_embeddings.num_embeddings,
+             embedding_dim=original_embeddings.embedding_dim,
+             padding_idx=original_embeddings.padding_idx
+         )
+         # Copy the weights
+         new_decoder_embeddings.weight.data = original_embeddings.weight.data.clone()
+
+         # Replace the decoder embeddings
+         model.language_model.model.encoder.embed_tokens = original_embeddings
+         model.language_model.model.decoder.embed_tokens = new_decoder_embeddings
+         for param in model.parameters():
+             param.requires_grad = False
+         for param in model.language_model.model.decoder.parameters():
+             param.requires_grad = True
+         model.language_model.model.decoder.embed_tokens.weight.requires_grad = False
+     else:
+         for param in model.parameters():
+             param.requires_grad = False
+
+     return model, processor
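
A hedged sketch of calling the two loaders above outside the training script. Only the three unfreeze_* flags are read by load_Florence2_model (load_sd_model ignores its argument), so the Namespace carries just those; the import path assumes the repo folder is importable as a package.

import argparse
from vlv_repo.build_unfreeze import load_sd_model, load_Florence2_model   # "vlv_repo" is a placeholder package name

flags = argparse.Namespace(
    unfreeze_florence2_all=False,
    unfreeze_florence2_language_model=False,
    unfreeze_florence2_language_model_decoder=True,
)
vae, tokenizer, text_encoder, unet, scheduler = load_sd_model(flags)
florence, processor = load_Florence2_model(flags)

# With the decoder-only option, only the Florence-2 language decoder should be trainable.
trainable = sum(p.numel() for p in florence.parameters() if p.requires_grad)
print(f"trainable Florence-2 parameters: {trainable / 1e6:.1f}M")
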
config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "architectures": [
+     "VLV_MODEL"
+   ],
+   "auto_map": {
+     "AutoConfig": "De_DiffusionV2_stage2.VLV_Config",
+     "AutoModel": "De_DiffusionV2_stage2.VLV_MODEL",
+     "AutoModelForCausalLM": "De_DiffusionV2_stage2.VLV_MODEL"
+   },
+   "model_type": "VLV_decoder",
+   "batch_size": 1,
+   "deepspeed": true,
+   "distributed": true,
+   "fp32": true,
+   "guidance_scale": 2.0,
+   "hidden_size": 128,
+   "image_size": 768,
+   "learnable_token_length": 77,
+   "local_rank": 0,
+   "mixed_precision": "bf16",
+   "num_inference_steps": 50,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.1",
+   "use_text_encoder": true,
+   "verbose": true,
+   "qwen_model": "Qwen/Qwen2.5-3B",
+   "qwen2_config": {
+     "architectures": [
+       "Qwen2ForCausalLM"
+     ],
+     "attention_dropout": 0.0,
+     "bos_token_id": 151643,
+     "eos_token_id": 151643,
+     "hidden_act": "silu",
+     "hidden_size": 2048,
+     "initializer_range": 0.02,
+     "intermediate_size": 11008,
+     "max_position_embeddings": 32768,
+     "max_window_layers": 36,
+     "model_type": "qwen2",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 36,
+     "num_key_value_heads": 2,
+     "rms_norm_eps": 1e-06,
+     "rope_theta": 1000000.0,
+     "sliding_window": 32768,
+     "tie_word_embeddings": true,
+     "torch_dtype": "bfloat16",
+     "transformers_version": "4.40.1",
+     "use_cache": true,
+     "use_mrope": false,
+     "use_sliding_window": false,
+     "vocab_size": 151936
+   }
+ }
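
The nested "qwen2_config" block above mirrors the Qwen2.5-3B configuration that CLIPDecoder rebuilds from config.qwen_model. A hedged sketch of inspecting it directly from the repo folder:

import json
from transformers import Qwen2Config

with open("config.json") as f:
    cfg = json.load(f)
qwen_cfg = Qwen2Config(**cfg["qwen2_config"])
print(cfg["qwen_model"], qwen_cfg.hidden_size, qwen_cfg.num_hidden_layers)   # Qwen/Qwen2.5-3B 2048 36
print(cfg["learnable_token_length"], cfg["image_size"])                      # 77 query tokens, 768 px images
# hidden_size 2048 is the output dim of the MLP bridge in CLIPDecoder above.
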
configuration_florence2.py ADDED
@@ -0,0 +1,340 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import warnings
15
+ """ Florence-2 configuration"""
16
+
17
+ from typing import Optional
18
+
19
+ from transformers import AutoConfig
20
+ from transformers.configuration_utils import PretrainedConfig
21
+ from transformers.utils import logging
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ class Florence2VisionConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel
28
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
29
+ defaults will yield a similar configuration to that of the Florence2VisionModel architecture.
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+ Args:
35
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
36
+ The dropout rate of the drop path layer.
37
+ patch_size (`List[int]`, *optional*, defaults to [7, 3, 3, 3]):
38
+ The patch size of the image.
39
+ patch_stride (`List[int]`, *optional*, defaults to [4, 2, 2, 2]):
40
+ The patch stride of the image.
41
+ patch_padding (`List[int]`, *optional*, defaults to [3, 1, 1, 1]):
42
+ The patch padding of the image.
43
+ patch_prenorm (`List[bool]`, *optional*, defaults to [false, true, true, true]):
44
+ Whether to apply layer normalization before the patch embedding layer.
45
+ enable_checkpoint (`bool`, *optional*, defaults to False):
46
+ Whether to enable checkpointing.
47
+ dim_embed (`List[int]`, *optional*, defaults to [256, 512, 1024, 2048]):
48
+ The dimension of the embedding layer.
49
+ num_heads (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
50
+ The number of attention heads.
51
+ num_groups (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
52
+ The number of groups.
53
+ depths (`List[int]`, *optional*, defaults to [1, 1, 9, 1]):
54
+ The depth of the model.
55
+ window_size (`int`, *optional*, defaults to 12):
56
+ The window size of the model.
57
+ projection_dim (`int`, *optional*, defaults to 1024):
58
+ The dimension of the projection layer.
59
+ visual_temporal_embedding (`dict`, *optional*):
60
+ The configuration of the visual temporal embedding.
61
+ image_pos_embed (`dict`, *optional*):
62
+ The configuration of the image position embedding.
63
+ image_feature_source (`List[str]`, *optional*, defaults to ["spatial_avg_pool", "temporal_avg_pool"]):
64
+ The source of the image feature.
65
+ Example:
66
+
67
+ ```python
68
+ >>> from transformers import Florence2VisionConfig, Florence2VisionModel
69
+
70
+ >>> # Initializing a Florence2 Vision style configuration
71
+ >>> configuration = Florence2VisionConfig()
72
+
73
+ >>> # Initializing a model (with random weights)
74
+ >>> model = Florence2VisionModel(configuration)
75
+
76
+ >>> # Accessing the model configuration
77
+ >>> configuration = model.config
78
+ ```"""
79
+
80
+ model_type = "florence2_vision"
81
+ keys_to_ignore_at_inference = ["past_key_values"]
82
+
83
+ def __init__(
84
+ self,
85
+ drop_path_rate=0.1,
86
+ patch_size=[7, 3, 3, 3],
87
+ patch_stride=[4, 2, 2, 2],
88
+ patch_padding=[3, 1, 1, 1],
89
+ patch_prenorm=[False, True, True, True],
90
+ enable_checkpoint=False,
91
+ dim_embed=[256, 512, 1024, 2048],
92
+ num_heads=[8, 16, 32, 64],
93
+ num_groups=[8, 16, 32, 64],
94
+ depths=[1, 1, 9, 1],
95
+ window_size=12,
96
+ projection_dim=1024,
97
+ visual_temporal_embedding=None,
98
+ image_pos_embed=None,
99
+ image_feature_source=["spatial_avg_pool", "temporal_avg_pool"],
100
+ **kwargs,
101
+ ):
102
+ self.drop_path_rate = drop_path_rate
103
+ self.patch_size = patch_size
104
+ self.patch_stride = patch_stride
105
+ self.patch_padding = patch_padding
106
+ self.patch_prenorm = patch_prenorm
107
+ self.enable_checkpoint = enable_checkpoint
108
+ self.dim_embed = dim_embed
109
+ self.num_heads = num_heads
110
+ self.num_groups = num_groups
111
+ self.depths = depths
112
+ self.window_size = window_size
113
+ self.projection_dim = projection_dim
114
+ self.visual_temporal_embedding = visual_temporal_embedding
115
+ self.image_pos_embed = image_pos_embed
116
+ self.image_feature_source = image_feature_source
117
+
118
+ super().__init__(**kwargs)
119
+
120
+
121
+
122
+ class Florence2LanguageConfig(PretrainedConfig):
123
+ r"""
124
+ This is the configuration class to store the configuration of a [`Florence2LanguagePreTrainedModel`]. It is used to instantiate a BART
125
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
126
+ defaults will yield a similar configuration to that of the BART
127
+ [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.
128
+
129
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
130
+ documentation from [`PretrainedConfig`] for more information.
131
+
132
+
133
+ Args:
134
+ vocab_size (`int`, *optional*, defaults to 51289):
135
+ Vocabulary size of the Florence2Language model. Defines the number of different tokens that can be represented by the
136
+ `inputs_ids` passed when calling [`Florence2LanguageModel`].
137
+ d_model (`int`, *optional*, defaults to 1024):
138
+ Dimensionality of the layers and the pooler layer.
139
+ encoder_layers (`int`, *optional*, defaults to 12):
140
+ Number of encoder layers.
141
+ decoder_layers (`int`, *optional*, defaults to 12):
142
+ Number of decoder layers.
143
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
144
+ Number of attention heads for each attention layer in the Transformer encoder.
145
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
146
+ Number of attention heads for each attention layer in the Transformer decoder.
147
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
148
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
149
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
150
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
151
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
152
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
153
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
154
+ dropout (`float`, *optional*, defaults to 0.1):
155
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
156
+ attention_dropout (`float`, *optional*, defaults to 0.0):
157
+ The dropout ratio for the attention probabilities.
158
+ activation_dropout (`float`, *optional*, defaults to 0.0):
159
+ The dropout ratio for activations inside the fully connected layer.
160
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
161
+ The dropout ratio for classifier.
162
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
163
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
164
+ just in case (e.g., 512 or 1024 or 2048).
165
+ init_std (`float`, *optional*, defaults to 0.02):
166
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
167
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
168
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
169
+ for more details.
170
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
171
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
172
+ for more details.
173
+ scale_embedding (`bool`, *optional*, defaults to `False`):
174
+ Scale embeddings by diving by sqrt(d_model).
175
+ use_cache (`bool`, *optional*, defaults to `True`):
176
+ Whether or not the model should return the last key/values attentions (not used by all models).
177
+ num_labels (`int`, *optional*, defaults to 3):
178
+ The number of labels to use in [`Florence2LanguageForSequenceClassification`].
179
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
180
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
181
+ `eos_token_id`.
182
+
183
+ Example:
184
+
185
+ ```python
186
+ >>> from transformers import Florence2LanguageConfig, Florence2LanguageModel
187
+
188
+ >>> # Initializing a Florence2 Language style configuration
189
+ >>> configuration = Florence2LanguageConfig()
190
+
191
+ >>> # Initializing a model (with random weights)
192
+ >>> model = Florence2LanguageModel(configuration)
193
+
194
+ >>> # Accessing the model configuration
195
+ >>> configuration = model.config
196
+ ```"""
197
+
198
+ model_type = "florence2_language"
199
+ keys_to_ignore_at_inference = ["past_key_values"]
200
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
201
+
202
+ def __init__(
203
+ self,
204
+ vocab_size=51289,
205
+ max_position_embeddings=1024,
206
+ encoder_layers=12,
207
+ encoder_ffn_dim=4096,
208
+ encoder_attention_heads=16,
209
+ decoder_layers=12,
210
+ decoder_ffn_dim=4096,
211
+ decoder_attention_heads=16,
212
+ encoder_layerdrop=0.0,
213
+ decoder_layerdrop=0.0,
214
+ activation_function="gelu",
215
+ d_model=1024,
216
+ dropout=0.1,
217
+ attention_dropout=0.0,
218
+ activation_dropout=0.0,
219
+ init_std=0.02,
220
+ classifier_dropout=0.0,
221
+ scale_embedding=False,
222
+ use_cache=True,
223
+ num_labels=3,
224
+ pad_token_id=1,
225
+ bos_token_id=0,
226
+ eos_token_id=2,
227
+ is_encoder_decoder=True,
228
+ decoder_start_token_id=2,
229
+ forced_eos_token_id=2,
230
+ **kwargs,
231
+ ):
232
+ self.vocab_size = vocab_size
233
+ self.max_position_embeddings = max_position_embeddings
234
+ self.d_model = d_model
235
+ self.encoder_ffn_dim = encoder_ffn_dim
236
+ self.encoder_layers = encoder_layers
237
+ self.encoder_attention_heads = encoder_attention_heads
238
+ self.decoder_ffn_dim = decoder_ffn_dim
239
+ self.decoder_layers = decoder_layers
240
+ self.decoder_attention_heads = decoder_attention_heads
241
+ self.dropout = dropout
242
+ self.attention_dropout = attention_dropout
243
+ self.activation_dropout = activation_dropout
244
+ self.activation_function = activation_function
245
+ self.init_std = init_std
246
+ self.encoder_layerdrop = encoder_layerdrop
247
+ self.decoder_layerdrop = decoder_layerdrop
248
+ self.classifier_dropout = classifier_dropout
249
+ self.use_cache = use_cache
250
+ self.num_hidden_layers = encoder_layers
251
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
252
+
253
+ super().__init__(
254
+ num_labels=num_labels,
255
+ pad_token_id=pad_token_id,
256
+ bos_token_id=bos_token_id,
257
+ eos_token_id=eos_token_id,
258
+ is_encoder_decoder=is_encoder_decoder,
259
+ decoder_start_token_id=decoder_start_token_id,
260
+ forced_eos_token_id=forced_eos_token_id,
261
+ **kwargs,
262
+ )
263
+
264
+ # ensure backward compatibility for BART CNN models
265
+ if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
266
+ self.forced_bos_token_id = self.bos_token_id
267
+ warnings.warn(
268
+ f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
269
+ "The config can simply be saved and uploaded again to be fixed."
270
+ )
271
+
272
+ class Florence2Config(PretrainedConfig):
273
+ r"""
274
+ This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate an
275
+ Florence-2 model according to the specified arguments, defining the model architecture.
276
+
277
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
278
+ documentation from [`PretrainedConfig`] for more information.
279
+
280
+ Args:
281
+ vision_config (`Florence2VisionConfig`, *optional*):
282
+ Custom vision config or dict
283
+ text_config (`Union[AutoConfig, dict]`, *optional*):
284
+ The config object of the text backbone.
285
+ ignore_index (`int`, *optional*, defaults to -100):
286
+ The ignore index for the loss function.
287
+ vocab_size (`int`, *optional*, defaults to 51289):
288
+ Vocabulary size of the Florence2model. Defines the number of different tokens that can be represented by the
289
+ `inputs_ids` passed when calling [`~Florence2ForConditionalGeneration`]
290
+ projection_dim (`int`, *optional*, defaults to 1024):
291
+ Dimension of the multimodal projection space.
292
+
293
+ Example:
294
+
295
+ ```python
296
+ >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig
297
+
298
+ >>> # Initializing a clip-like vision config
299
+ >>> vision_config = CLIPVisionConfig()
300
+
301
+ >>> # Initializing a Bart config
302
+ >>> text_config = BartConfig()
303
+
304
+ >>> # Initializing a Florence-2 configuration
305
+ >>> configuration = Florence2Config(vision_config, text_config)
306
+
307
+ >>> # Initializing a model from the florence-2 configuration
308
+ >>> model = Florence2ForConditionalGeneration(configuration)
309
+
310
+ >>> # Accessing the model configuration
311
+ >>> configuration = model.config
312
+ ```"""
313
+
314
+ model_type = "florence2"
315
+ is_composition = False
316
+
317
+ def __init__(
318
+ self,
319
+ vision_config=None,
320
+ text_config=None,
321
+ ignore_index=-100,
322
+ vocab_size=51289,
323
+ projection_dim=1024,
324
+ **kwargs,
325
+ ):
326
+ self.ignore_index = ignore_index
327
+ self.vocab_size = vocab_size
328
+ self.projection_dim = projection_dim
329
+ if vision_config is not None:
330
+ vision_config = PretrainedConfig(**vision_config)
331
+ self.vision_config = vision_config
332
+ self.vocab_size = self.vocab_size
333
+
334
+ self.text_config = text_config
335
+ if text_config is not None:
336
+ self.text_config = Florence2LanguageConfig(**text_config)
337
+
338
+
339
+ super().__init__(**kwargs)
340
+
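
configuration_florence2.py is consumed in build_unfreeze.py via Florence2Config.from_pretrained; a short hedged sketch of that path, showing the nested vision/text configs this class materialises (the printed values are expectations for Florence-2-large, not guarantees):

from vlv_repo.configuration_florence2 import Florence2Config   # "vlv_repo" is a placeholder package name

config = Florence2Config.from_pretrained("microsoft/Florence-2-large")
config.vision_config.model_type = "davit"   # same patch applied in build_unfreeze.py
config._attn_implementation = "eager"

print(type(config.text_config).__name__)    # Florence2LanguageConfig
print(config.text_config.d_model)           # expected 1024, the width fed into language_proj upstream
print(config.vocab_size)
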
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b184db6b67379fca61c92bc26004b60bdd5af7f894369dd869b0c95cae3eba73
+ size 4957566496
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6ebd062fa9eec9bb4beb9eaf10a55db4858053aefe1f50fd5d3f40f0a9ef04f
+ size 4995486764
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:066571ad0b78fcba6cbbe1dacbbd000081e7bdd59634e78ab5a265fc1cefd892
+ size 1553821280
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
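The three .safetensors entries above are Git LFS pointer stubs (roughly 11.5 GB of weights across the shards), and model.safetensors.index.json maps each parameter name to its shard. A hedged sketch for inspecting the shards once the real files have been pulled via LFS:

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)
shards = set(index["weight_map"].values())
print(len(index["weight_map"]), "tensors across", len(shards), "shards")

with safe_open("model-00001-of-00003.safetensors", framework="pt") as f:
    name = next(iter(f.keys()))
    print(name, f.get_slice(name).get_shape())
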
modeling_clip.py ADDED
@@ -0,0 +1,155 @@
1
+ from transformers import CLIPTokenizer, CLIPImageProcessor, CLIPTextModel, CLIPPreTrainedModel, CLIPTextConfig
2
+ from transformers.models.clip.modeling_clip import CLIPTextEmbeddings, CLIPEncoder, CLIPAttention, CLIPMLP, CLIPEncoderLayer, _create_4d_causal_attention_mask, _prepare_4d_attention_mask, BaseModelOutputWithPooling
3
+ from typing import Optional, Union, Tuple
4
+ import torch
5
+ from torch import nn
6
+
7
+ class CustomCLIPTokenizer(CLIPTokenizer):
8
+ def __init__(self, *args, **kwargs):
9
+ super().__init__(*args, **kwargs)
10
+ # Inherit everything from the original tokenizer
11
+ # No additional initialization needed unless you want to add specific features
12
+
13
+ class CustomCLIPImageProcessor(CLIPImageProcessor):
14
+ def __init__(self, *args, **kwargs):
15
+ super().__init__(*args, **kwargs)
16
+ # Inherit everything from the original processor
17
+ # No additional initialization needed unless you want to add specific features
18
+
19
+ class CustomCLIPTextTransformer(nn.Module):
20
+ def __init__(self, config):
21
+ super().__init__()
22
+ self.config = config
23
+ embed_dim = config.hidden_size
24
+
25
+ self.embeddings = CLIPTextEmbeddings(config)
26
+ self.encoder = CLIPEncoder(config)
27
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
28
+
29
+ # For `pooled_output` computation
30
+ self.eos_token_id = config.eos_token_id
31
+
32
+ def forward(
33
+ self,
34
+ input_ids: Optional[torch.Tensor] = None,
35
+ attention_mask: Optional[torch.Tensor] = None,
36
+ position_ids: Optional[torch.Tensor] = None,
37
+ inputs_embeds: Optional[torch.FloatTensor] = None,
38
+ output_attentions: Optional[bool] = None,
39
+ output_hidden_states: Optional[bool] = None,
40
+ return_dict: Optional[bool] = None,
41
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
42
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
43
+ output_hidden_states = (
44
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
45
+ )
46
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
47
+
48
+ if input_ids is not None and inputs_embeds is not None:
49
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
50
+
51
+ if input_ids is None and inputs_embeds is None:
52
+ raise ValueError("You must provide either input_ids or inputs_embeds")
53
+
54
+
55
+ if inputs_embeds is not None:
56
+ inputs_embeds = self.embeddings(inputs_embeds=inputs_embeds)
57
+ else:
58
+ inputs_embeds = self.embeddings(input_ids=input_ids, position_ids=position_ids)
59
+
60
+ # CLIP's text model uses causal mask, prepare it here.
61
+ causal_attention_mask = _create_4d_causal_attention_mask(
62
+ inputs_embeds.size()[:-1], inputs_embeds.dtype, device=inputs_embeds.device
63
+ )
64
+
65
+ # expand attention_mask
66
+ if attention_mask is not None:
67
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
68
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
69
+
70
+ encoder_outputs = self.encoder(
71
+ inputs_embeds=inputs_embeds,
72
+ attention_mask=attention_mask,
73
+ causal_attention_mask=causal_attention_mask,
74
+ output_attentions=output_attentions,
75
+ output_hidden_states=output_hidden_states,
76
+ return_dict=return_dict,
77
+ )
78
+
79
+ last_hidden_state = encoder_outputs[0]
80
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
81
+
82
+ # Update the pooled output computation to work with both input types
83
+ if input_ids is not None:
84
+ # Use input_ids to find the EOS token position
85
+ if self.eos_token_id == 2:
86
+ pooled_output = last_hidden_state[
87
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
88
+ input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
89
+ ]
90
+ else:
91
+ pooled_output = last_hidden_state[
92
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
93
+ (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
94
+ .int()
95
+ .argmax(dim=-1),
96
+ ]
97
+ else:
98
+ # When using inputs_embeds, use the last token as the pooled output
99
+ pooled_output = last_hidden_state[:, -1]
100
+
101
+ if not return_dict:
102
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
103
+
104
+ return BaseModelOutputWithPooling(
105
+ last_hidden_state=last_hidden_state,
106
+ pooler_output=pooled_output,
107
+ hidden_states=encoder_outputs.hidden_states,
108
+ attentions=encoder_outputs.attentions,
109
+ )
110
+
111
+ class CustomCLIPTextModel(CLIPPreTrainedModel):
112
+ config_class = CLIPTextConfig
113
+ _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]
114
+
115
+ def __init__(self, config: CLIPTextConfig):
116
+ super().__init__(config)
117
+ self.text_model = CustomCLIPTextTransformer(config)
118
+ # Initialize weights and apply final processing
119
+ self.post_init()
120
+
121
+ def get_input_embeddings(self) -> nn.Module:
122
+ return self.text_model.embeddings.token_embedding
123
+
124
+ def set_input_embeddings(self, value):
125
+ self.text_model.embeddings.token_embedding = value
126
+
127
+ def forward(
128
+ self,
129
+ input_ids: Optional[torch.Tensor] = None,
130
+ attention_mask: Optional[torch.Tensor] = None,
131
+ position_ids: Optional[torch.Tensor] = None,
132
+ inputs_embeds: Optional[torch.FloatTensor] = None,
133
+ output_attentions: Optional[bool] = None,
134
+ output_hidden_states: Optional[bool] = None,
135
+ return_dict: Optional[bool] = None,
136
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
137
+ print("input_ids shape:", input_ids)
138
+ print("attention_mask shape:", attention_mask)
139
+ print("position_ids shape:", position_ids)
140
+ if inputs_embeds is not None:
141
+ print("inputs_embeds shape:", inputs_embeds.shape)
142
+
143
+ print("output_attentions:", output_attentions)
144
+ print("output_hidden_states:", output_hidden_states)
145
+ print("return_dict:", return_dict)
146
+
147
+ return self.text_model(
148
+ input_ids=input_ids,
149
+ attention_mask=attention_mask,
150
+ position_ids=position_ids,
151
+ inputs_embeds=inputs_embeds,
152
+ output_attentions=output_attentions,
153
+ output_hidden_states=output_hidden_states,
154
+ return_dict=return_dict,
155
+ )
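
A minimal, shape-level sketch of how the CustomCLIPTextModel above can be driven with precomputed inputs_embeds instead of input_ids; the default CLIPTextConfig, the random embeddings, and the assumption that the class is importable from the module above are illustrative, not part of this upload.

import torch
from transformers import CLIPTextConfig

config = CLIPTextConfig()                    # illustrative default text config
model = CustomCLIPTextModel(config).eval()   # randomly initialized; shapes only

batch_size, seq_len = 2, 77
# Precomputed embeddings bypass the token-embedding lookup; shape [B, T, hidden_size].
inputs_embeds = torch.randn(batch_size, seq_len, config.hidden_size)

with torch.no_grad():
    out = model(inputs_embeds=inputs_embeds, return_dict=True)

# With inputs_embeds and no input_ids, the pooled output falls back to the last token.
print(out.last_hidden_state.shape)   # torch.Size([2, 77, hidden_size])
print(out.pooler_output.shape)       # torch.Size([2, hidden_size])

The last-token fallback is presumably what lets learned embeddings be fed straight into the CLIP text encoder by the diffusion pipeline in this repo.
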
modeling_florence2.py ADDED
The diff for this file is too large to render. See raw diff
 
processing_florence2.py ADDED
@@ -0,0 +1,1147 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Florence-2.
17
+ """
18
+
19
+ import re
20
+ import logging
21
+ from typing import List, Optional, Union
22
+ import numpy as np
23
+ import math
24
+
25
+ import torch
26
+
27
+ from transformers.feature_extraction_utils import BatchFeature
28
+ from transformers.image_utils import ImageInput, is_valid_image
29
+ from transformers.processing_utils import ProcessorMixin
30
+ from transformers.tokenization_utils_base import (
31
+ PaddingStrategy,
32
+ PreTokenizedInput,
33
+ TextInput,
34
+ TruncationStrategy,
35
+ )
36
+ from transformers import BartTokenizer, BartTokenizerFast
37
+ from transformers.utils import TensorType
38
+
39
+
40
+ logger = logging.getLogger(__name__)
41
+
42
+ # Copied from transformers.models.idefics2.processing_idefics2.is_url
43
+ def is_url(val) -> bool:
44
+ return isinstance(val, str) and val.startswith("http")
45
+
46
+ # Copied from transformers.models.idefics2.processing_idefics2.is_image_or_image_url
47
+ def is_image_or_image_url(elem):
48
+ return is_url(elem) or is_valid_image(elem)
49
+
50
+
51
+ def _is_str_or_image(elem):
52
+ return isinstance(elem, (str)) or is_image_or_image_url(elem)
53
+
54
+
55
+ class Florence2Processor(ProcessorMixin):
56
+ r"""
57
+ Constructs a Florence2 processor which wraps a Florence2 image processor and a Florence2 tokenizer into a single processor.
58
+
59
+ [`Florence2Processor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BartTokenizerFast`]. See the
60
+ [`~Florence2Processor.__call__`] and [`~Florence2Processor.decode`] for more information.
61
+
62
+ Args:
63
+ image_processor ([`CLIPImageProcessor`], *optional*):
64
+ The image processor is a required input.
65
+ tokenizer ([`BartTokenizerFast`], *optional*):
66
+ The tokenizer is a required input.
67
+ """
68
+
69
+ attributes = ["image_processor", "tokenizer"]
70
+ image_processor_class = "CLIPImageProcessor"
71
+ tokenizer_class = ("BartTokenizer", "BartTokenizerFast")
72
+
73
+ def __init__(
74
+ self,
75
+ image_processor=None,
76
+ tokenizer=None,
77
+ ):
78
+ if image_processor is None:
79
+ raise ValueError("You need to specify an `image_processor`.")
80
+ if tokenizer is None:
81
+ raise ValueError("You need to specify a `tokenizer`.")
82
+ if not hasattr(image_processor, "image_seq_length"):
83
+ raise ValueError("Image processor is missing an `image_seq_length` attribute.")
84
+
85
+ self.image_seq_length = image_processor.image_seq_length
86
+
87
+ tokens_to_add = {
88
+ 'additional_special_tokens': \
89
+ tokenizer.additional_special_tokens + \
90
+ ['<od>', '</od>', '<ocr>', '</ocr>'] + \
91
+ [f'<loc_{x}>' for x in range(1000)] + \
92
+ ['<cap>', '</cap>', '<ncap>', '</ncap>','<dcap>', '</dcap>', '<grounding>', '</grounding>', '<seg>', '</seg>', '<sep>', '<region_cap>', '</region_cap>', '<region_to_desciption>', '</region_to_desciption>', '<proposal>', '</proposal>', '<poly>', '</poly>', '<and>']
93
+ }
94
+ tokenizer.add_special_tokens(tokens_to_add)
95
+
96
+ self.tasks_answer_post_processing_type = {
97
+ '<OCR>': 'pure_text',
98
+ '<OCR_WITH_REGION>': 'ocr',
99
+ '<CAPTION>': 'pure_text',
100
+ '<DETAILED_CAPTION>': 'pure_text',
101
+ '<MORE_DETAILED_CAPTION>': 'pure_text',
102
+ '<OD>': 'description_with_bboxes',
103
+ '<DENSE_REGION_CAPTION>': 'description_with_bboxes',
104
+ '<CAPTION_TO_PHRASE_GROUNDING>': "phrase_grounding",
105
+ '<REFERRING_EXPRESSION_SEGMENTATION>': 'polygons',
106
+ '<REGION_TO_SEGMENTATION>': 'polygons',
107
+ '<OPEN_VOCABULARY_DETECTION>': 'description_with_bboxes_or_polygons',
108
+ '<REGION_TO_CATEGORY>': 'pure_text',
109
+ '<REGION_TO_DESCRIPTION>': 'pure_text',
110
+ '<REGION_TO_OCR>': 'pure_text',
111
+ '<REGION_PROPOSAL>': 'bboxes'
112
+ }
113
+
114
+ self.task_prompts_without_inputs = {
115
+ '<OCR>': 'What is the text in the image?',
116
+ '<OCR_WITH_REGION>': 'What is the text in the image, with regions?',
117
+ '<CAPTION>': 'What does the image describe?',
118
+ '<DETAILED_CAPTION>': 'Describe in detail what is shown in the image.',
119
+ '<MORE_DETAILED_CAPTION>': 'Describe with a paragraph what is shown in the image.',
120
+ '<OD>': 'Locate the objects with category name in the image.',
121
+ '<DENSE_REGION_CAPTION>': 'Locate the objects in the image, with their descriptions.',
122
+ '<REGION_PROPOSAL>': 'Locate the region proposals in the image.'
123
+ }
124
+
125
+ self.task_prompts_with_input = {
126
+ '<CAPTION_TO_PHRASE_GROUNDING>': "Locate the phrases in the caption: {input}",
127
+ '<REFERRING_EXPRESSION_SEGMENTATION>': 'Locate {input} in the image with mask',
128
+ '<REGION_TO_SEGMENTATION>': 'What is the polygon mask of region {input}',
129
+ '<OPEN_VOCABULARY_DETECTION>': 'Locate {input} in the image.',
130
+ '<REGION_TO_CATEGORY>': 'What is the region {input}?',
131
+ '<REGION_TO_DESCRIPTION>': 'What does the region {input} describe?',
132
+ '<REGION_TO_OCR>': 'What text is in the region {input}?',
133
+ }
134
+
135
+ self.post_processor = Florence2PostProcesser(tokenizer=tokenizer)
136
+
137
+
138
+ super().__init__(image_processor, tokenizer)
139
+
140
+ def _construct_prompts(self, text):
141
+ # replace the task tokens with the task prompts if task token is in the text
142
+ prompts = []
143
+ for _text in text:
144
+ # 1. fixed task prompts without additional inputs
145
+ for task_token, task_prompt in self.task_prompts_without_inputs.items():
146
+ if task_token in _text:
147
+ assert _text == task_token, f"Task token {task_token} should be the only token in the text."
148
+ _text = task_prompt
149
+ break
150
+ # 2. task prompts with additional inputs
151
+ for task_token, task_prompt in self.task_prompts_with_input.items():
152
+ if task_token in _text:
153
+ _text = task_prompt.format(input=_text.replace(task_token, ''))
154
+ break
155
+ prompts.append(_text)
156
+ return prompts
157
+
158
+ def __call__(
159
+ self,
160
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
161
+ images: ImageInput = None,
162
+ tokenize_newline_separately: bool = True,
163
+ padding: Union[bool, str, PaddingStrategy] = False,
164
+ truncation: Union[bool, str, TruncationStrategy] = None,
165
+ max_length=None,
166
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
167
+ do_resize: bool = None,
168
+ do_normalize: bool = None,
169
+ image_mean: Optional[Union[float, List[float]]] = None,
170
+ image_std: Optional[Union[float, List[float]]] = None,
171
+ data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821
172
+ input_data_format: Optional[
173
+ Union[str, "ChannelDimension"] # noqa: F821
174
+ ] = None,
175
+ resample: "PILImageResampling" = None, # noqa: F821
176
+ do_convert_rgb: bool = None,
177
+ do_thumbnail: bool = None,
178
+ do_align_long_axis: bool = None,
179
+ do_rescale: bool = None,
180
+ ) -> BatchFeature:
181
+ """
182
+ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
183
+ and `kwargs` arguments to BartTokenizerFast's [`~BartTokenizerFast.__call__`] if `text` is not `None` to encode
184
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
185
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
186
+ of the above two methods for more information.
187
+
188
+ Args:
189
+ text (`str`, `List[str]`, `List[List[str]]`):
190
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
191
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
192
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
193
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
194
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
195
+ tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
196
+ number of channels, H and W are image height and width.
197
+ tokenize_newline_separately (`bool`, defaults to `True`):
198
+ Adds a separately tokenized '\n' at the end of the prompt.
199
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
200
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
201
+ index) among:
202
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
203
+ sequence is provided).
204
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
205
+ acceptable input length for the model if that argument is not provided.
206
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
207
+ lengths).
208
+ max_length (`int`, *optional*):
209
+ Maximum length of the returned list and optionally padding length (see above).
210
+ truncation (`bool`, *optional*):
211
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
212
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
213
+ If set, will return tensors of a particular framework. Acceptable values are:
214
+
215
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
216
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
217
+ - `'np'`: Return NumPy `np.ndarray` objects.
218
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
219
+
220
+ Returns:
221
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
222
+
223
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. If `suffix`
224
+ is provided, the `input_ids` will also contain the suffix input ids.
225
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
226
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
227
+ `None`).
228
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
229
+ - **labels** -- Labels compatible with training if `suffix` is not None
230
+ """
231
+
232
+ return_token_type_ids = False
233
+
234
+ if images is None:
235
+ raise ValueError("`images` are expected as arguments to a `Florence2Processor` instance.")
236
+ if text is None:
237
+ logger.warning_once(
238
+ "You are using Florence-2 without a text prompt."
239
+ )
240
+ text = ""
241
+
242
+ if isinstance(text, List) and isinstance(images, List):
243
+ if len(images) < len(text):
244
+ raise ValueError(
245
+ f"Received {len(images)} images for {len(text)} prompts. Each prompt should be associated with an image."
246
+ )
247
+ if _is_str_or_image(text):
248
+ text = [text]
249
+ elif isinstance(text, list) and _is_str_or_image(text[0]):
250
+ pass
251
+
252
+ pixel_values = self.image_processor(
253
+ images,
254
+ do_resize=do_resize,
255
+ do_normalize=do_normalize,
256
+ return_tensors=return_tensors,
257
+ image_mean=image_mean,
258
+ image_std=image_std,
259
+ input_data_format=input_data_format,
260
+ data_format=data_format,
261
+ resample=resample,
262
+ do_convert_rgb=do_convert_rgb,
263
+ )["pixel_values"]
264
+
265
+ if max_length is not None:
266
+ max_length -= self.image_seq_length # max_length has to account for the image tokens
267
+
268
+ text = self._construct_prompts(text)
269
+
270
+ inputs = self.tokenizer(
271
+ text,
272
+ return_tensors=return_tensors,
273
+ padding=padding,
274
+ max_length=max_length,
275
+ truncation=truncation,
276
+ return_token_type_ids=return_token_type_ids,
277
+ )
278
+
279
+ return_data = {**inputs, "pixel_values": pixel_values}
280
+
281
+ if return_token_type_ids:
282
+ labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100)
283
+ return_data.update({"labels": labels})
284
+ return BatchFeature(data=return_data)
285
+
286
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Florence2
287
+ def batch_decode(self, *args, **kwargs):
288
+ """
289
+ This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
290
+ refer to the docstring of this method for more information.
291
+ """
292
+ return self.tokenizer.batch_decode(*args, **kwargs)
293
+
294
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Florence2
295
+ def decode(self, *args, **kwargs):
296
+ """
297
+ This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
298
+ the docstring of this method for more information.
299
+ """
300
+ return self.tokenizer.decode(*args, **kwargs)
301
+
302
+ @property
303
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->Florence2
304
+ def model_input_names(self):
305
+ tokenizer_input_names = self.tokenizer.model_input_names
306
+ image_processor_input_names = self.image_processor.model_input_names
307
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
308
+
309
+ def post_process_generation(self, text=None, sequence=None, transition_beam_score=None, task=None, image_size=None):
310
+ """
311
+ Post-process the output of the model to each of the task outputs.
312
+
313
+ Args:
314
+ text (`str`): The text to post-process.
315
+ task (`str`): The task to post-process the text for.
316
+ image_size (`Tuple[int, int]`): The size of the image as (width, height).
317
+ """
318
+
319
+ task_answer_post_processing_type = self.tasks_answer_post_processing_type.get(task, 'pure_text')
320
+ task_answer = self.post_processor(
321
+ text=text,
322
+ sequence=sequence,
323
+ transition_beam_score=transition_beam_score,
324
+ image_size=image_size,
325
+ parse_tasks=task_answer_post_processing_type,
326
+ )[task_answer_post_processing_type]
327
+
328
+ if task_answer_post_processing_type == 'pure_text':
329
+ final_answer = task_answer
330
+ # remove the special tokens
331
+ final_answer = final_answer.replace('<s>', '').replace('</s>', '')
332
+ elif task_answer_post_processing_type in ['od', 'description_with_bboxes', 'bboxes']:
333
+ od_instances = task_answer
334
+ bboxes_od = [_od_instance['bbox'] for _od_instance in od_instances]
335
+ labels_od = [str(_od_instance['cat_name']) for _od_instance in od_instances]
336
+ final_answer = {'bboxes': bboxes_od, 'labels': labels_od}
337
+ if len(od_instances) and 'score' in od_instances[0]:
338
+ scores_od = [_od_instance['score'] for _od_instance in od_instances]
339
+ final_answer['scores'] = scores_od
340
+ elif task_answer_post_processing_type in ['ocr']:
341
+ bboxes = [_od_instance['quad_box'] for _od_instance in task_answer]
342
+ labels = [str(_od_instance['text']) for _od_instance in task_answer]
343
+ final_answer = {'quad_boxes': bboxes, 'labels': labels}
344
+ elif task_answer_post_processing_type in ['phrase_grounding']:
345
+ bboxes = []
346
+ labels = []
347
+ for _grounded_phrase in task_answer:
348
+ for _bbox in _grounded_phrase['bbox']:
349
+ bboxes.append(_bbox)
350
+ labels.append(_grounded_phrase['cat_name'])
351
+ final_answer = {'bboxes': bboxes, 'labels': labels}
352
+ elif task_answer_post_processing_type in ['description_with_polygons', 'polygons']:
353
+ labels = []
354
+ polygons = []
355
+ for result in task_answer:
356
+ label = result['cat_name']
357
+ _polygons = result['polygons']
358
+ labels.append(label)
359
+ polygons.append(_polygons)
360
+ final_answer = {'polygons': polygons, 'labels': labels}
361
+ elif task_answer_post_processing_type in ['description_with_bboxes_or_polygons']:
362
+ bboxes = []
363
+ bboxes_labels = []
364
+ polygons = []
365
+ polygons_labels = []
366
+ for result in task_answer:
367
+ label = result['cat_name']
368
+ if 'polygons' in result:
369
+ _polygons = result['polygons']
370
+ polygons.append(_polygons)
371
+ polygons_labels.append(label)
372
+ else:
373
+ _bbox = result['bbox']
374
+ bboxes.append(_bbox)
375
+ bboxes_labels.append(label)
376
+ final_answer = {'bboxes': bboxes, 'bboxes_labels': bboxes_labels, 'polygons': polygons, 'polygons_labels': polygons_labels}
377
+ else:
378
+ raise ValueError('Unknown task answer post processing type: {}'.format(task_answer_post_processing_type))
379
+
380
+ final_answer = {
381
+ task: final_answer}
382
+ return final_answer
383
+
384
+ class BoxQuantizer(object):
385
+ def __init__(self, mode, bins):
386
+ self.mode = mode
387
+ self.bins = bins
388
+
389
+ def quantize(self, boxes: torch.Tensor, size):
390
+ bins_w, bins_h = self.bins # Quantization bins.
391
+ size_w, size_h = size # Original image size.
392
+ size_per_bin_w = size_w / bins_w
393
+ size_per_bin_h = size_h / bins_h
394
+ xmin, ymin, xmax, ymax = boxes.split(1, dim=-1) # Shape: 4 * [N, 1].
395
+
396
+ if self.mode == 'floor':
397
+ quantized_xmin = (
398
+ xmin / size_per_bin_w).floor().clamp(0, bins_w - 1)
399
+ quantized_ymin = (
400
+ ymin / size_per_bin_h).floor().clamp(0, bins_h - 1)
401
+ quantized_xmax = (
402
+ xmax / size_per_bin_w).floor().clamp(0, bins_w - 1)
403
+ quantized_ymax = (
404
+ ymax / size_per_bin_h).floor().clamp(0, bins_h - 1)
405
+
406
+ elif self.mode == 'round':
407
+ raise NotImplementedError()
408
+
409
+ else:
410
+ raise ValueError('Incorrect quantization type.')
411
+
412
+ quantized_boxes = torch.cat(
413
+ (quantized_xmin, quantized_ymin, quantized_xmax, quantized_ymax), dim=-1
414
+ ).int()
415
+
416
+ return quantized_boxes
417
+
418
+ def dequantize(self, boxes: torch.Tensor, size):
419
+ bins_w, bins_h = self.bins # Quantization bins.
420
+ size_w, size_h = size # Original image size.
421
+ size_per_bin_w = size_w / bins_w
422
+ size_per_bin_h = size_h / bins_h
423
+ xmin, ymin, xmax, ymax = boxes.split(1, dim=-1) # Shape: 4 * [N, 1].
424
+
425
+ if self.mode == 'floor':
426
+ # Add 0.5 to use the center position of the bin as the coordinate.
427
+ dequantized_xmin = (xmin + 0.5) * size_per_bin_w
428
+ dequantized_ymin = (ymin + 0.5) * size_per_bin_h
429
+ dequantized_xmax = (xmax + 0.5) * size_per_bin_w
430
+ dequantized_ymax = (ymax + 0.5) * size_per_bin_h
431
+
432
+ elif self.mode == 'round':
433
+ raise NotImplementedError()
434
+
435
+ else:
436
+ raise ValueError('Incorrect quantization type.')
437
+
438
+ dequantized_boxes = torch.cat(
439
+ (dequantized_xmin, dequantized_ymin,
440
+ dequantized_xmax, dequantized_ymax), dim=-1
441
+ )
442
+
443
+ return dequantized_boxes
444
+
445
+
446
+ class CoordinatesQuantizer(object):
447
+ """
448
+ Quantize coordinates (Nx2)
449
+ """
450
+
451
+ def __init__(self, mode, bins):
452
+ self.mode = mode
453
+ self.bins = bins
454
+
455
+ def quantize(self, coordinates: torch.Tensor, size):
456
+ bins_w, bins_h = self.bins # Quantization bins.
457
+ size_w, size_h = size # Original image size.
458
+ size_per_bin_w = size_w / bins_w
459
+ size_per_bin_h = size_h / bins_h
460
+ assert coordinates.shape[-1] == 2, 'coordinates should be shape (N, 2)'
461
+ x, y = coordinates.split(1, dim=-1) # Shape: 2 * [N, 1].
462
+
463
+ if self.mode == 'floor':
464
+ quantized_x = (x / size_per_bin_w).floor().clamp(0, bins_w - 1)
465
+ quantized_y = (y / size_per_bin_h).floor().clamp(0, bins_h - 1)
466
+
467
+ elif self.mode == 'round':
468
+ raise NotImplementedError()
469
+
470
+ else:
471
+ raise ValueError('Incorrect quantization type.')
472
+
473
+ quantized_coordinates = torch.cat(
474
+ (quantized_x, quantized_y), dim=-1
475
+ ).int()
476
+
477
+ return quantized_coordinates
478
+
479
+ def dequantize(self, coordinates: torch.Tensor, size):
480
+ bins_w, bins_h = self.bins # Quantization bins.
481
+ size_w, size_h = size # Original image size.
482
+ size_per_bin_w = size_w / bins_w
483
+ size_per_bin_h = size_h / bins_h
484
+ assert coordinates.shape[-1] == 2, 'coordinates should be shape (N, 2)'
485
+ x, y = coordinates.split(1, dim=-1) # Shape: 2 * [N, 1].
486
+
487
+ if self.mode == 'floor':
488
+ # Add 0.5 to use the center position of the bin as the coordinate.
489
+ dequantized_x = (x + 0.5) * size_per_bin_w
490
+ dequantized_y = (y + 0.5) * size_per_bin_h
491
+
492
+ elif self.mode == 'round':
493
+ raise NotImplementedError()
494
+
495
+ else:
496
+ raise ValueError('Incorrect quantization type.')
497
+
498
+ dequantized_coordinates = torch.cat(
499
+ (dequantized_x, dequantized_y), dim=-1
500
+ )
501
+
502
+ return dequantized_coordinates
503
+
504
+
505
+ class Florence2PostProcesser(object):
506
+ r"""
507
+ Florence-2 post-processor for converting text predictions into task-specific results.
508
+
509
+ Args:
510
+ config: A dict of configs.
511
+ tokenizer: A tokenizer for decoding text to spans.
512
+ sample config:
513
+ UNIFIED_POST_PROCESS:
514
+ # common configs
515
+ NUM_BBOX_HEIGHT_BINS: 1000
516
+ NUM_BBOX_WIDTH_BINS: 1000
517
+ COORDINATES_HEIGHT_BINS: 1000
518
+ COORDINATES_WIDTH_BINS: 1000
519
+ # task specific configs, override the common configs
520
+ PARSE_TASKS:
521
+ - TASK_NAME: 'video_dense_caption'
522
+ PATTERN: 'r<time_(\d+)><time_(\d+)>([a-zA-Z0-9 ]+)'
523
+ SCORE_MODE: 'avg_cat_name_scores'
524
+ NUM_BINS: 100
525
+ - TASK_NAME: 'od'
526
+ PATTERN: 'r<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>([a-zA-Z0-9 ]+)'
527
+ SCORE_MODE: 'avg_cat_name_scores'
528
+
529
+ Returns:
530
+ parsed_dict (dict): A dict of parsed results.
531
+ """
532
+ def __init__(
533
+ self,
534
+ tokenizer=None
535
+ ):
536
+ parse_tasks = []
537
+ parse_task_configs = {}
538
+ config = self._create_default_config()
539
+ for task in config['PARSE_TASKS']:
540
+ parse_tasks.append(task['TASK_NAME'])
541
+ parse_task_configs[task['TASK_NAME']] = task
542
+
543
+ self.config = config
544
+ self.parse_tasks = parse_tasks
545
+ self.parse_tasks_configs = parse_task_configs
546
+
547
+ self.tokenizer = tokenizer
548
+ if self.tokenizer is not None:
549
+ self.all_special_tokens = set(self.tokenizer.all_special_tokens)
550
+
551
+ self.init_quantizers()
552
+ self.black_list_of_phrase_grounding = self._create_black_list_of_phrase_grounding()
553
+
554
+ def _create_black_list_of_phrase_grounding(self):
555
+ black_list = {}
556
+
557
+ if 'phrase_grounding' in self.parse_tasks and self.parse_tasks_configs['phrase_grounding']['FILTER_BY_BLACK_LIST']:
558
+ black_list = set(
559
+ ['it', 'I', 'me', 'mine',
560
+ 'you', 'your', 'yours',
561
+ 'he', 'him', 'his',
562
+ 'she', 'her', 'hers',
563
+ 'they', 'them', 'their', 'theirs',
564
+ 'one', 'oneself',
565
+ 'we', 'us', 'our', 'ours',
566
+ 'you', 'your', 'yours',
567
+ 'they', 'them', 'their', 'theirs',
568
+ 'mine', 'yours', 'his', 'hers', 'its',
569
+ 'ours', 'yours', 'theirs',
570
+ 'myself', 'yourself', 'himself', 'herself', 'itself',
571
+ 'ourselves', 'yourselves', 'themselves',
572
+ 'this', 'that',
573
+ 'these', 'those',
574
+ 'who', 'whom', 'whose', 'which', 'what',
575
+ 'who', 'whom', 'whose', 'which', 'that',
576
+ 'all', 'another', 'any', 'anybody', 'anyone', 'anything',
577
+ 'each', 'everybody', 'everyone', 'everything',
578
+ 'few', 'many', 'nobody', 'none', 'one', 'several',
579
+ 'some', 'somebody', 'someone', 'something',
580
+ 'each other', 'one another',
581
+ 'myself', 'yourself', 'himself', 'herself', 'itself',
582
+ 'ourselves', 'yourselves', 'themselves',
583
+ 'the image', 'image', 'images', 'the', 'a', 'an', 'a group',
584
+ 'other objects', 'lots', 'a set',
585
+ ]
586
+ )
587
+
588
+ return black_list
589
+
590
+ def _create_default_config(self):
591
+ config = {
592
+ 'NUM_BBOX_HEIGHT_BINS': 1000,
593
+ 'NUM_BBOX_WIDTH_BINS': 1000,
594
+ 'BOX_QUANTIZATION_MODE': 'floor',
595
+ 'COORDINATES_HEIGHT_BINS': 1000,
596
+ 'COORDINATES_WIDTH_BINS': 1000,
597
+ 'COORDINATES_QUANTIZATION_MODE': 'floor',
598
+ 'PARSE_TASKS': [
599
+ {
600
+ 'TASK_NAME': 'od',
601
+ 'PATTERN': r'([a-zA-Z0-9 ]+)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>',
602
+ 'SCORE_MODE': 'avg_loc_scores'
603
+ },
604
+ {
605
+ 'TASK_NAME': 'ocr',
606
+ 'PATTERN': r'(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>',
607
+ 'AREA_THRESHOLD': 0.00
608
+ },
609
+ {
610
+ 'TASK_NAME': 'phrase_grounding',
611
+ 'FILTER_BY_BLACK_LIST': True
612
+ },
613
+ {
614
+ 'TASK_NAME': 'pure_text',
615
+ },
616
+ {
617
+ 'TASK_NAME': 'description_with_bboxes',
618
+ 'SCORE_MODE': 'avg_loc_scores'
619
+ },
620
+ {
621
+ 'TASK_NAME': 'description_with_polygons',
622
+ },
623
+ {
624
+ 'TASK_NAME': 'polygons',
625
+ },
626
+ {
627
+ 'TASK_NAME': 'bboxes',
628
+ },
629
+ {
630
+ 'TASK_NAME': 'description_with_bboxes_or_polygons',
631
+ }
632
+ ]
633
+ }
634
+
635
+ return config
636
+
637
+ def init_quantizers(self):
638
+ # we have box_quantizer (od, grounding) and coordinates_quantizer (ocr, referring_segmentation)
639
+ num_bbox_height_bins = self.config.get('NUM_BBOX_HEIGHT_BINS', 1000)
640
+ num_bbox_width_bins = self.config.get('NUM_BBOX_WIDTH_BINS', 1000)
641
+ box_quantization_mode = self.config.get('BOX_QUANTIZATION_MODE', 'floor')
642
+ self.box_quantizer = BoxQuantizer(
643
+ box_quantization_mode,
644
+ (num_bbox_width_bins, num_bbox_height_bins),
645
+ )
646
+
647
+ num_bbox_height_bins = self.config['COORDINATES_HEIGHT_BINS'] if 'COORDINATES_HEIGHT_BINS' in self.config else self.config.get('NUM_BBOX_HEIGHT_BINS', 1000)
648
+ num_bbox_width_bins = self.config['COORDINATES_WIDTH_BINS'] if 'COORDINATES_WIDTH_BINS' in self.config else self.config.get('NUM_BBOX_WIDTH_BINS', 1000)
649
+ box_quantization_mode = self.config.get('COORDINATES_QUANTIZATION_MODE') if 'COORDINATES_QUANTIZATION_MODE' in self.config else self.config.get('BOX_QUANTIZATION_MODE', 'floor')
650
+ self.coordinates_quantizer = CoordinatesQuantizer(
651
+ box_quantization_mode,
652
+ (num_bbox_width_bins, num_bbox_height_bins),
653
+ )
654
+
655
+ def decode_with_spans(self, tokenizer, token_ids):
656
+ filtered_tokens = tokenizer.convert_ids_to_tokens(
657
+ token_ids, skip_special_tokens=False)
658
+ assert len(filtered_tokens) == len(token_ids)
659
+ sub_texts = []
660
+ for token in filtered_tokens:
661
+ if token in self.all_special_tokens:
662
+ sub_texts.append(token)
663
+ else:
664
+ if isinstance(tokenizer, (BartTokenizer, BartTokenizerFast)):
665
+ sub_text = tokenizer.convert_tokens_to_string([token])
666
+ else:
667
+ raise ValueError(f'type {type(tokenizer)} not supported')
668
+ sub_texts.append(sub_text)
669
+
670
+ text = ''
671
+ spans = []
672
+ for sub_text in sub_texts:
673
+ span = (len(text), len(text) + len(sub_text)) # [start index, end index).
674
+ text += sub_text
675
+ spans.append(span)
676
+ return text, spans
677
+
678
+ def parse_od_from_text_and_spans(
679
+ self,
680
+ text,
681
+ pattern,
682
+ image_size,
683
+ phrase_centric=False
684
+ ):
685
+ parsed = list(re.finditer(pattern, text))
686
+
687
+ instances = []
688
+ for i in range(len(parsed)):
689
+ # Prepare instance.
690
+ instance = {}
691
+
692
+ if phrase_centric:
693
+ bbox_bins = [int(parsed[i].group(j)) for j in range(2, 6)]
694
+ else:
695
+ bbox_bins = [int(parsed[i].group(j)) for j in range(1, 5)]
696
+ instance['bbox'] = self.box_quantizer.dequantize(
697
+ boxes=torch.tensor(bbox_bins),
698
+ size=image_size
699
+ ).tolist()
700
+
701
+ if phrase_centric:
702
+ instance['cat_name'] = parsed[i].group(1).lower().strip()
703
+ else:
704
+ instance['cat_name'] = parsed[i].group(5).lower().strip()
705
+ instances.append(instance)
706
+
707
+ return instances
708
+
709
+ def parse_ocr_from_text_and_spans(self,
710
+ text,
711
+ pattern,
712
+ image_size,
713
+ area_threshold=-1.0,
714
+ ):
715
+ bboxes = []
716
+ labels = []
717
+ text = text.replace('<s>', '')
718
+ # ocr with regions
719
+ parsed = re.findall(pattern, text)
720
+ instances = []
721
+ image_width, image_height = image_size
722
+
723
+ for ocr_line in parsed:
724
+ ocr_content = ocr_line[0]
725
+ quad_box = ocr_line[1:]
726
+ quad_box = [int(i) for i in quad_box]
727
+ quad_box = self.coordinates_quantizer.dequantize(
728
+ torch.tensor(np.array(quad_box).reshape(-1, 2)),
729
+ size=image_size
730
+ ).reshape(-1).tolist()
731
+
732
+ if area_threshold > 0:
733
+ x_coords = [i for i in quad_box[0::2]]
734
+ y_coords = [i for i in quad_box[1::2]]
735
+
736
+ # apply the Shoelace formula
737
+ area = 0.5 * abs(sum(x_coords[i] * y_coords[(i + 1) % 4] - x_coords[(i + 1) % 4] * y_coords[i] for i in range(4)))
738
+
739
+ if area < (image_width * image_height) * area_threshold:
740
+ continue
741
+
742
+ bboxes.append(quad_box)
743
+ labels.append(ocr_content)
744
+ instances.append({
745
+ 'quad_box': quad_box,
746
+ 'text': ocr_content,
747
+ })
748
+ return instances
749
+
750
+ def parse_phrase_grounding_from_text_and_spans(self, text, pattern, image_size):
751
+ # ignore <s> </s> and <pad>
752
+ cur_span = 0
753
+ if text.startswith('<s>'):
754
+ cur_span += 3
755
+
756
+ text = text.replace('<s>', '')
757
+ text = text.replace('</s>', '')
758
+ text = text.replace('<pad>', '')
759
+
760
+ pattern = r"([^<]+(?:<loc_\d+>){4,})"
761
+ phrases = re.findall(pattern, text)
762
+
763
+ # pattern should be text pattern and od pattern
764
+ pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)'
765
+ box_pattern = r'<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>'
766
+
767
+ instances = []
768
+ for pharse_text in phrases:
769
+ phrase_text_strip = pharse_text.replace('<ground>', '', 1)
770
+ phrase_text_strip = phrase_text_strip.replace('<obj>', '', 1)
771
+
772
+ if phrase_text_strip == '':
773
+ cur_span += len(pharse_text)
774
+ continue
775
+
776
+ # Prepare instance.
777
+ instance = {}
778
+
779
+ # parse phrase, get string
780
+ phrase = re.search(pattern, phrase_text_strip)
781
+ if phrase is None:
782
+ cur_span += len(pharse_text)
783
+ continue
784
+
785
+ # parse bboxes by box_pattern
786
+ bboxes_parsed = list(re.finditer(box_pattern, pharse_text))
787
+ if len(bboxes_parsed) == 0:
788
+ cur_span += len(pharse_text)
789
+ continue
790
+
791
+ phrase = phrase.group()
792
+ # remove leading and trailing spaces
793
+ phrase = phrase.strip()
794
+
795
+ if phrase in self.black_list_of_phrase_grounding:
796
+ cur_span += len(pharse_text)
797
+ continue
798
+
799
+ # a list of list
800
+ bbox_bins = [[int(_bboxes_parsed.group(j)) for j in range(1, 5)] for _bboxes_parsed in bboxes_parsed]
801
+ instance['bbox'] = self.box_quantizer.dequantize(
802
+ boxes=torch.tensor(bbox_bins),
803
+ size=image_size
804
+ ).tolist()
805
+
806
+ # exclude non-ascii characters
807
+ phrase = phrase.encode('ascii',errors='ignore').decode('ascii')
808
+ instance['cat_name'] = phrase
809
+
810
+ instances.append(instance)
811
+
812
+ return instances
813
+
814
+ def parse_description_with_bboxes_from_text_and_spans(
815
+ self,
816
+ text,
817
+ spans=None,
818
+ scores=None,
819
+ score_mode=None,
820
+ pattern=None,
821
+ image_size=None,
822
+ allow_empty_phrase=False
823
+ ):
824
+ def find_matched_token_indices(cur_span, token_spans):
825
+ inds = []
826
+ for i, token_span in enumerate(token_spans):
827
+ if not (token_span[1] <= cur_span[0] or token_span[0] >= cur_span[1]):
828
+ inds.append(i)
829
+ return inds
830
+
831
+ cur_span = 0
832
+ if text.startswith('<s>'):
833
+ cur_span += 3
834
+
835
+ text = text.replace('<s>', '')
836
+ text = text.replace('</s>', '')
837
+ text = text.replace('<pad>', '')
838
+
839
+ if allow_empty_phrase:
840
+ pattern = rf"(?:(?:<loc_\d+>){{4,}})"
841
+ else:
842
+ pattern = r"([^<]+(?:<loc_\d+>){4,})"
843
+ phrases = re.findall(pattern, text)
844
+
845
+ # pattern should be text pattern and od pattern
846
+ pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)'
847
+ box_pattern = r'<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>'
848
+
849
+ instances = []
850
+ for pharse_text in phrases:
851
+ phrase_text_strip = pharse_text.replace('<ground>', '', 1)
852
+ phrase_text_strip = phrase_text_strip.replace('<obj>', '', 1)
853
+
854
+ if phrase_text_strip == '' and not allow_empty_phrase:
855
+ cur_span += len(pharse_text)
856
+ continue
857
+
858
+ # parse phrase, get string
859
+ phrase = re.search(pattern, phrase_text_strip)
860
+ if phrase is None:
861
+ cur_span += len(pharse_text)
862
+ continue
863
+
864
+ phrase_span = phrase.span()
865
+ phrase = phrase.group()
866
+ # remove leading and trailing spaces
867
+ phrase = phrase.strip()
868
+
869
+ # parse bboxes by box_pattern
870
+ bboxes_parsed = list(re.finditer(box_pattern, pharse_text))
871
+ if len(bboxes_parsed) == 0:
872
+ cur_span += len(pharse_text)
873
+ continue
874
+
875
+ # a list of list
876
+ bbox_bins = [[int(_bboxes_parsed.group(j)) for j in range(1, 5)] for _bboxes_parsed in bboxes_parsed]
877
+
878
+ bboxes = self.box_quantizer.dequantize(
879
+ boxes=torch.tensor(bbox_bins),
880
+ size=image_size
881
+ ).tolist()
882
+
883
+ if score_mode == 'avg_loc_scores':
884
+ if spans is None or scores is None:
885
+ all_scores = None
886
+ else:
887
+ bbox_end_spans = [_bboxes_parsed.span(0) for _bboxes_parsed in bboxes_parsed]
888
+ all_scores = []
889
+ for _spans in bbox_end_spans:
890
+ token_inds = find_matched_token_indices((_spans[0] + cur_span, _spans[1]+ cur_span), spans)
891
+ loc_scores = [scores[token_i] for token_i in token_inds]
892
+ score = sum(loc_scores) / len(loc_scores)
893
+ all_scores.append(score)
894
+ elif score_mode == 'avg_cat_name_scores':
895
+ if spans is None or scores is None:
896
+ all_scores = None
897
+ else:
898
+ cat_name_token_inds = find_matched_token_indices((phrase_span[0] + cur_span, phrase_span[1]+cur_span), spans)
899
+ cat_name_scores = [scores[token_i] for token_i in cat_name_token_inds]
900
+ score = sum(cat_name_scores) / len(cat_name_scores)
901
+ all_scores = [score] * len(bboxes)
902
+ elif score_mode is None:
903
+ all_scores = None
904
+ else:
905
+ raise ValueError('Unknown score mode: {}'.format(score_mode))
906
+
907
+ phrase = phrase.encode('ascii',errors='ignore').decode('ascii')
908
+ for _idx, _bboxes in enumerate(bboxes):
909
+ # Prepare instance.
910
+ instance = {}
911
+ instance['bbox'] = _bboxes
912
+ # exclude non-ascii characters
913
+ instance['cat_name'] = phrase
914
+ if all_scores is not None:
915
+ instance['score'] = math.exp(all_scores[_idx])
916
+ instances.append(instance)
917
+
918
+ cur_span += len(pharse_text)
919
+
920
+ return instances
921
+
922
+ def parse_description_with_polygons_from_text_and_spans(self, text, pattern, image_size,
923
+ allow_empty_phrase=False,
924
+ polygon_sep_token='<sep>',
925
+ polygon_start_token='<poly>',
926
+ polygon_end_token='</poly>',
927
+ with_box_at_start=False,
928
+ ):
929
+
930
+ # ref_seg format: '<expression><x1><y1><x2><y2><><><sep><><><><>'
931
+ # ignore <s> </s> and <pad>
932
+
933
+ text = text.replace('<s>', '')
934
+ text = text.replace('</s>', '')
935
+ text = text.replace('<pad>', '')
936
+
937
+ if allow_empty_phrase:
938
+ pattern = rf"(?:(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
939
+ else:
940
+ # [^<]+: This part matches one or more characters that are not the < symbol.
941
+ # The ^ inside the square brackets [] is a negation, meaning it matches anything except <.
942
+ #
943
+ pattern = rf"([^<]+(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
944
+ phrases = re.findall(pattern, text)
945
+
946
+ phrase_string_pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_|<poly>)'
947
+ box_pattern = rf'((?:<loc_\d+>)+)(?:{re.escape(polygon_sep_token)}|$)'
948
+
949
+ # one polygons instance is separated by polygon_start_token and polygon_end_token
950
+ polygons_instance_pattern = rf'{re.escape(polygon_start_token)}(.*?){re.escape(polygon_end_token)}'
951
+
952
+ instances = []
953
+ for phrase_text in phrases:
954
+
955
+ # exclude loc_\d+>
956
+ # token spans would be needed here if we wanted to attach a category score
957
+ phrase_text_strip = re.sub(r'^loc_\d+>', '', phrase_text, count=1)
958
+
959
+ # phrase = phrase.replace('<poly>', '')
960
+ # phrase = phrase.replace('poly>', '')
961
+
962
+ if phrase_text_strip == '' and not allow_empty_phrase:
963
+ continue
964
+
965
+
966
+ # parse phrase, get string
967
+ phrase = re.search(phrase_string_pattern, phrase_text_strip)
968
+ if phrase is None:
969
+ continue
970
+ phrase = phrase.group()
971
+ # remove leading and trailing spaces
972
+ phrase = phrase.strip()
973
+
974
+ # parse bboxes by box_pattern
975
+
976
+ # split by polygon_start_token and polygon_end_token first using polygons_instance_pattern
977
+ if polygon_start_token in phrase_text and polygon_end_token in phrase_text:
978
+ polygons_instances_parsed = list(re.finditer(polygons_instance_pattern, phrase_text))
979
+ else:
980
+ polygons_instances_parsed = [phrase_text]
981
+
982
+ for _polygons_instances_parsed in polygons_instances_parsed:
983
+ # Prepare instance.
984
+ instance = {}
985
+
986
+ # polygons_parsed= list(re.finditer(box_pattern, phrase_text))
987
+ if isinstance(_polygons_instances_parsed, str):
988
+ polygons_parsed= list(re.finditer(box_pattern, _polygons_instances_parsed))
989
+ else:
990
+ polygons_parsed= list(re.finditer(box_pattern, _polygons_instances_parsed.group(1)))
991
+ if len(polygons_parsed) == 0:
992
+ continue
993
+
994
+ # a list of list (polygon)
995
+ bbox = []
996
+ polygons = []
997
+ for _polygon_parsed in polygons_parsed:
998
+ # group 1: whole <loc_\d+>...</loc_\d+>
999
+ _polygon = _polygon_parsed.group(1)
1000
+ # parse into list of int
1001
+ _polygon = [int(_loc_parsed.group(1)) for _loc_parsed in re.finditer(r'<loc_(\d+)>', _polygon)]
1002
+ if with_box_at_start and len(bbox) == 0:
1003
+ if len(_polygon) > 4:
1004
+ # the first four locations encode the bbox
1005
+ bbox = _polygon[:4]
1006
+ _polygon = _polygon[4:]
1007
+ else:
1008
+ bbox = [0, 0, 0, 0]
1009
+ # abandon last element if is not paired
1010
+ if len(_polygon) % 2 == 1:
1011
+ _polygon = _polygon[:-1]
1012
+
1013
+ # reshape into (n, 2)
1014
+ _polygon = self.coordinates_quantizer.dequantize(
1015
+ torch.tensor(np.array(_polygon).reshape(-1, 2)),
1016
+ size=image_size
1017
+ ).reshape(-1).tolist()
1018
+ # reshape back
1019
+ polygons.append(_polygon)
1020
+
1021
+ instance['cat_name'] = phrase
1022
+ instance['polygons'] = polygons
1023
+ if len(bbox) != 0:
1024
+ instance['bbox'] = self.box_quantizer.dequantize(
1025
+ boxes=torch.tensor([bbox]),
1026
+ size=image_size
1027
+ ).tolist()[0]
1028
+
1029
+ instances.append(instance)
1030
+
1031
+ return instances
1032
+
1033
+ def __call__(
1034
+ self,
1035
+ text=None,
1036
+ sequence=None,
1037
+ transition_beam_score=None,
1038
+ image_size=None,
1039
+ parse_tasks=None,
1040
+ ):
1041
+ """
1042
+ Args:
1043
+ text: model outputs
1044
+ image_size: (width, height)
1045
+ parse_tasks: a list of tasks to parse, if None, parse all tasks.
1046
+
1047
+ """
1048
+ if parse_tasks is not None:
1049
+ if isinstance(parse_tasks, str):
1050
+ parse_tasks = [parse_tasks]
1051
+ for _parse_task in parse_tasks:
1052
+ assert _parse_task in self.parse_tasks, f'parse task {_parse_task} not supported'
1053
+
1054
+ # sequence or text should be provided
1055
+ assert sequence is not None or text is not None, 'sequence or text should be provided'
1056
+ assert sequence is None or text is None, 'only one of sequence and text should be provided'
1057
+
1058
+ if sequence is not None:
1059
+ sequence = sequence.tolist()[1:]
1060
+ text, spans = self.decode_with_spans(self.tokenizer, sequence)
1061
+ if transition_beam_score is not None:
1062
+ transition_beam_score = transition_beam_score.tolist()
1063
+ assert len(sequence) == len(transition_beam_score)
1064
+ else:
1065
+ spans = None
1066
+ transition_beam_score = None
1067
+
1068
+ parsed_dict = {
1069
+ 'text': text
1070
+ }
1071
+
1072
+ for task in self.parse_tasks:
1073
+ if parse_tasks is not None and task not in parse_tasks:
1074
+ continue
1075
+
1076
+ pattern = self.parse_tasks_configs[task].get('PATTERN', None)
1077
+ score_mode = self.parse_tasks_configs[task].get('SCORE_MODE', None)
1078
+
1079
+ if task == 'ocr':
1080
+ instances = self.parse_ocr_from_text_and_spans(
1081
+ text,
1082
+ pattern=pattern,
1083
+ image_size=image_size,
1084
+ area_threshold=self.parse_tasks_configs[task].get('AREA_THRESHOLD', 0.0),
1085
+ )
1086
+ parsed_dict['ocr'] = instances
1087
+ elif task == 'phrase_grounding':
1088
+ instances = self.parse_phrase_grounding_from_text_and_spans(
1089
+ text,
1090
+ pattern=pattern,
1091
+ image_size=image_size,
1092
+ )
1093
+ parsed_dict['phrase_grounding'] = instances
1094
+ elif task == 'pure_text':
1095
+ parsed_dict['pure_text'] = text
1096
+ elif task == 'description_with_bboxes':
1097
+ instances = self.parse_description_with_bboxes_from_text_and_spans(
1098
+ text,
1099
+ spans=spans,
1100
+ scores=transition_beam_score,
1101
+ score_mode=score_mode,
1102
+ pattern=pattern,
1103
+ image_size=image_size,
1104
+ )
1105
+ parsed_dict['description_with_bboxes'] = instances
1106
+ elif task == 'description_with_polygons':
1107
+ instances = self.parse_description_with_polygons_from_text_and_spans(
1108
+ text,
1109
+ pattern=pattern,
1110
+ image_size=image_size,
1111
+ )
1112
+ parsed_dict['description_with_polygons'] = instances
1113
+ elif task == 'polygons':
1114
+ instances = self.parse_description_with_polygons_from_text_and_spans(
1115
+ text,
1116
+ pattern=pattern,
1117
+ image_size=image_size,
1118
+ allow_empty_phrase=True,
1119
+ )
1120
+ parsed_dict['polygons'] = instances
1121
+ elif task == 'bboxes':
1122
+ instances = self.parse_description_with_bboxes_from_text_and_spans(
1123
+ text,
1124
+ pattern=pattern,
1125
+ image_size=image_size,
1126
+ allow_empty_phrase=True,
1127
+ )
1128
+ parsed_dict['bboxes'] = instances
1129
+ elif task == 'description_with_bboxes_or_polygons':
1130
+ if '<poly>' in text:
1131
+ # only support either polygons or bboxes, not both at the same time
1132
+ instances = self.parse_description_with_polygons_from_text_and_spans(
1133
+ text,
1134
+ pattern=pattern,
1135
+ image_size=image_size,
1136
+ )
1137
+ else:
1138
+ instances = self.parse_description_with_bboxes_from_text_and_spans(
1139
+ text,
1140
+ pattern=pattern,
1141
+ image_size=image_size,
1142
+ )
1143
+ parsed_dict['description_with_bboxes_or_polygons'] = instances
1144
+ else:
1145
+ raise ValueError("task {} is not supported".format(task))
1146
+
1147
+ return parsed_dict
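
A small, self-contained sketch of the decoding path above: Florence2PostProcesser turning a generated string with <loc_*> tokens back into pixel-space boxes. The tokenizer repo id and the example string are illustrative assumptions; in a full pipeline the same branch is reached via Florence2Processor.post_process_generation.

from transformers import BartTokenizerFast

# Assumption: any BART tokenizer works here, since only its special-token list is
# consulted when decoding from raw text (token-id sequences also use convert_ids_to_tokens).
tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
post_processor = Florence2PostProcesser(tokenizer=tokenizer)

generated_text = "</s><s>a cat<loc_100><loc_150><loc_800><loc_900></s>"
parsed = post_processor(
    text=generated_text,
    image_size=(640, 480),                  # (width, height)
    parse_tasks="description_with_bboxes",
)

# Each <loc_k> indexes one of 1000 bins; dequantization maps the bin center to pixels,
# e.g. xmin = (100 + 0.5) * 640 / 1000 = 64.32.
print(parsed["description_with_bboxes"])
# -> [{'bbox': [64.32, 72.24, 512.32, 432.24], 'cat_name': 'a cat'}]  (up to float32 rounding)

Through the processor, post_process_generation(text=generated_text, task='<OD>', image_size=(640, 480)) reaches the same branch, because '<OD>' maps to 'description_with_bboxes' in tasks_answer_post_processing_type.
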
sd_config.py ADDED
@@ -0,0 +1,53 @@
1
+ from transformers.configuration_utils import PretrainedConfig
2
+ from transformers.utils import logging
3
+
4
+ logger = logging.get_logger(__name__)
5
+
6
+ class EmptyClass(PretrainedConfig):
7
+ def __init__(self):
8
+ pass
9
+ class SDConfig(PretrainedConfig):
10
+
11
+ def __init__(self,
12
+ override_total_steps = -1,
13
+ freeze_vae = True,
14
+ use_flash = False,
15
+ adapt_topk = -1,
16
+ loss = 'mse',
17
+ mean = [0.485, 0.456, 0.406],
18
+ std = [0.229, 0.224, 0.225],
19
+ use_same_noise_among_timesteps = False,
20
+ random_timestep_per_iteration = True,
21
+ rand_timestep_equal_int = False,
22
+ output_dir = './outputs/First_Start',
23
+ do_center_crop_size = 384,
24
+ architectures = None,
25
+ input = None,
26
+ model = None,
27
+ tta = None,
28
+ **kwargs
29
+
30
+
31
+ ):
32
+ super().__init__()
33
+ self.model = EmptyClass()
34
+ self.model.override_total_steps = override_total_steps
35
+ self.model.freeze_vae = freeze_vae
36
+ self.model.use_flash = use_flash
37
+ self.tta = EmptyClass()
38
+ self.tta.gradient_descent = EmptyClass()
39
+ self.tta.adapt_topk = adapt_topk
40
+ self.tta.loss = loss
41
+ self.tta.use_same_noise_among_timesteps = use_same_noise_among_timesteps
42
+ self.tta.random_timestep_per_iteration = random_timestep_per_iteration
43
+ self.tta.rand_timestep_equal_int = rand_timestep_equal_int
44
+ self.input = EmptyClass()
45
+ self.input.mean = mean
46
+ self.input.std = std
47
+ self.output_dir = output_dir
48
+ self.do_center_crop_size = do_center_crop_size
49
+ self.architectures = architectures
50
+ for k, v in kwargs.items():
51
+ setattr(self, k, v)
52
+ if __name__ == '__main__':
53
+ SDConfig()
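
A brief sketch of how the flat keyword arguments above are regrouped into the model/tta/input namespaces; scheduler_name is a hypothetical extra kwarg used only to show the catch-all setattr loop, and the import path assumes sd_config.py is importable locally.

from sd_config import SDConfig  # assumption: the module above is importable as sd_config

cfg = SDConfig(adapt_topk=16, do_center_crop_size=512, scheduler_name="ddim")

print(cfg.model.freeze_vae)            # True (default)
print(cfg.tta.adapt_topk)              # 16
print(cfg.input.mean, cfg.input.std)   # ImageNet mean/std defaults
print(cfg.do_center_crop_size)         # 512
print(cfg.scheduler_name)              # hypothetical extra kwarg attached via setattr
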
utils.py ADDED
@@ -0,0 +1,157 @@
1
+ """Utility functions"""
2
+ import importlib
3
+ import random
4
+
5
+ import torch
6
+ import numpy as np
7
+ from PIL import Image
8
+
9
+
10
+
11
+ class UnNormalize(object):
12
+ """Unformalize image as: image = (image * std) + mean
13
+ """
14
+ def __init__(self, mean, std):
15
+ self.mean = torch.tensor(mean)
16
+ self.std = torch.tensor(std)
17
+
18
+ def __call__(self, tensor):
19
+ """
20
+ Args:
21
+ tensor: A tensor of shape [C, H, W] or [N, C, H, W]
22
+
23
+ Returns:
24
+ tensor: A tensor of shape [C, H, W] or [N, C, H, W]
25
+ """
26
+
27
+ std = self.std.to(tensor.device)
28
+ mean = self.mean.to(tensor.device)
29
+ if tensor.ndim == 3:
30
+ std, mean = std.view(-1, 1, 1), mean.view(-1, 1, 1)
31
+ elif tensor.ndim == 4:
32
+ std, mean = std.view(1, -1, 1, 1), mean.view(1, -1, 1, 1)
33
+ tensor = (tensor * std) + mean
34
+ return tensor
35
+
36
+
37
+ class VQVAEUnNormalize(UnNormalize):
38
+ """Unformalize image as:
39
+ First: image = (image * std) + mean
40
+ Second: image = (image * 2) - 1
41
+ """
42
+ def __call__(self, tensor):
43
+ """
44
+ Args:
45
+ tensor (Tensor): Tensor image of size (C, H, W) or (N, C, H, W)
46
+ to be unnormalized.
47
+ Returns:
48
+ Tensor: UnNormalized image.
49
+ """
50
+ tensor = super().__call__(tensor)
51
+ tensor = 2 * tensor - 1
52
+ return tensor
53
+
54
+ def normalize(image, rescale=True):
55
+
56
+ if rescale:
57
+ image = image.float() / 255.0 # Convert to float and rescale to [0, 1]
58
+ normalize_image = 2*image-1 # normalize to [-1, 1]
59
+
60
+ return normalize_image
61
+
62
+ # train_transforms = transforms.Compose(
63
+ # [
64
+ # transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
65
+ # transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
66
+ # transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
67
+ # transforms.ToTensor(),
68
+ # transforms.Normalize([0.5], [0.5]),
69
+ # ]
70
+ # )
71
+
72
+
73
+ def mean_list(l):
74
+ l = [int(_l) for _l in l]
75
+ return float(sum(l)) / len(l)
76
+
77
+
78
+ def segment_mean(x, index):
79
+ """Function as tf.segment_mean.
80
+ """
81
+ x = x.view(-1, x.shape[-1])
82
+ index = index.view(-1)
83
+
84
+ max_index = index.max() + 1
85
+ sum_x = torch.zeros((max_index, x.shape[-1]),
86
+ dtype=x.dtype,
87
+ device=x.device)
88
+ num_index = torch.zeros((max_index,),
89
+ dtype=x.dtype,
90
+ device=x.device)
91
+
92
+ num_index = num_index.scatter_add_(
93
+ 0, index, torch.ones_like(index, dtype=x.dtype))
94
+ num_index = torch.where(torch.eq(num_index, 0),
95
+ torch.ones_like(num_index, dtype=x.dtype),
96
+ num_index)
97
+
98
+ index_2d = index.view(-1, 1).expand(-1, x.shape[-1])
99
+ sum_x = sum_x.scatter_add_(0, index_2d, x)
100
+ mean_x = sum_x.div_(num_index.view(-1, 1))
101
+
102
+ return mean_x
103
+
104
+
105
+
106
+
107
+
108
+ def initiate_time_steps(step, total_timestep, batch_size, config):
109
+ """A helper function to initiate time steps for the diffusion model.
110
+
111
+ Args:
112
+ step: An integer of the constant step
113
+ total_timestep: An integer of the total timesteps of the diffusion model
114
+ batch_size: An integer of the batch size
115
+ config: A config object
116
+
117
+ Returns:
118
+ timesteps: A tensor of shape [batch_size,] of the time steps
119
+ """
120
+ if config.tta.rand_timestep_equal_int:
121
+ # evenly spaced timesteps across the batch, shifted by a random offset
122
+ interval_val = total_timestep // batch_size
123
+ start_point = random.randint(0, interval_val - 1)
124
+ timesteps = torch.tensor(
125
+ list(range(start_point, total_timestep, interval_val))
126
+ ).long()
127
+ return timesteps
128
+ elif config.tta.random_timestep_per_iteration:
129
+ # random timestep for each image in the batch
130
+ return torch.randint(0, total_timestep, (batch_size,)).long() #default
131
+ else:
132
+ # fixed timestep: every image in the batch uses the given constant step
133
+ return torch.tensor([step] * batch_size).long()
134
+
135
+
136
+ def instantiate_from_config(config):
137
+ """A helper function to instantiate a class from a config object.
138
+ See https://github.com/CompVis/stable-diffusion/blob/main/ldm/util.py
139
+ """
140
+ if not "target" in config:
141
+ if config == '__is_first_stage__':
142
+ return None
143
+ elif config == "__is_unconditional__":
144
+ return None
145
+ raise KeyError("Expected key `target` to instantiate.")
146
+ return get_obj_from_str(config["target"])(**config.get("params", dict()))
147
+
148
+
149
+ def get_obj_from_str(string, reload=False):
150
+ """A helper function to instantiate a class from a config object.
151
+ See https://github.com/CompVis/stable-diffusion/blob/main/ldm/util.py
152
+ """
153
+ module, cls = string.rsplit(".", 1)
154
+ if reload:
155
+ module_imp = importlib.import_module(module)
156
+ importlib.reload(module_imp)
157
+ return getattr(importlib.import_module(module, package=None), cls)
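
To make the helpers above concrete, a short sketch exercising initiate_time_steps and segment_mean; the SimpleNamespace config carries only the two flags initiate_time_steps actually reads, and all numbers are illustrative.

from types import SimpleNamespace
import torch

# Stand-in for the real config: only tta.rand_timestep_equal_int and
# tta.random_timestep_per_iteration are consulted by initiate_time_steps.
cfg = SimpleNamespace(tta=SimpleNamespace(
    rand_timestep_equal_int=False,
    random_timestep_per_iteration=True,
))

timesteps = initiate_time_steps(step=0, total_timestep=1000, batch_size=4, config=cfg)
print(timesteps.shape, timesteps.dtype)   # torch.Size([4]) torch.int64

# segment_mean averages the rows of x that share an index, like tf.segment_mean:
x = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
index = torch.tensor([0, 0, 1])
print(segment_mean(x, index))             # tensor([[2., 3.], [5., 6.]])
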