| """MiniMaxVL01 model configuration""" | |
| from transformers.configuration_utils import PretrainedConfig | |
| from transformers.utils import logging | |
| from transformers.models.auto import CONFIG_MAPPING, AutoConfig | |
| from .configuration_minimax_text_01 import MiniMaxText01Config | |
| class MiniMaxVL01Config(PretrainedConfig): | |
| r""" | |
| This is the configuration class to store the configuration of a [`MiniMaxVL01ForConditionalGeneration`]. It is used to instantiate an | |
| MiniMaxVL01 model according to the specified arguments, defining the model architecture. Instantiating a configuration | |
| with the defaults will yield a similar configuration to that of the MiniMaxVL01. | |
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the | |
| documentation from [`PretrainedConfig`] for more information. | |

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MiniMaxText01Config`):
            The config object or dictionary of the text backbone.
        ignore_index (`int`, *optional*, defaults to -100):
            The ignore index for the loss function.
        image_token_index (`int`, *optional*, defaults to 32000):
            The image token index used to encode the image prompt.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function used by the multimodal projector.
        vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
            The feature selection strategy used to select the vision feature from the vision backbone.
            Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
            If `"full"`, the full vision features are used.
        vision_feature_layer (`int`, *optional*, defaults to -2):
            The index of the layer to select the vision feature from.
        image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
            A list of possible resolutions to use for processing high-resolution images. Each item in the list should
            be a tuple or list of the form `(height, width)`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        image_seq_length (`int`, *optional*, defaults to 576):
            Sequence length of one image embedding.

    Example:

    ```python
    >>> from transformers import MiniMaxVL01ForConditionalGeneration, MiniMaxVL01Config, CLIPVisionConfig, MiniMaxText01Config

    >>> # Initializing a CLIP-vision config
    >>> vision_config = CLIPVisionConfig()

    >>> # Initializing a MiniMaxText01 config
    >>> text_config = MiniMaxText01Config()

    >>> # Initializing a MiniMaxVL01 style configuration
    >>> configuration = MiniMaxVL01Config(vision_config, text_config)
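
    >>> # Illustrative sketch (not from the original example): the vision and text configs can also
    >>> # be passed as plain dicts, which `__init__` converts into config objects; the dict values
    >>> # below are assumed placeholders
    >>> configuration = MiniMaxVL01Config(
    ...     vision_config={"model_type": "clip_vision_model", "image_size": 336, "patch_size": 14},
    ...     text_config={"model_type": "minimax_text_01"},
    ... )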

    >>> # Initializing a model from the MiniMaxVL01 style configuration
    >>> model = MiniMaxVL01ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "minimax_vl_01"

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        ignore_index=-100,
        image_token_index=32000,
        projector_hidden_act="gelu",
        vision_feature_select_strategy="default",
        vision_feature_layer=-2,
        image_grid_pinpoints=None,
        tie_word_embeddings=False,
        image_seq_length=576,
        **kwargs,
    ):
        self.ignore_index = ignore_index
        self.image_token_index = image_token_index
        self.projector_hidden_act = projector_hidden_act
        self.image_seq_length = image_seq_length
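
        # Only the two documented strategies are supported: "default" drops the CLS token from the
        # vision features, "full" keeps all of them.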
        if vision_feature_select_strategy not in ["default", "full"]:
            raise ValueError(
                "vision_feature_select_strategy should be one of 'default', 'full'. "
                f"Got: {vision_feature_select_strategy}"
            )
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
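
        # Default set of (height, width) resolutions used for high-resolution image processing when
        # no grid is provided.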
        image_grid_pinpoints = (
            image_grid_pinpoints
            if image_grid_pinpoints is not None
            else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
        )
        self.image_grid_pinpoints = image_grid_pinpoints
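
        # Vision backbone config: accept a dict (resolved through CONFIG_MAPPING), a ready config
        # object, or fall back to a CLIP ViT-L/14 (336px) style vision config.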
        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "clip_vision_model")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        elif vision_config is None:
            vision_config = CONFIG_MAPPING["clip_vision_model"](
                intermediate_size=4096,
                hidden_size=1024,
                patch_size=14,
                image_size=336,
                num_hidden_layers=24,
                num_attention_heads=16,
                vocab_size=32000,
                projection_dim=768,
            )
        self.vision_config = vision_config
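
        # Text backbone config: built from a dict if provided (the dict must specify a model_type),
        # otherwise a default MiniMaxText01Config is used.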
        if text_config is not None:
            assert "model_type" in text_config, "text_config model_type is not specified"
            text_config = MiniMaxText01Config(**text_config)
        else:
            text_config = MiniMaxText01Config()
        self.text_config = text_config

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)