# Copyright 2023 Baichuan Inc. All Rights Reserved.
#
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Baichuan model."""
import os
import json
import math
from typing import List, Optional, Tuple, Union
from threading import Thread
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from transformers import PreTrainedModel
from transformers.activations import ACT2FN
from dataclasses import dataclass
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
# from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput
from transformers.modeling_outputs import CausalLMOutputWithPast, ModelOutput
from transformers.generation.utils import GenerationConfig
from transformers.utils import logging
from .configuration_baichuan import BaichuanConfig
from .audio_modeling_baichuan import BaichuanAudioEncoder, BaichuanAudioBridge
from .visual_modeling_baichuan import BaichuanVisualEncoder
from .processor_baichuan import BaichuanMMProcessor
from .moe import moe_matmul
# support model paths that contain a dot (.)
try:
# step1: copy relative imports to transformers_modules
from .generation_utils import build_chat_input, TextIterStreamer
from .sequence_parallel_utils import (
create_attention_layer,
get_sequence_parallel_size,
get_sequence_parallel_chunk,
)
except ModuleNotFoundError:
# step2: direct import from transformers_modules
try: # bypass check_imports failure
import sys
sys.path.append(os.path.dirname(__file__))
from generation_utils import build_chat_input, TextIterStreamer
from sequence_parallel_utils import (
create_attention_layer,
get_sequence_parallel_size,
get_sequence_parallel_chunk,
)
except Exception:
raise
logger = logging.get_logger(__name__)
def get_slopes(n):
    def get_slopes_power_of_2(n):
        start = 2 ** (-2 ** -(math.log2(n) - 3))
        ratio = start
        return [start * ratio ** i for i in range(n)]
    # In the ALiBi paper, models are only trained with 2^a heads for some a; the closed-form
    # slopes above have the desired properties only when n is a power of 2. When the number of
    # heads is not a power of 2, interleave slopes from the next power of 2 as a workaround.
    if math.log2(n).is_integer():
        return get_slopes_power_of_2(n)
    closest_power_of_2 = 2 ** math.floor(math.log2(n))
    return (
        get_slopes_power_of_2(closest_power_of_2)
        + get_slopes(2 * closest_power_of_2)[0::2][:n - closest_power_of_2]
    )
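# Illustrative note (not used at runtime): for a power-of-two head count the ALiBi slopes
# form a geometric sequence, e.g.
#     get_slopes(4) == [0.25, 0.0625, 0.015625, 0.00390625]
#     get_slopes(8) == [0.5, 0.25, 0.125, ..., 0.00390625]
# Head h then biases its attention scores by slope[h] * -(distance between positions).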
@dataclass
class BaseModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
`config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
visual_idx_visual: Optional[Tuple[torch.FloatTensor, ...]] = None
visual_idx_semantic: Optional[Tuple[torch.FloatTensor, ...]] = None
class RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
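# Illustrative note (not part of the model): RMSNorm rescales by the root mean square
# without mean-centering, i.e. y = weight * x / sqrt(mean(x ** 2, dim=-1) + eps).
# A reference computation equivalent to RMSNorm.forward (sketch, assuming a float32 input x):
#     x_norm = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
#     y = weight * x_norm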
class RotaryEmbedding(torch.nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=5e6, device=None):
super().__init__()
        # Fix RoPE initialization precision issues: https://zhuanlan.zhihu.com/p/678963442
        # DeepSpeed patches torch.arange to force execution on the GPU; use the original
        # (unpatched) torch.arange here when DeepSpeed is available.
        try:
            import deepspeed
            self.arange = deepspeed.runtime.zero.partition_parameters._orig_torch_arange
        except (ImportError, AttributeError):
            self.arange = torch.arange
        self.inv_freq = 1.0 / (base ** (self.arange(0, dim, 2).float().to(device) / dim))
self.max_seq_len_cached = max_position_embeddings
t = self.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32)
freqs = torch.outer(t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32)
self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32)
def forward(self, x, seq_len=None):
# x: [bs, num_attention_heads, seq_len, head_size]
# This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
if seq_len > self.max_seq_len_cached:
self.max_seq_len_cached = seq_len
t = self.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32)
freqs = torch.outer(t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32).to(x.device)
self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32).to(x.device)
return (
self.cos_cached[:, :, :seq_len, ...].to(torch.float32).to(x.device),
self.sin_cached[:, :, :seq_len, ...].to(torch.float32).to(x.device),
)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos_, sin_, position_ids):
cos = cos_.squeeze(1).squeeze(0) # [seq_len, dim]
sin = sin_.squeeze(1).squeeze(0) # [seq_len, dim]
cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
q_embed = (q.float() * cos) + (rotate_half(q.float()) * sin)
k_embed = (k.float() * cos) + (rotate_half(k.float()) * sin)
return q_embed.to(q.dtype), k_embed.to(k.dtype)
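# Illustrative note (not part of the model): rotary embeddings rotate each (x_i, x_{i+d/2})
# pair by a position-dependent angle, which apply_rotary_pos_emb expresses as
#     q_embed = q * cos + rotate_half(q) * sin
# with cos/sin gathered per position from RotaryEmbedding's cache. Shapes here are
# q, k: [bs, num_heads, seq_len, head_dim] and position_ids: [bs, seq_len].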
class MLP(nn.Module):
def __init__(
self,
hidden_size: int,
intermediate_size: int,
hidden_act: str,
):
super().__init__()
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.act_fn = ACT2FN[hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
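# Illustrative note (not part of the model): this MLP is the gated (SwiGLU-style) feed-forward,
#     down_proj(act_fn(gate_proj(x)) * up_proj(x))
# where act_fn comes from config.hidden_act (typically SiLU), so the gate branch modulates
# the up projection elementwise before projecting back down.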
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
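# Illustrative note (not part of the model): with grouped-query attention, e.g.
# num_attention_heads=32 and num_key_value_heads=8, repeat_kv(k, 4) expands k from
# [batch, 8, seq_len, head_dim] to [batch, 32, seq_len, head_dim] so that every query
# head has a matching key/value head.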
class Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: BaichuanConfig, is_sparse=False):
super().__init__()
self.config = config
self.position_embedding_type = config.position_embedding_type.lower()
self.num_kv_heads = config.num_key_value_heads
self.head_dim = config.head_dim
self.hidden_size = config.num_attention_heads * self.head_dim
self.hidden_kv_size = self.num_kv_heads * self.head_dim
if is_sparse:
self.num_heads = config.sparse_attention_heads
assert self.num_kv_heads == config.num_attention_heads
self.W_pack = nn.Linear(self.hidden_size, 3 * self.num_heads * self.head_dim, bias=config.attention_qkv_bias)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
else:
self.num_heads = config.num_attention_heads
if self.config.attention_qkv_pack:
self.W_pack = nn.Linear(config.hidden_size, self.hidden_size + self.hidden_kv_size * 2, bias=config.attention_qkv_bias)
if config.moe:
self.moe_W_pack = nn.Linear(config.hidden_size, self.hidden_size + self.hidden_kv_size * 2, bias=False)
else:
self.q_proj = nn.Linear(config.hidden_size, self.hidden_size, bias=config.attention_qkv_bias)
self.k_proj = nn.Linear(config.hidden_size, self.hidden_kv_size, bias=config.attention_qkv_bias)
self.v_proj = nn.Linear(config.hidden_size, self.hidden_kv_size, bias=config.attention_qkv_bias)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, config.hidden_size, bias=False)
if config.moe:
self.moe_o_proj = nn.Linear(self.num_heads * self.head_dim, config.hidden_size, bias=False)
if self.position_embedding_type == 'rope':
self.rotary_emb = RotaryEmbedding(
dim=self.head_dim,
max_position_embeddings=config.max_position_embeddings,
base=config.get_rotary_base()
)
elif self.position_embedding_type == 'alibi':
self.alibi_slopes = get_slopes(self.num_heads)
self.attention = create_attention_layer(self.hidden_size, self.num_heads, self.head_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def _repeat_kv(self, hidden_states: torch.Tensor, num_heads: int) -> torch.Tensor:
assert hidden_states.size(1) <= num_heads and num_heads % hidden_states.size(1) == 0
return repeat_kv(hidden_states, num_heads // hidden_states.size(1))
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
seqlens: Optional[torch.IntTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
group_index=None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
bsz, q_len = hidden_states.shape[:2]
if self.config.attention_qkv_pack:
if self.config.moe and group_index is not None:
proj = moe_matmul(hidden_states, [self.W_pack.weight, self.moe_W_pack.weight], group_index, lambda x, y: torch.einsum('bd,ld->bl', x, y))
if self.config.attention_qkv_bias:
proj += self.W_pack.bias
else:
proj = self.W_pack(hidden_states)
query_states, key_states, value_states = proj.split([self.hidden_size, self.hidden_kv_size, self.hidden_kv_size], dim=-1)
else:
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
# (B, S, hidden_size) -> (B, num_heads, S, head_size)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
# (B, S, hidden_size) -> (B, num_kv_heads, S, head_size)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
kv_seq_len = key_states.shape[-2]
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
if self.position_embedding_type == 'rope':
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len * get_sequence_parallel_size())
query_states, key_states = apply_rotary_pos_emb(
query_states, key_states, cos, sin,
get_sequence_parallel_chunk(position_ids)
)
if past_key_value is not None:
# reuse k, v, self_attention
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
past_key_value = (key_states, value_states) if use_cache else None
# repeat k/v heads if n_kv_heads < n_heads
key_states = self._repeat_kv(key_states, query_states.size(1))
value_states = self._repeat_kv(value_states, query_states.size(1))
if seqlens is not None:
seqlens = seqlens.to(dtype=torch.int32)
max_seqlen = (seqlens[1:] - seqlens[:-1]).max().item()
if self.position_embedding_type == 'alibi':
alibi_slopes = torch.tensor(self.alibi_slopes, dtype=torch.float32).to(query_states.device)
else:
alibi_slopes = None
attn_output = self.attention(
query_states, key_states, value_states, seqlens, seqlens,
max_seqlen, max_seqlen, causal=True, alibi_slopes=alibi_slopes, use_flash=True)
else:
attn_output = self.attention(
query_states, key_states, value_states, attn_mask=attention_mask, use_flash=False)
attn_output = attn_output.reshape(bsz, q_len, -1)
if not self.config.moe or group_index is None:
attn_output = self.o_proj(attn_output)
else:
attn_output = moe_matmul(attn_output, [self.o_proj.weight, self.moe_o_proj.weight], group_index, lambda x, y: torch.einsum('bd,ld->bl', x, y))
return attn_output, None, past_key_value
class DecoderLayer(nn.Module):
def __init__(self, config: BaichuanConfig, is_sparse=False):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Attention(config=config, is_sparse=is_sparse)
self.mlp = MLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
)
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
seqlens: Optional[torch.IntTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
group_index=None,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
seqlens=seqlens,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
group_index=group_index,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
class BaichuanPreTrainedModel(PreTrainedModel):
config_class = BaichuanConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["DecoderLayer"]
_keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv1d) or isinstance(module, nn.ConvTranspose1d):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm) or isinstance(module, nn.GroupNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, RMSNorm):
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, BaichuanModel):
module.gradient_checkpointing = value
class BaichuanModel(BaichuanPreTrainedModel):
def __init__(self, config: BaichuanConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.merge_size = 1
if config.audio_config.enable:
self.audio_model = BaichuanAudioEncoder(config.audio_config)
self.audio_bridge_model = BaichuanAudioBridge(config)
if config.visual_config.enable:
# self.visual_model = BaichuanVisualEncoder(config.visual_config)
self.visual_model = BaichuanVisualEncoder(config=config.visual_config.config_path)
self.merge_size = max(config.visual_config.merge_size, self.merge_size)
        if config.video_config.enable:  # handle the case where only video_config is enabled and visual_config is not
if not config.visual_config.enable:
self.visual_model = BaichuanVisualEncoder(config.video_config)
self.merge_size = max(config.video_config.merge_size, self.merge_size)
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.projector1 = nn.Sequential(nn.Linear(2*config.visual_config.hidden_size, config.hidden_size), nn.GELU(), nn.Linear(config.hidden_size, config.hidden_size))
self.layers = nn.ModuleList([
DecoderLayer(config, is_sparse=layer_idx in config.sparse_attention_layers)
for layer_idx in range(config.num_hidden_layers)
])
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = True
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def get_multimodal_mask(self, input_ids, pad_token_id, special_token_list):
        '''
        Build the special masks for a given modality:
        1. pad mask: token positions reserved in the text for image/audio/video features
        2. special token mask: special tokens (e.g. <start>/<end> for understanding models)
           that should not receive a next-token-prediction loss
        3. embedding mask / lm_head mask: marks the special tokens in the embedding / lm_head weight
        '''
pad_mask = torch.eq(input_ids, pad_token_id)
sp_mask = torch.zeros_like(input_ids, dtype=torch.bool)
lm_head_mask = torch.zeros([self.config.vocab_size, 1], dtype=torch.bool)
for sp_id in special_token_list:
sp_mask = torch.logical_or(sp_mask, torch.eq(input_ids, sp_id))
lm_head_mask[sp_id, 0] = True
return pad_mask, sp_mask, lm_head_mask
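    # Illustrative sketch (hypothetical token ids, not taken from any config): with
    # pad_token_id=9 and special_token_list=[7],
    #     input_ids = [[5, 7, 9, 9, 6]]
    # yields pad_mask = [[F, F, T, T, F]] and sp_mask = [[F, T, F, F, F]], while
    # lm_head_mask marks only row 7 of the vocabulary.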
def get_audio_embed(
self,
input_ids,
        text_embedding,  # either self.embed_tokens(input_ids) or the output of another modality
        features,  # list of tensors
        encoder_length,
        bridge_length,
        group_index=None,  # per-modality group index (used by MoE)
    ):
        pad_mask, sp_mask, _ = self.get_multimodal_mask(input_ids, self.config.audio_config.audio_pad_token_id, self.config.multimodal_special_token_list)
        if features is None or len(features) <= 0:  # empty list or None: run a fake input so gradients can still propagate
features, encoder_length, bridge_length = self.audio_model.fake_input(input_ids.device)
fake_input = True
else:
fake_input = False
audio_embed = self.audio_model(features, encoder_length)
audio_embed = self.audio_bridge_model(audio_embed, bridge_length) # (?, d)
        if not self.training:  # for inference with auto device map, move the multimodal output to the same device as input_ids
            audio_embed = audio_embed.to(input_ids.device)
        if not fake_input:  # check that the number of multimodal tokens matches the pad mask (incorrect truncation breaks this)
            assert pad_mask.sum() == audio_embed.shape[0]
        else:
            assert pad_mask.sum() <= 0  # 0 vs 1
        if group_index is None:
            group_index = pad_mask.to(torch.int32)
        else:
            current_index = torch.max(group_index) + 1
            group_index += pad_mask.to(torch.int32) * current_index  # assumes modalities do not overlap
        # merge the current modality embeddings with the text embeddings
        input_ids = torch.where(pad_mask, torch.cumsum(pad_mask.view(-1).to(input_ids), dim=0).view(input_ids.shape) - 1, input_ids)
        if self.config.train_multimodal_special_tokens_only and self.training:
            # only special tokens propagate gradients to the embedding weight, keeping the LLM part unchanged
            # note: the special token list should be shared across modalities, otherwise some tokens get their gradients stopped
            sp_mask = sp_mask.unsqueeze(-1).to(text_embedding)
            text_embedding = (1 - sp_mask) * text_embedding.detach() + sp_mask * text_embedding
        text_embedding = (1 - pad_mask.to(text_embedding)).unsqueeze(-1) * text_embedding  # zero out pad token positions (no gradient flows there)
        multimodal_embedding = torch.embedding(audio_embed, input_ids * pad_mask)  # non-pad positions pick up the row at index 0
        multimodal_embedding = pad_mask.to(multimodal_embedding).unsqueeze(-1) * multimodal_embedding  # zero out non-pad positions
        final_embedding = multimodal_embedding.to(text_embedding) + text_embedding
        return final_embedding, group_index  # never return None for group_index, so the MoE parameters always receive gradients
def get_visual_embed(
self,
input_ids,
        text_embedding,  # either self.embed_tokens(input_ids) or the output of another modality
        images,
        group_index,  # per-modality group index (used by MoE)
        images_grid
    ):
        # TODO: merge the duplicated logic with get_audio_embed to reduce redundant code
        pad_mask, sp_mask, _ = self.get_multimodal_mask(input_ids, self.config.visual_config.image_pad_token_id, self.config.multimodal_special_token_list)
        if images is None or len(images) <= 0:  # empty list or None: run a fake input so gradients can still propagate
images = self.visual_model.fake_input(input_ids, self.merge_size)
images_grid = [[1, 1]]
fake_input = True
else:
fake_input = False
if self.config.visual_config.feature_mode == 'anyres':
pass
else:
with torch.no_grad():
images = torch.stack(images, dim=0)
visual_embed_visual, visual_embed_semantic, visual_idx_visual, visual_idx_semantic = self.visual_model(images)
if len(visual_idx_visual.shape) == 2:
print("!!! unsqueeze visual_idx_visual, get_visual_embed")
visual_idx_visual = visual_idx_visual.unsqueeze(0)
if len(visual_idx_semantic.shape) == 2:
print("!!! unsqueeze visual_idx_semantic, get_visual_embed")
visual_idx_semantic = visual_idx_semantic.unsqueeze(0)
visual_embed_visual = visual_embed_visual.reshape(-1, visual_embed_visual.shape[-1]) # [B*256, 1024]
visual_embed_semantic = visual_embed_semantic.reshape(-1, visual_embed_semantic.shape[-1]) # [B*256, 1024]
visual_embed = torch.cat((visual_embed_semantic, visual_embed_visual), dim=-1)
visual_embed = self.projector1(visual_embed)
        if not self.training:  # for inference with auto device map, move the multimodal output to the same device as input_ids
            visual_embed = visual_embed.to(input_ids.device)
        if not fake_input:  # check that the number of multimodal tokens matches the pad mask (incorrect truncation breaks this)
            assert pad_mask.sum() == visual_embed.shape[0], '{} != {} images.shape={} input_ids.tolist()={}'.format(pad_mask.sum(), visual_embed.shape[0], images.shape, input_ids.tolist())
        else:
            assert pad_mask.sum() <= 0, '{} != {}'.format(pad_mask.sum(), visual_embed.shape[0])
        # merge the current modality embeddings with the text embeddings
        input_ids = torch.where(pad_mask, torch.cumsum(pad_mask.view(-1).to(input_ids), dim=0).view(input_ids.shape) - 1, input_ids)
        if self.config.train_multimodal_special_tokens_only and self.training:
            # only special tokens propagate gradients to the embedding weight, keeping the LLM part unchanged
            # note: the special token list should be shared across modalities, otherwise some tokens get their gradients stopped
            sp_mask = sp_mask.unsqueeze(-1).to(text_embedding)
            text_embedding = (1 - sp_mask) * text_embedding.detach() + sp_mask * text_embedding
        text_embedding = (1 - pad_mask.to(text_embedding)).unsqueeze(-1) * text_embedding  # zero out pad token positions (no gradient flows there)
        multimodal_embedding = torch.embedding(visual_embed, input_ids * pad_mask)  # non-pad positions pick up the row at index 0
        multimodal_embedding = pad_mask.to(multimodal_embedding).unsqueeze(-1) * multimodal_embedding  # zero out non-pad positions
        final_embedding = multimodal_embedding.to(text_embedding) + text_embedding
        if group_index is None:
            group_index = pad_mask.to(torch.int32)
        else:
            current_index = torch.max(group_index) + 1
            group_index += pad_mask.to(torch.int32) * current_index  # assumes modalities do not overlap
        return final_embedding, group_index, visual_idx_visual, visual_idx_semantic  # never return None for group_index, so the MoE parameters always receive gradients
def get_video_embed(
self,
input_ids,
        text_embedding,  # either self.embed_tokens(input_ids) or the output of another modality
        images,
        group_index,  # per-modality group index (used by MoE)
        images_grid
    ):
        # TODO: merge the duplicated logic with get_audio_embed to reduce redundant code
        pad_mask, sp_mask, _ = self.get_multimodal_mask(input_ids, self.config.video_config.video_place_token_id, self.config.multimodal_special_token_list)
        if images is None or len(images) <= 0:  # empty list or None: run a fake input so gradients can still propagate
images = self.visual_model.fake_input(input_ids.device)
images_grid = [[1, 1]]
fake_input = True
else:
fake_input = False
images = torch.cat(images, dim=0)
        visual_embed = self.visual_model(images)[self.visual_model.config.layer_idx]  # drop the class token
        visual_embed = self.visual_bridge_model(visual_embed).view(-1, text_embedding.shape[-1])  # (?, d)
        if not self.training:  # for inference with auto device map, move the multimodal output to the same device as input_ids
            visual_embed = visual_embed.to(input_ids.device)
        if not fake_input:  # check that the number of multimodal tokens matches the pad mask (incorrect truncation breaks this)
            assert pad_mask.sum() == visual_embed.shape[0], '{} != {}'.format(pad_mask.sum(), visual_embed.shape[0])
        else:
            assert pad_mask.sum() <= 0, '{} != {}'.format(pad_mask.sum(), visual_embed.shape[0])
        # merge the current modality embeddings with the text embeddings
        input_ids = torch.where(pad_mask, torch.cumsum(pad_mask.view(-1).to(input_ids), dim=0).view(input_ids.shape) - 1, input_ids)
        if self.config.train_multimodal_special_tokens_only and self.training:
            # only special tokens propagate gradients to the embedding weight, keeping the LLM part unchanged
            # note: the special token list should be shared across modalities, otherwise some tokens get their gradients stopped
            sp_mask = sp_mask.unsqueeze(-1).to(text_embedding)
            text_embedding = (1 - sp_mask) * text_embedding.detach() + sp_mask * text_embedding
        text_embedding = (1 - pad_mask.to(text_embedding)).unsqueeze(-1) * text_embedding  # zero out pad token positions (no gradient flows there)
        multimodal_embedding = torch.embedding(visual_embed, input_ids * pad_mask)  # non-pad positions pick up the row at index 0
        multimodal_embedding = pad_mask.to(multimodal_embedding).unsqueeze(-1) * multimodal_embedding  # zero out non-pad positions
        final_embedding = multimodal_embedding.to(text_embedding) + text_embedding
        if group_index is None:
            group_index = pad_mask.to(torch.int32)
        else:
            current_index = torch.max(group_index) + 1
            group_index += pad_mask.to(torch.int32) * current_index  # assumes modalities do not overlap
        return final_embedding, group_index  # never return None for group_index, so the MoE parameters always receive gradients
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
seqlens: Optional[torch.IntTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
audios: Optional[List|torch.Tensor] = None,
encoder_length: Optional[torch.Tensor] = None,
bridge_length: Optional[torch.Tensor] = None,
images: Optional[List|torch.Tensor] = None,
images_grid: Optional[List|torch.Tensor] = None,
videos: Optional[List|torch.Tensor] = None,
videos_grid: Optional[List|torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
# image_ids: Optional[List] = [],
image_ids_visual: Optional[List] = [],
image_ids_semantic: Optional[List] = [],
gen_mode: Optional[bool] = False
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
group_index = None
if inputs_embeds is None:
sp_input_ids = get_sequence_parallel_chunk(input_ids) # torch.Size([1, 3])
if image_ids_visual is None or len(image_ids_visual) <= 0:
inputs_embeds = self.embed_tokens(sp_input_ids)
if self.config.audio_config.enable:
inputs_embeds, group_index = self.get_audio_embed(sp_input_ids, inputs_embeds, audios, encoder_length, bridge_length)
if self.config.visual_config.enable:
inputs_embeds, group_index, visual_idx_visual, visual_idx_semantic = self.get_visual_embed(sp_input_ids, inputs_embeds, images, group_index, images_grid)
if self.config.video_config.enable:
inputs_embeds, group_index = self.get_video_embed(sp_input_ids, inputs_embeds, videos, group_index, videos_grid)
else:
image_ids_end = image_ids_visual[-1].clone() # torch.Size([1, 1, 4])
inputs_embeds_visual = self.visual_model.vision_tower.rqtransformer_visual.embed_with_model_aux(image_ids_end, self.visual_model.vision_tower.rqvaesiglip, mode="visual") # torch.Size([1, 1, 4, 1024])
inputs_embeds_visual = torch.cumsum(inputs_embeds_visual, dim=-2)[:,:,-1,:] # torch.Size([1, 1, 1024])
image_ids_end = image_ids_semantic[-1].clone() # torch.Size([1, 1, 4])
inputs_embeds_semantic = self.visual_model.vision_tower.rqtransformer_semantic.embed_with_model_aux(image_ids_end, self.visual_model.vision_tower.rqvaesiglip, mode="semantic") # torch.Size([1, 1, 4, 1024])
inputs_embeds_semantic = torch.cumsum(inputs_embeds_semantic, dim=-2)[:,:,-1,:] # torch.Size([1, 1, 1024])
inputs_embeds = torch.cat((inputs_embeds_semantic, inputs_embeds_visual), dim=-1)
inputs_embeds = self.projector1(inputs_embeds) # torch.Size([1, 1, 2048])
visual_idx_visual, visual_idx_semantic = None, None
if seqlens is not None and seqlens.ndim == 2:
            # flatten batched multi-pack samples into cumulative sequence lengths (cu_seqlens)
cu_seqlens = []
offset, seqlen = 0, seqlens.size(1)
for lens in seqlens:
cu_seqlens.append(offset)
cu_seqlens.extend((lens[(lens > 0) & (lens < seqlen)] + offset).tolist())
offset += seqlen
cu_seqlens.append(offset)
seqlens = torch.tensor(cu_seqlens, dtype=seqlens.dtype, device=seqlens.device)
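            # Illustrative sketch (hypothetical values, assuming each row stores cumulative
            # sub-sequence boundaries padded to the sample length): for a padded length of 4 and
            #     seqlens = [[2, 4, 0, 0], [4, 0, 0, 0]]
            # the loop above yields cu_seqlens = [0, 2, 4, 8], i.e. flash-attention style
            # offsets over the flattened batch.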
elif seqlens is None and self.training:
            # pretraining compatibility: seqlens is None, so treat every sample as one full-length sequence
seqlens = torch.arange(
end=input_ids.size(0) + 1,
dtype=torch.int32,
device=input_ids.device
) * input_ids.size(1)
if seqlens is not None:
attention_mask = None # unset attention_mask to save memory
if seqlens is None and attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
)
if attention_mask is not None:
attention_mask = _prepare_4d_causal_attention_mask(
attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
)
# embed positions
hidden_states = inputs_embeds
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, False, group_index)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
position_ids,
seqlens,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
seqlens=seqlens,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
group_index=group_index,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
# visual_idx=visual_idx
visual_idx_visual=visual_idx_visual,
visual_idx_semantic=visual_idx_semantic
)
class NormHead(nn.Module):
def __init__(self, hidden_size, vocab_size, bias=False):
super().__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.weight = nn.Parameter(torch.empty((self.vocab_size, self.hidden_size)))
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
def forward(self, hidden_states, mask=None):
norm_weight = nn.functional.normalize(self.weight)
if mask is not None:
mask = mask.to(norm_weight)
norm_weight = norm_weight * mask + (1 - mask) * norm_weight.detach()
return nn.functional.linear(hidden_states, norm_weight)
def extra_repr(self) -> str:
return f'in_features={self.hidden_size}, out_features={self.vocab_size}'
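# Illustrative note (not part of the model): NormHead computes logits against the
# L2-normalized weight rows,
#     logits = hidden_states @ normalize(weight).T
# and the optional mask lets gradients flow only into the selected vocabulary rows
# (all other rows go through .detach()).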
@dataclass
class BaichuanMMCausalLMOutputWithPast(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
text_logits: Optional[torch.FloatTensor] = None
visual_logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
text_nt_loss: Optional[torch.FloatTensor] = None
image_nt_loss_visual: Optional[torch.FloatTensor] = None
image_loss_count_visual: Optional[torch.FloatTensor] = None
image_nt_loss_semantic: Optional[torch.FloatTensor] = None
image_loss_count_semantic: Optional[torch.FloatTensor] = None
class BaichuanForCausalLM(BaichuanPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.model = BaichuanModel(config)
if config.use_norm_head:
self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False)
else:
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.projector_semantic = nn.Sequential(nn.Linear(config.hidden_size, config.visual_config.depth_head_embed_dim), nn.GELU(), nn.Linear(config.visual_config.depth_head_embed_dim, config.visual_config.depth_head_embed_dim))
self.projector_visual = nn.Sequential(nn.Linear(config.hidden_size, config.visual_config.depth_head_embed_dim), nn.GELU(), nn.Linear(config.visual_config.depth_head_embed_dim, config.visual_config.depth_head_embed_dim))
# Initialize weights and apply final processing
self.post_init()
def bind_processor(self, tokenizer, **kwargs):
self.processor = BaichuanMMProcessor(
tokenizer=tokenizer,
config=self.config,
**kwargs,
)
return self.processor
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
seqlens: Optional[torch.IntTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
audios: Optional[List|torch.Tensor] = None,
encoder_length: Optional[torch.Tensor] = None,
bridge_length: Optional[torch.Tensor] = None,
images: Optional[torch.Tensor] = None,
images_grid: Optional[torch.Tensor] = None,
videos: Optional[torch.Tensor] = None,
videos_grid: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
# image_ids: Optional[List] = [],
image_ids_visual: Optional[List] = [],
image_ids_semantic: Optional[List] = [],
gen_mode: Optional[bool] = False,
cfg: Optional[float] = None
) -> Union[Tuple, CausalLMOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
_, sp_mask, _ = self.model.get_multimodal_mask(input_ids, self.config.visual_config.image_pad_token_id, self.config.multimodal_special_token_list)
        # TODO: unfreeze the lm_head parameters for the learnable special tokens
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
seqlens=seqlens,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
audios=audios,
encoder_length=encoder_length,
bridge_length=bridge_length,
images=images,
images_grid=images_grid,
videos=videos,
videos_grid=videos_grid,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
# image_ids=image_ids,
image_ids_visual=image_ids_visual,
image_ids_semantic=image_ids_semantic,
gen_mode=gen_mode
)
hidden_states = outputs[0] # torch.Size([2, 16384, 3584])
# visual_idx = outputs.visual_idx
visual_idx_visual = outputs.visual_idx_visual
visual_idx_semantic = outputs.visual_idx_semantic
        # allow gradients through the lm_head rows of the learnable special tokens
special_with_loss_list = list(set(self.config.multimodal_special_token_list) - set(self.config.multimodal_special_token_no_loss_list))
pad_mask, sp_with_loss_mask, lm_head_mask = self.model.get_multimodal_mask(input_ids, self.config.visual_config.image_pad_token_id, special_with_loss_list)
bs, seq_len, _ = hidden_states.shape
if self.training:
B, IMAGE_TOKEN, DEPTH = visual_idx_visual.shape # 58, 256, 4
if self.config.train_multimodal_special_tokens_only and self.training and len(special_with_loss_list) > 0:
if self.config.use_norm_head:
logits = self.lm_head(hidden_states, mask=lm_head_mask)
else:
lm_head_mask = lm_head_mask.to(self.lm_head.weight)
norm_weight = self.lm_head.weight * lm_head_mask + (1 - lm_head_mask) * self.lm_head.weight.detach()
logits = torch.einsum('bsd,ld->bsl', hidden_states, norm_weight)
else:
logits = self.lm_head(hidden_states.reshape(bs, -1, self.config.hidden_size)) # torch.Size([bs, 4096, 152064])
if pad_mask.sum() <= 0:
image_hidden_states = torch.zeros([B, IMAGE_TOKEN, self.config.hidden_size], dtype=hidden_states.dtype, device=hidden_states.device)
else:
shifted_pad_mask = torch.cat((pad_mask[:, 1:], torch.zeros(bs, 1).to(pad_mask)), dim=1)
image_hidden_states = hidden_states[shifted_pad_mask == 1].reshape(B, IMAGE_TOKEN, self.config.hidden_size) # 58, 256, 2048
image_hidden_states_visual = self.projector_visual(image_hidden_states) # 58, 256, 2560
image_hidden_states_semantic = self.projector_semantic(image_hidden_states) # 58, 256, 2560
visual_logits_visual = self.model.visual_model.vision_tower.rqtransformer_visual(image_hidden_states_visual, visual_idx_visual, self.model.visual_model.vision_tower.rqvaesiglip, mode="visual")
visual_logits_semantic = self.model.visual_model.vision_tower.rqtransformer_semantic(image_hidden_states_semantic, visual_idx_semantic, self.model.visual_model.vision_tower.rqvaesiglip, mode="semantic")
else:
# if gen_mode
if (image_ids_visual is not None and len(image_ids_visual) > 0) or (len(image_ids_visual) <= 0 and input_ids[-1, -1] == self.config.visual_config.image_start_token_id and input_ids[-1, -2] == self.config.visual_config.image_gen_start_token_id):
self.model.visual_model.vision_tower.rqtransformer_visual.eval()
self.model.visual_model.vision_tower.rqtransformer_semantic.eval()
hidden_state = hidden_states[:, -1, :]
if len(hidden_state.shape) == 2:
hidden_state = hidden_state.unsqueeze(1)
hidden_state_visual = self.projector_visual(hidden_state)
hidden_state_semantic = self.projector_semantic(hidden_state)
                image_hidden_state_visual, code_visual = self.model.visual_model.vision_tower.rqtransformer_visual.generate(
                    hidden_state_visual,
                    self.model.visual_model.vision_tower.rqvaesiglip,
                    cfg, mode="visual")  # image_hidden_state: input dim 4096, output dim 1024
                image_hidden_state_semantic, code_semantic = self.model.visual_model.vision_tower.rqtransformer_semantic.generate(
                    hidden_state_semantic,
                    self.model.visual_model.vision_tower.rqvaesiglip,
                    cfg, mode="semantic")  # image_hidden_state: input dim 4096, output dim 1024
image_ids_visual.append(code_visual)
image_ids_semantic.append(code_semantic)
logits = self.lm_head(hidden_states.reshape(bs, -1, self.config.hidden_size))
loss = torch.tensor(0, device=hidden_states.device, dtype=hidden_states.dtype)
text_nt_loss = torch.tensor(0, device=hidden_states.device, dtype=hidden_states.dtype)
image_nt_loss_visual = torch.tensor(0, device=hidden_states.device, dtype=hidden_states.dtype)
image_nt_loss_semantic = torch.tensor(0, device=hidden_states.device, dtype=hidden_states.dtype)
image_loss_count_visual = torch.tensor(0, device=hidden_states.device, dtype=hidden_states.dtype)
image_loss_count_semantic = torch.tensor(0, device=hidden_states.device, dtype=hidden_states.dtype)
visual_loss_list_visual = [torch.tensor(0, device=hidden_states.device, dtype=hidden_states.dtype) for _ in range(self.config.visual_config.block_size[-1])] # modify
visual_loss_list_semantic = [torch.tensor(0, device=hidden_states.device, dtype=hidden_states.dtype) for _ in range(self.config.visual_config.block_size[-1])] # modify
if labels is not None:
# text loss
shift_logits = logits[pad_mask == 0][..., :-1, :].contiguous()
shift_text_labels = labels[pad_mask == 0][..., 1:].contiguous()
valid_mask = torch.gt(shift_text_labels, -1)
_, sp_mask_, _ = self.model.get_multimodal_mask(shift_text_labels, self.config.visual_config.image_pad_token_id, self.config.multimodal_special_token_list)
_, sp_with_loss_mask_, _ = self.model.get_multimodal_mask(shift_text_labels, self.config.visual_config.image_pad_token_id, special_with_loss_list)
text_mask = torch.logical_and(valid_mask, torch.logical_not(sp_mask_))
valid_mask = torch.logical_or(torch.logical_and(valid_mask, torch.logical_not(sp_mask_)), sp_with_loss_mask_)
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_text_labels = shift_text_labels.view(-1).to(shift_logits.device)
flatten_loss = F.cross_entropy(shift_logits, shift_text_labels, ignore_index=-100, reduction='none')
text_flatten_loss = torch.masked_select(flatten_loss, text_mask.view(-1))
valid_flatten_loss = torch.masked_select(flatten_loss, valid_mask.view(-1))
# image loss
if pad_mask.sum() > 0:
visual_labels_original = labels[pad_mask == 1]
visual_valid_mask = visual_labels_original > 0
else:
visual_valid_mask = torch.zeros(visual_idx_visual[..., 0].view(-1).shape, dtype=valid_mask.dtype, device=valid_mask.device)
            for i in range(len(visual_logits_visual)):  # one entry per depth-transformer level; compute each level's loss separately, each of size B*256
                visual_loss_list_visual[i] = F.cross_entropy(visual_logits_visual[i], visual_idx_visual[..., i].view(-1), reduction='none')  # visual_idx.shape = B, 256, 4
            for i in range(len(visual_logits_semantic)):  # one entry per depth-transformer level; compute each level's loss separately, each of size B*256
                visual_loss_list_semantic[i] = F.cross_entropy(visual_logits_semantic[i], visual_idx_semantic[..., i].view(-1), reduction='none')  # visual_idx.shape = B, 256, 4
total_weight = sum(self.config.visual_config.visual_codebook_loss_weights)
            visual_flatten_loss_visual = sum(w * loss for w, loss in zip(self.config.visual_config.visual_codebook_loss_weights, visual_loss_list_visual)) / total_weight  # weighted average across depth levels
visual_flatten_loss_visual = visual_flatten_loss_visual.to(text_flatten_loss.dtype).to(text_flatten_loss.device) # B*256
visual_flatten_loss_visual = torch.masked_select(visual_flatten_loss_visual, visual_valid_mask)
            visual_flatten_loss_semantic = sum(w * loss for w, loss in zip(self.config.visual_config.visual_codebook_loss_weights, visual_loss_list_semantic)) / total_weight  # weighted average across depth levels
visual_flatten_loss_semantic = visual_flatten_loss_semantic.to(text_flatten_loss.dtype).to(text_flatten_loss.device) # B*256
visual_flatten_loss_semantic = torch.masked_select(visual_flatten_loss_semantic, visual_valid_mask)
# merge
loss = torch.mean(torch.cat((valid_flatten_loss, visual_flatten_loss_visual, visual_flatten_loss_semantic), dim=0))
text_nt_loss = torch.mean(text_flatten_loss).detach()
image_nt_loss_visual = torch.mean(visual_flatten_loss_visual).detach() if visual_flatten_loss_visual.numel() != 0 else image_nt_loss_visual
image_loss_count_visual = torch.tensor(1, device=hidden_states.device, dtype=hidden_states.dtype) if visual_flatten_loss_visual.numel() != 0 else image_loss_count_visual
image_nt_loss_semantic = torch.mean(visual_flatten_loss_semantic).detach() if visual_flatten_loss_semantic.numel() != 0 else image_nt_loss_semantic
image_loss_count_semantic = torch.tensor(1, device=hidden_states.device, dtype=hidden_states.dtype) if visual_flatten_loss_semantic.numel() != 0 else image_loss_count_semantic
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return BaichuanMMCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
text_nt_loss=text_nt_loss,
image_nt_loss_visual=image_nt_loss_visual,
image_loss_count_visual=image_loss_count_visual,
image_nt_loss_semantic=image_nt_loss_semantic,
image_loss_count_semantic=image_loss_count_semantic
)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
):
if past_key_values:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
elif past_key_values is not None:
model_inputs = {"input_ids": input_ids}
else:
model_inputs = {"input_ids": input_ids,
"audios": kwargs.get("audios", None), "encoder_length": kwargs.get("encoder_length", None), "bridge_length": kwargs.get("bridge_length", None),
"images": kwargs.get("images", None),
"videos": kwargs.get("videos", None)
}
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
"images_grid": kwargs.get("images_grid"),
"videos_grid": kwargs.get("videos_grid")
}
)
return model_inputs
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
def chat(self, tokenizer, messages: List[dict], stream=False,
generation_config: Optional[GenerationConfig]=None):
generation_config = generation_config or self.generation_config
input_ids = build_chat_input(self, tokenizer, messages, generation_config.max_new_tokens)
if stream:
streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
Thread(target=self.generate, kwargs=dict(
inputs=input_ids, streamer=streamer,
generation_config=generation_config,
)).start()
return streamer
else:
outputs = self.generate(input_ids, generation_config=generation_config)
response = tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True)
return response
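# Minimal usage sketch (paths and message content are placeholders, not part of this file;
# assumes a checkpoint directory that ships this remote code):
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint", trust_remote_code=True)
#     model = AutoModelForCausalLM.from_pretrained("path/to/checkpoint", trust_remote_code=True)
#     messages = [{"role": "user", "content": "Hello"}]
#     print(model.chat(tokenizer, messages))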