"""Inference-only Qwen2 model compatible with HuggingFace weights.""" |
|
import logging |
|
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
|
|
|
import torch |
|
from torch import nn |
|
|
|
from sglang.srt.distributed import ( |
|
get_pp_group, |
|
get_tensor_model_parallel_rank, |
|
get_tensor_model_parallel_world_size, |
|
) |
|
from sglang.srt.layers.activation import SiluAndMul |
|
from sglang.srt.layers.layernorm import RMSNorm |
|
from sglang.srt.layers.linear import ( |
|
MergedColumnParallelLinear, |
|
QKVParallelLinear, |
|
RowParallelLinear, |
|
) |
|
from sglang.srt.layers.logits_processor import LogitsProcessor |
|
from sglang.srt.layers.pooler import Pooler, PoolingType |
|
from sglang.srt.layers.quantization.base_config import QuantizationConfig |
|
from sglang.srt.layers.radix_attention import RadixAttention |
|
from sglang.srt.layers.rotary_embedding import get_rope |
|
from sglang.srt.layers.utils import PPMissingLayer, get_layer_id |
|
from sglang.srt.layers.vocab_parallel_embedding import ( |
|
ParallelLMHead, |
|
VocabParallelEmbedding, |
|
) |
|
from sglang.srt.managers.schedule_batch import global_server_args_dict |
|
from sglang.srt.model_executor.forward_batch_info import ForwardBatch, PPProxyTensors |
|
from sglang.srt.model_loader.weight_utils import ( |
|
default_weight_loader, |
|
kv_cache_scales_loader, |
|
) |
|
from sglang.srt.utils import add_prefix, make_layers |
|
|
|
# Placeholder used only in type annotations; the transformers Qwen2Config class
# is not imported at module load time.
Qwen2Config = None
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
class Qwen2MLP(nn.Module): |
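    """Feed-forward block: fused gate/up projections, SiLU-and-multiply, then a down projection."""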
|
def __init__( |
|
self, |
|
hidden_size: int, |
|
intermediate_size: int, |
|
hidden_act: str, |
|
quant_config: Optional[QuantizationConfig] = None, |
|
prefix: str = "", |
|
) -> None: |
|
super().__init__() |
|
self.gate_up_proj = MergedColumnParallelLinear( |
|
hidden_size, |
|
[intermediate_size] * 2, |
|
bias=False, |
|
quant_config=quant_config, |
|
prefix=add_prefix("gate_up_proj", prefix), |
|
) |
|
self.down_proj = RowParallelLinear( |
|
intermediate_size, |
|
hidden_size, |
|
bias=False, |
|
quant_config=quant_config, |
|
prefix=add_prefix("down_proj", prefix), |
|
) |
|
if hidden_act != "silu": |
|
raise ValueError( |
|
f"Unsupported activation: {hidden_act}. " |
|
"Only silu is supported for now." |
|
) |
|
self.act_fn = SiluAndMul() |
|
|
|
def forward(self, x): |
|
gate_up, _ = self.gate_up_proj(x) |
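        # act_fn splits the fused tensor into gate/up halves and computes SiLU(gate) * up.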
|
x = self.act_fn(gate_up) |
|
x, _ = self.down_proj(x) |
|
return x |
|
|
|
|
|
class Qwen2Attention(nn.Module): |
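    """Self-attention with rotary position embeddings and grouped-query KV heads, backed by RadixAttention."""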
|
def __init__( |
|
self, |
|
hidden_size: int, |
|
num_heads: int, |
|
num_kv_heads: int, |
|
head_dim: Optional[int] = None, |
|
layer_id: int = 0, |
|
rope_theta: float = 1000000, |
|
rope_scaling: Optional[Dict[str, Any]] = None, |
|
max_position_embeddings: int = 32768, |
|
quant_config: Optional[QuantizationConfig] = None, |
|
dual_chunk_attention_config: Optional[dict[str, Any]] = None, |
|
prefix: str = "", |
|
) -> None: |
|
super().__init__() |
|
self.hidden_size = hidden_size |
|
tp_size = get_tensor_model_parallel_world_size() |
|
self.total_num_heads = num_heads |
|
assert self.total_num_heads % tp_size == 0 |
|
self.num_heads = self.total_num_heads // tp_size |
|
self.total_num_kv_heads = num_kv_heads |
|
        if self.total_num_kv_heads >= tp_size:
            # The number of KV heads is at least the TP size: partition the
            # KV heads across the tensor-parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # The number of KV heads is smaller than the TP size: replicate the
            # KV heads across the tensor-parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
|
if head_dim is not None: |
|
self.head_dim = head_dim |
|
else: |
|
self.head_dim = hidden_size // self.total_num_heads |
|
self.q_size = self.num_heads * self.head_dim |
|
self.kv_size = self.num_kv_heads * self.head_dim |
|
self.scaling = self.head_dim**-0.5 |
|
self.rope_theta = rope_theta |
|
self.max_position_embeddings = max_position_embeddings |
|
|
|
self.qkv_proj = QKVParallelLinear( |
|
hidden_size, |
|
self.head_dim, |
|
self.total_num_heads, |
|
self.total_num_kv_heads, |
|
bias=True, |
|
quant_config=quant_config, |
|
prefix=add_prefix("qkv_proj", prefix), |
|
) |
|
self.o_proj = RowParallelLinear( |
|
self.total_num_heads * self.head_dim, |
|
hidden_size, |
|
bias=False, |
|
quant_config=quant_config, |
|
prefix=add_prefix("o_proj", prefix), |
|
) |
|
|
|
self.rotary_emb = get_rope( |
|
self.head_dim, |
|
rotary_dim=self.head_dim, |
|
max_position=max_position_embeddings, |
|
base=rope_theta, |
|
rope_scaling=rope_scaling, |
|
dual_chunk_attention_config=dual_chunk_attention_config, |
|
) |
|
self.attn = RadixAttention( |
|
self.num_heads, |
|
self.head_dim, |
|
self.scaling, |
|
num_kv_heads=self.num_kv_heads, |
|
layer_id=layer_id, |
|
quant_config=quant_config, |
|
prefix=add_prefix("attn", prefix), |
|
) |
|
|
|
def forward( |
|
self, |
|
positions: torch.Tensor, |
|
hidden_states: torch.Tensor, |
|
forward_batch: ForwardBatch, |
|
) -> torch.Tensor: |
|
qkv, _ = self.qkv_proj(hidden_states) |
|
q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) |
|
q, k = self.rotary_emb(positions, q, k) |
|
attn_output = self.attn(q, k, v, forward_batch) |
|
output, _ = self.o_proj(attn_output) |
|
return output |
|
|
|
|
|
class Qwen2DecoderLayer(nn.Module): |
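    """One decoder layer: pre-norm self-attention followed by a pre-norm MLP, each with a residual connection."""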
|
def __init__( |
|
self, |
|
config: Qwen2Config, |
|
layer_id: int = 0, |
|
quant_config: Optional[QuantizationConfig] = None, |
|
prefix: str = "", |
|
alt_stream: Optional[torch.cuda.Stream] = None, |
|
) -> None: |
|
super().__init__() |
|
self.hidden_size = config.hidden_size |
|
rope_theta = getattr(config, "rope_theta", 1000000) |
|
rope_scaling = getattr(config, "rope_scaling", None) |
|
max_position_embeddings = getattr(config, "max_position_embeddings", 32768) |
|
head_dim = getattr(config, "head_dim", None) |
|
dual_chunk_attention_config = getattr( |
|
config, "dual_chunk_attention_config", None |
|
) |
|
self.self_attn = Qwen2Attention( |
|
hidden_size=self.hidden_size, |
|
num_heads=config.num_attention_heads, |
|
num_kv_heads=config.num_key_value_heads, |
|
head_dim=head_dim, |
|
layer_id=layer_id, |
|
rope_theta=rope_theta, |
|
rope_scaling=rope_scaling, |
|
max_position_embeddings=max_position_embeddings, |
|
quant_config=quant_config, |
|
dual_chunk_attention_config=dual_chunk_attention_config, |
|
prefix=add_prefix("self_attn", prefix), |
|
) |
|
self.mlp = Qwen2MLP( |
|
hidden_size=self.hidden_size, |
|
intermediate_size=config.intermediate_size, |
|
hidden_act=config.hidden_act, |
|
quant_config=quant_config, |
|
prefix=add_prefix("mlp", prefix), |
|
) |
|
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) |
|
self.post_attention_layernorm = RMSNorm( |
|
config.hidden_size, eps=config.rms_norm_eps |
|
) |
|
|
|
def forward( |
|
self, |
|
positions: torch.Tensor, |
|
hidden_states: torch.Tensor, |
|
forward_batch: ForwardBatch, |
|
residual: Optional[torch.Tensor], |
|
) -> Tuple[torch.Tensor, torch.Tensor]: |
|
|
|
if residual is None: |
|
residual = hidden_states |
|
hidden_states = self.input_layernorm(hidden_states) |
|
else: |
|
hidden_states, residual = self.input_layernorm(hidden_states, residual) |
|
hidden_states = self.self_attn( |
|
positions=positions, |
|
hidden_states=hidden_states, |
|
forward_batch=forward_batch, |
|
) |
|
|
|
|
|
        # Fully connected (MLP) block.
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual
        )
|
hidden_states = self.mlp(hidden_states) |
|
return hidden_states, residual |
|
|
|
|
|
class Qwen2Model(nn.Module): |
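    """Embedding layer, the stack of decoder layers, and the final norm; aware of pipeline parallelism."""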
|
def __init__( |
|
self, |
|
config: Qwen2Config, |
|
quant_config: Optional[QuantizationConfig] = None, |
|
prefix: str = "", |
|
decoder_layer_type: type[nn.Module] = Qwen2DecoderLayer, |
|
alt_stream: Optional[torch.cuda.Stream] = None, |
|
) -> None: |
|
super().__init__() |
|
self.config = config |
|
self.padding_idx = config.pad_token_id |
|
self.vocab_size = config.vocab_size |
|
self.pp_group = get_pp_group() |
|
|
|
if self.pp_group.is_first_rank: |
|
self.embed_tokens = VocabParallelEmbedding( |
|
config.vocab_size, |
|
config.hidden_size, |
|
quant_config=quant_config, |
|
enable_tp=not global_server_args_dict["enable_dp_attention"], |
|
prefix=add_prefix("embed_tokens", prefix), |
|
) |
|
else: |
|
self.embed_tokens = PPMissingLayer() |
|
|
|
|
|
        # Use the provided decoder layer type, or default to Qwen2DecoderLayer.
        decoder_layer_type = decoder_layer_type or Qwen2DecoderLayer
|
self.layers, self.start_layer, self.end_layer = make_layers( |
|
config.num_hidden_layers, |
|
lambda idx, prefix: decoder_layer_type( |
|
layer_id=idx, |
|
config=config, |
|
quant_config=quant_config, |
|
prefix=prefix, |
|
alt_stream=alt_stream, |
|
), |
|
pp_rank=self.pp_group.rank_in_group, |
|
pp_size=self.pp_group.world_size, |
|
prefix=add_prefix("layers", prefix), |
|
) |
|
if self.pp_group.is_last_rank: |
|
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) |
|
else: |
|
self.norm = PPMissingLayer(return_tuple=True) |
|
|
|
|
|
        # Indices of layers whose inputs are captured as auxiliary hidden states
        # (used by EAGLE3 speculative decoding; see set_eagle3_layers_to_capture).
        self.layers_to_capture = []
|
|
|
def get_input_embedding(self, input_ids: torch.Tensor) -> torch.Tensor: |
|
if hasattr(self.config, "scale_emb"): |
|
return self.get_input_embeddings()(input_ids) * self.config.scale_emb |
|
else: |
|
return self.get_input_embeddings()(input_ids) |
|
|
|
def get_input_embeddings(self) -> nn.Embedding: |
|
return self.embed_tokens |
|
|
|
def forward( |
|
self, |
|
input_ids: torch.Tensor, |
|
positions: torch.Tensor, |
|
forward_batch: ForwardBatch, |
|
        input_embeds: Optional[torch.Tensor] = None,
|
pp_proxy_tensors: Optional[PPProxyTensors] = None, |
|
) -> Union[torch.Tensor, PPProxyTensors]: |
|
if self.pp_group.is_first_rank: |
|
if input_embeds is None: |
|
hidden_states = self.embed_tokens(input_ids) |
|
else: |
|
hidden_states = input_embeds |
|
residual = None |
|
else: |
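            # Non-first pipeline stages receive hidden states and residual from the previous stage.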
|
assert pp_proxy_tensors is not None |
|
hidden_states = pp_proxy_tensors["hidden_states"] |
|
residual = pp_proxy_tensors["residual"] |
|
|
|
aux_hidden_states = [] |
|
for i in range(self.start_layer, self.end_layer): |
|
if i in self.layers_to_capture: |
|
aux_hidden_states.append( |
|
hidden_states + residual if residual is not None else hidden_states |
|
) |
|
layer = self.layers[i] |
|
hidden_states, residual = layer( |
|
positions, |
|
hidden_states, |
|
forward_batch, |
|
residual, |
|
) |
|
if not self.pp_group.is_last_rank: |
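            # Not the last pipeline stage: hand the activations to the next stage.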
|
return PPProxyTensors( |
|
{ |
|
"hidden_states": hidden_states, |
|
"residual": residual, |
|
} |
|
) |
|
else: |
|
if hidden_states.shape[0] != 0: |
|
if residual is None: |
|
hidden_states = self.norm(hidden_states) |
|
else: |
|
hidden_states, _ = self.norm(hidden_states, residual) |
|
|
|
if len(aux_hidden_states) == 0: |
|
return hidden_states |
|
|
|
return hidden_states, aux_hidden_states |
|
|
|
|
|
|
|
|
|
    # Load per-layer KV cache scaling factors (e.g. for an FP8 KV cache) for the
    # layers owned by this tensor-parallel rank.
    def load_kv_cache_scales(self, quantization_param_path: str) -> None:
|
tp_size = get_tensor_model_parallel_world_size() |
|
tp_rank = get_tensor_model_parallel_rank() |
|
for layer_idx, scaling_factor in kv_cache_scales_loader( |
|
quantization_param_path, |
|
tp_rank, |
|
tp_size, |
|
self.config.num_hidden_layers, |
|
self.config.__class__.model_type, |
|
): |
|
if not isinstance(self.layers[layer_idx], nn.Identity): |
|
layer_self_attn = self.layers[layer_idx].self_attn |
|
if hasattr(layer_self_attn.attn, "k_scale"): |
|
layer_self_attn.attn.k_scale = scaling_factor |
|
layer_self_attn.attn.v_scale = scaling_factor |
|
else: |
|
raise RuntimeError( |
|
"Self attention has no KV cache scaling " "factor attribute!" |
|
) |
|
|
|
|
|
class Qwen2ForCausalLM(nn.Module): |
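    """Causal LM wrapper: Qwen2Model plus an LM head, logits processing, and a pooling path for embedding requests."""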
|
|
|
    # BitsAndBytes-specific attributes.
    default_bitsandbytes_target_modules = [
|
".gate_proj.", |
|
".down_proj.", |
|
".up_proj.", |
|
".q_proj.", |
|
".k_proj.", |
|
".v_proj.", |
|
".o_proj.", |
|
] |
|
bitsandbytes_stacked_params_mapping = { |
|
|
|
"q_proj": ("qkv_proj", 0), |
|
"k_proj": ("qkv_proj", 1), |
|
"v_proj": ("qkv_proj", 2), |
|
"gate_proj": ("gate_up_proj", 0), |
|
"up_proj": ("gate_up_proj", 1), |
|
} |
|
|
|
def __init__( |
|
self, |
|
config: Qwen2Config, |
|
quant_config: Optional[QuantizationConfig] = None, |
|
prefix: str = "", |
|
) -> None: |
|
super().__init__() |
|
self.pp_group = get_pp_group() |
|
self.config = config |
|
self.quant_config = quant_config |
|
self.model = Qwen2Model( |
|
config, quant_config=quant_config, prefix=add_prefix("model", prefix) |
|
) |
|
self.capture_aux_hidden_states = False |
|
|
|
|
|
        # Only the last pipeline-parallel rank holds the LM head.
        if self.pp_group.is_last_rank:
|
if self.pp_group.world_size == 1 and config.tie_word_embeddings: |
|
self.lm_head = self.model.embed_tokens |
|
else: |
|
self.lm_head = ParallelLMHead( |
|
config.vocab_size, |
|
config.hidden_size, |
|
quant_config=quant_config, |
|
prefix=add_prefix("lm_head", prefix), |
|
) |
|
else: |
|
|
|
            # Ranks other than the last get a placeholder for the LM head.
            self.lm_head = PPMissingLayer()
|
|
|
|
|
        # With pipeline parallelism and tied word embeddings, send the embedding
        # weight from the first rank to the last rank to initialize the LM head.
        if self.pp_group.world_size > 1 and config.tie_word_embeddings:
|
if self.pp_group.is_first_rank: |
|
self.pp_group.send( |
|
self.model.embed_tokens.weight, dst=self.pp_group.last_rank |
|
) |
|
else: |
|
emb_token_weight = self.pp_group.recv( |
|
size=(config.vocab_size, config.hidden_size), |
|
dtype=next(self.model.parameters()).dtype, |
|
src=self.pp_group.first_rank, |
|
) |
|
self.lm_head.weight.copy_(emb_token_weight) |
|
|
|
self.logits_processor = LogitsProcessor(config) |
|
self.pooler = Pooler(pooling_type=PoolingType.LAST, normalize=True) |
|
|
|
def get_input_embedding(self, input_ids: torch.Tensor) -> torch.Tensor: |
|
return self.model.get_input_embedding(input_ids) |
|
|
|
def get_input_embeddings(self) -> nn.Embedding: |
|
return self.model.embed_tokens |
|
|
|
@torch.no_grad() |
|
def forward( |
|
self, |
|
input_ids: torch.Tensor, |
|
positions: torch.Tensor, |
|
forward_batch: ForwardBatch, |
|
        input_embeds: Optional[torch.Tensor] = None,
|
get_embedding: bool = False, |
|
pp_proxy_tensors: Optional[PPProxyTensors] = None, |
|
) -> torch.Tensor: |
|
hidden_states = self.model( |
|
input_ids, |
|
positions, |
|
forward_batch, |
|
input_embeds, |
|
pp_proxy_tensors=pp_proxy_tensors, |
|
) |
|
aux_hidden_states = None |
|
if self.capture_aux_hidden_states: |
|
hidden_states, aux_hidden_states = hidden_states |
|
|
|
if self.pp_group.is_last_rank: |
|
if not get_embedding: |
|
return self.logits_processor( |
|
input_ids, hidden_states, self.lm_head, forward_batch, aux_hidden_states |
|
) |
|
else: |
|
return self.pooler(hidden_states, forward_batch) |
|
else: |
|
return hidden_states |
|
|
|
@torch.no_grad() |
|
def forward_split_prefill( |
|
self, |
|
input_ids: torch.Tensor, |
|
positions: torch.Tensor, |
|
forward_batch: ForwardBatch, |
|
split_interval: Tuple[int, int], |
|
        input_embeds: Optional[torch.Tensor] = None,
|
): |
|
start, end = split_interval |
|
|
|
if start == 0: |
|
if input_embeds is None: |
|
forward_batch.hidden_states = self.model.embed_tokens(input_ids) |
|
else: |
|
forward_batch.hidden_states = input_embeds |
|
|
|
for i in range(start, end): |
|
layer = self.model.layers[i] |
|
forward_batch.hidden_states, forward_batch.residual = layer( |
|
positions, |
|
forward_batch.hidden_states, |
|
forward_batch, |
|
forward_batch.residual, |
|
) |
|
|
|
if end == self.model.config.num_hidden_layers: |
|
|
|
            # All layers processed: apply the final norm, then compute logits.
            hidden_states, _ = self.model.norm(
|
forward_batch.hidden_states, forward_batch.residual |
|
) |
|
forward_batch.hidden_states = hidden_states |
|
|
|
result = self.logits_processor( |
|
input_ids, forward_batch.hidden_states, self.lm_head, forward_batch |
|
) |
|
else: |
|
result = None |
|
|
|
return result |
|
|
|
@property |
|
def start_layer(self): |
|
return self.model.start_layer |
|
|
|
@property |
|
def end_layer(self): |
|
return self.model.end_layer |
|
|
|
def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): |
|
stacked_params_mapping = [ |
|
|
|
("qkv_proj", "q_proj", "q"), |
|
("qkv_proj", "k_proj", "k"), |
|
("qkv_proj", "v_proj", "v"), |
|
("gate_up_proj", "gate_proj", 0), |
|
("gate_up_proj", "up_proj", 1), |
|
] |
|
|
|
params_dict = dict(self.named_parameters()) |
|
for name, loaded_weight in weights: |
|
layer_id = get_layer_id(name) |
|
if ( |
|
layer_id is not None |
|
and hasattr(self.model, "start_layer") |
|
and ( |
|
layer_id < self.model.start_layer |
|
or layer_id >= self.model.end_layer |
|
) |
|
): |
|
continue |
|
|
|
if "rotary_emb.inv_freq" in name or "projector" in name: |
|
continue |
|
if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name: |
|
|
|
|
|
continue |
|
if self.config.tie_word_embeddings and "lm_head.weight" in name: |
|
if self.pp_group.world_size > 1 and self.pp_group.is_last_rank: |
|
|
|
|
|
                    # With pipeline parallelism and tied embeddings, the last
                    # rank initializes lm_head from the embedding weights.
                    embed_token_weights = next(
|
filter(lambda x: x[0] == "model.embed_tokens.weight", weights) |
|
)[1] |
|
loaded_weight = embed_token_weights |
|
else: |
|
continue |
|
if name.startswith("model.vision_tower") and name not in params_dict: |
|
continue |
|
|
|
for param_name, weight_name, shard_id in stacked_params_mapping: |
|
if weight_name not in name: |
|
continue |
|
name = name.replace(weight_name, param_name) |
|
|
|
if name.endswith(".bias") and name not in params_dict: |
|
continue |
|
if name not in params_dict: |
|
continue |
|
param = params_dict[name] |
|
weight_loader = param.weight_loader |
|
weight_loader(param, loaded_weight, shard_id) |
|
break |
|
else: |
|
|
|
if name.endswith(".bias") and name not in params_dict: |
|
continue |
|
|
|
if name in params_dict.keys(): |
|
param = params_dict[name] |
|
weight_loader = getattr( |
|
param, "weight_loader", default_weight_loader |
|
) |
|
weight_loader(param, loaded_weight) |
|
else: |
|
logger.warning(f"Parameter {name} not found in params_dict") |
|
|
|
def get_embed_and_head(self): |
|
return self.model.embed_tokens.weight, self.lm_head.weight |
|
|
|
def set_embed_and_head(self, embed, head): |
|
del self.model.embed_tokens.weight |
|
del self.lm_head.weight |
|
self.model.embed_tokens.weight = embed |
|
self.lm_head.weight = head |
|
torch.cuda.empty_cache() |
|
torch.cuda.synchronize() |
|
|
|
def load_kv_cache_scales(self, quantization_param_path: str) -> None: |
|
self.model.load_kv_cache_scales(quantization_param_path) |
|
|
|
def set_eagle3_layers_to_capture(self, layer_ids: Optional[List[int]] = None): |
|
if not self.pp_group.is_last_rank: |
|
return |
|
|
|
self.capture_aux_hidden_states = True |
|
if layer_ids is None: |
|
            # Default: capture an early, a middle, and a late layer.
            num_layers = self.config.num_hidden_layers
|
self.model.layers_to_capture = [ |
|
2, |
|
num_layers // 2, |
|
num_layers - 3, |
|
] |
|
else: |
|
            # Capturing the input of layer (id + 1) yields the output of layer id.
            self.model.layers_to_capture = [val + 1 for val in layer_ids]
|
|
|
|
|
# Entry point registered with the SGLang model loader.
EntryClass = Qwen2ForCausalLM
|
|