|
""" |
|
AuriStream sequence model definition. |
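
Example (a minimal usage sketch; the checkpoint path and the way tokens are
obtained are illustrative placeholders, not part of this file):

    import torch

    model = AuriStream.from_pretrained("path/to/checkpoint")      # via PreTrainedModel
    tokens = torch.randint(0, model.config.vocab_size, (1, 128))  # stand-in for real cochleagram tokens
    generated, logits = model.generate(tokens, n_tokens=50, temp=0.9, top_k=500, top_p=0.5)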
|
""" |
|
|
|
import math |
|
import inspect |
|
import random |
|
import torch |
|
import torch.nn as nn |
|
from torch.nn import functional as F |
|
import numpy as np |
|
from huggingface_hub import PyTorchModelHubMixin |
|
from transformers.modeling_outputs import BaseModelOutput, CausalLMOutput |
|
from transformers import PreTrainedModel |
|
from .configuration_auristream import AuriStreamConfig |
|
|
|
|
|
class AuriStream(PreTrainedModel): |
|
config_class = AuriStreamConfig |
|
|
|
def __init__(self, config): |
|
super().__init__(config) |
|
self.config = config |
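        # Embedding setup (below): if the config explicitly disables RoPE (use_rope=False),
        # learned absolute position embeddings (wpe) are added; otherwise positions are
        # handled by rotary embeddings inside the attention blocks and wpe is omitted.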
|
|
|
|
|
if hasattr(config, 'use_rope') and not config.use_rope: |
|
self.transformer = nn.ModuleDict(dict( |
|
wte = nn.Embedding(config.vocab_size, config.n_embd), |
|
wpe = nn.Embedding(config.seq_len, config.n_embd), |
|
drop = nn.Dropout(config.dropout), |
|
h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]), |
|
ln_f = RMSNorm(config.n_embd, bias=config.bias), |
|
)) |
|
else: |
|
self.transformer = nn.ModuleDict(dict( |
|
wte = nn.Embedding(config.vocab_size, config.n_embd), |
|
drop = nn.Dropout(config.dropout), |
|
h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]), |
|
ln_f = RMSNorm(config.n_embd, bias=config.bias), |
|
)) |
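        # Optional multi-step prediction heads: with n_pred_steps total prediction steps,
        # head i predicts the target (i+1) steps beyond the standard next-token target
        # handled by coch_head below.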
|
|
|
|
|
if hasattr(config, 'n_pred_steps'): |
|
self.future_heads = nn.ModuleList([nn.Linear(config.n_embd, config.vocab_size, bias=False) for _ in range(config.n_pred_steps - 1)]) |
|
else: |
|
self.future_heads = None |
|
|
|
self.coch_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
|
|
|
|
|
self.apply(self._init_weights) |
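        # GPT-2-style scaled init: residual output projections (c_proj) get a smaller
        # std so that activations do not grow with depth.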
|
|
|
for pn, p in self.named_parameters(): |
|
if pn.endswith('c_proj.weight'): |
|
torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer)) |
|
|
|
def get_num_params(self, non_embedding=True): |
|
""" |
|
Return the number of parameters in the model. |
|
For non-embedding count (default), the position embeddings get subtracted. |
|
The token embeddings would too, except due to the parameter sharing these |
|
params are actually used as weights in the final layer, so we include them. |
|
""" |
|
n_params = sum(p.numel() for p in self.parameters()) |
|
return n_params |
|
|
|
def _init_weights(self, module): |
|
if isinstance(module, nn.Linear): |
|
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) |
|
if module.bias is not None: |
|
torch.nn.init.zeros_(module.bias) |
|
elif isinstance(module, nn.Embedding): |
|
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) |
|
|
|
def forward(self, seq, tgt=None, output_logits=False, output_hidden_states=False, return_dict=False, up_until_layer=None): |
|
""" |
|
Input: coch: torch.Tensor of shape (b, t) |
|
tgt_coch: torch.Tensor of shape (b, t) or None |
|
""" |
|
|
|
|
|
tok_emb = self.transformer.wte(seq) |
|
|
|
|
|
if hasattr(self.transformer, 'wpe'): |
|
pos = torch.arange(0, seq.size(1), dtype=torch.long, device=seq.device) |
|
pos_emb = self.transformer.wpe(pos) |
|
x = self.transformer.drop(tok_emb + pos_emb) |
|
else: |
|
x = self.transformer.drop(tok_emb) |
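        # Collect the hidden state entering each block; up_until_layer lets callers stop
        # early and read out an intermediate representation.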
|
|
|
all_hidden_states = [] |
|
for block_idx, block in enumerate(self.transformer.h): |
|
|
|
all_hidden_states.append(x) |
|
if up_until_layer is not None and block_idx == up_until_layer: |
|
break |
|
x = block(x) |
|
|
|
|
|
        # Append the final output once (when the loop broke early, the current x was already appended above).
        if up_until_layer is None or up_until_layer >= len(self.transformer.h):
|
all_hidden_states.append(x) |
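        # Fast path: if the caller only wants hidden states, skip the final norm,
        # output head, and loss computation.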
|
|
|
if output_hidden_states and not output_logits: |
|
model_output = BaseModelOutput( |
|
last_hidden_state=x, |
|
hidden_states=all_hidden_states, |
|
) |
|
return model_output |
|
|
|
x = self.transformer.ln_f(x) |
|
logits = self.coch_head(x) |
|
|
|
if tgt is not None: |
|
|
|
if output_logits: |
|
all_logits = [logits] |
|
|
|
loss = F.cross_entropy( |
|
logits.reshape(-1, self.config.vocab_size), tgt.reshape(-1), |
|
) |
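            # Multi-step prediction loss: future head i scores the target (i+1) steps
            # beyond the next-token target, and the total loss is averaged over all heads.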
|
|
|
|
|
if self.future_heads is not None: |
|
for i, head in enumerate(self.future_heads): |
|
future_logits = head(x[:, :-(i+1)]) |
|
loss += F.cross_entropy( |
|
future_logits.reshape(-1, self.config.vocab_size), tgt[:, (i+1):].reshape(-1), |
|
) |
|
if output_logits: |
|
all_logits.append(future_logits) |
|
|
|
loss = loss / (len(self.future_heads) + 1) |
|
|
|
            if return_dict:
                model_output = CausalLMOutput(
                    loss=loss,
                    logits=all_logits if output_logits else logits,
                    hidden_states=all_hidden_states if output_hidden_states else None,
                )
                return model_output
|
|
|
return logits, loss |
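        # No targets given: return only the next-token logits (as a plain tuple,
        # even if return_dict was requested).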
|
|
|
return logits, None |
|
|
|
def sample_logits(self, logits: torch.FloatTensor, temperature: float = 0.9, |
|
top_k: int = 500, top_p: float = 0.5) -> torch.LongTensor: |
|
""" |
|
Samples an integer from the distribution of logits |
|
Parameters: |
|
logits (torch.FloatTensor): The logits of the distribution |
|
            temperature (float): The sampling temperature; if 0.0, argmax is used
|
top_k (int): The number of top k tokens to consider during sampling |
|
top_p (float): The cumulative probability threshold for nucleus (top-p) sampling |
|
Returns: |
|
torch.LongTensor: The sampled integer |
|
""" |
|
|
|
if temperature == 0.0: |
|
return torch.argmax(logits, dim=-1) |
|
|
|
|
|
logits = logits / temperature |
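        # Top-k filtering (below): logits smaller than the k-th largest are masked to -inf.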
|
|
|
|
|
if top_k is not None: |
|
v, _ = torch.topk(logits, min(top_k, logits.size(-1))) |
|
logits[logits < v[..., [-1]]] = -float('Inf') |
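        # Nucleus (top-p) filtering (below): drop the tail of the sorted distribution once
        # the cumulative probability exceeds top_p, always keeping the most likely token.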
|
|
|
|
|
if top_p is not None: |
|
|
|
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1) |
|
|
|
sorted_probs = F.softmax(sorted_logits, dim=-1) |
|
|
|
cumulative_probs = torch.cumsum(sorted_probs, dim=-1) |
|
|
|
sorted_indices_to_remove = cumulative_probs > top_p |
|
|
|
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() |
|
sorted_indices_to_remove[..., 0] = 0 |
|
|
|
indices_to_remove = sorted_indices_to_remove.scatter(dim=-1, index=sorted_indices, src=sorted_indices_to_remove) |
|
logits[indices_to_remove] = -float('Inf') |
|
|
|
|
|
probs = F.softmax(logits, dim=-1) |
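        # torch.multinomial expects 2-D input: flatten the leading dims, draw one sample
        # per distribution, then restore the original batch shape.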
|
|
|
flat_probs = probs.view(-1, probs.size(-1)) |
|
|
|
sampled = torch.multinomial(flat_probs, num_samples=1) |
|
|
|
sampled = sampled.view(*logits.shape[:-1]) |
|
return sampled |
|
|
|
@torch.no_grad() |
|
def generate(self, seq: torch.Tensor, n_tokens: int = 1, temp=1.0, |
|
top_k=500, top_p=0.5, seed=None): |
|
""" |
|
Parameters: |
|
seq: torch.Tensor of shape (b, t, n_freq_bins) |
|
Input cochleagram to use for generation |
|
n_tokens: int |
|
Number of time bins to predict |
|
temp: float |
|
Temperature for sampling logits |
|
seed: int |
|
Random seed for sampling |
|
|
|
Returns: |
|
pred_coch: torch.Tensor of shape (b, t, n_freq_bins) |
|
The predicted cochleagram |
|
all_logits: (optional if return_logits is True) torch.Tensor of shape (b, n_tokens, n_freq_bins) |
|
The logits for each time step |
|
all_embs: (optional if return_embs is not None) list of torch.Tensor |
|
The embeddings for each transformer block |
|
""" |
|
|
|
|
|
if seed is not None: |
|
random.seed(seed) |
|
np.random.seed(seed) |
|
torch.manual_seed(seed) |
|
|
|
|
|
all_logits = [] |
|
device = seq.device |
|
|
|
|
|
b, t = seq.size() |
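        # Prefill (below): run the full prompt through the transformer once, caching each
        # block's keys/values so that later tokens only need single-step forward passes.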
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tok_emb = self.transformer.wte(seq) |
|
|
|
if hasattr(self.transformer, 'wpe'): |
|
pos = torch.arange(0, seq.size(1), dtype=torch.long, device=seq.device) |
|
pos_emb = self.transformer.wpe(pos) |
|
x = self.transformer.drop(tok_emb + pos_emb) |
|
else: |
|
x = self.transformer.drop(tok_emb) |
|
|
|
|
|
k_list = [] |
|
v_list = [] |
|
for block_idx, block in enumerate(self.transformer.h): |
|
|
|
x, k, v = block(x, return_kv=True) |
|
k_list.append(k) |
|
v_list.append(v) |
|
|
|
k_cache = torch.stack(k_list, dim=0) |
|
v_cache = torch.stack(v_list, dim=0) |
|
|
|
x = self.transformer.ln_f(x) |
|
|
|
|
|
logits = self.coch_head(x[:, [-1]]) |
|
        predictions = [self.sample_logits(logits, temperature=temp, top_k=top_k, top_p=top_p)]
|
all_logits.append(logits) |
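        # Incremental decoding (below): feed only the most recently sampled token and
        # attend to the cached keys/values from all earlier positions.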
|
|
|
|
|
|
|
|
|
|
|
|
|
for i in range(n_tokens-1): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
tok_emb = self.transformer.wte(predictions[-1]) |
|
|
|
if hasattr(self.transformer, 'wpe'): |
|
pos = torch.arange(t+i, t+i+1, dtype=torch.long, device=device) |
|
pos_emb = self.transformer.wpe(pos) |
|
x = self.transformer.drop(tok_emb + pos_emb) |
|
else: |
|
x = self.transformer.drop(tok_emb) |
|
|
|
|
|
k_list = [] |
|
v_list = [] |
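            # Each block concatenates the new token's key/value onto its cache and returns
            # the grown cache; note that RoPE is not applied inside kv_cache_forward.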
|
for block_idx, block in enumerate(self.transformer.h): |
|
x, k, v = block(x, k_cache=k_cache[block_idx], v_cache=v_cache[block_idx]) |
|
k_list.append(k) |
|
v_list.append(v) |
|
x = self.transformer.ln_f(x) |
|
|
|
k_cache = torch.stack(k_list, dim=0) |
|
v_cache = torch.stack(v_list, dim=0) |
|
|
|
logits = self.coch_head(x) |
|
predictions.append(self.sample_logits(logits, temperature=temp, top_k=top_k, top_p=top_p)) |
|
all_logits.append(logits) |
|
|
|
pred_coch = torch.cat(predictions, dim=1) |
|
all_logits = torch.cat(all_logits, dim=1) |
|
|
|
return pred_coch, all_logits |
|
|
|
|
|
def configure_optimizers(self, weight_decay, learning_rate, betas, device_type): |
|
|
|
param_dict = {pn: p for pn, p in self.named_parameters()} |
|
|
|
param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad} |
|
|
|
|
|
decay_params = [p for n, p in param_dict.items() if p.dim() >= 2] |
|
nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2] |
|
optim_groups = [ |
|
{'params': decay_params, 'weight_decay': weight_decay}, |
|
{'params': nodecay_params, 'weight_decay': 0.0} |
|
] |
|
num_decay_params = sum(p.numel() for p in decay_params) |
|
num_nodecay_params = sum(p.numel() for p in nodecay_params) |
|
print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters") |
|
print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters") |
|
|
|
fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters |
|
use_fused = fused_available and device_type == 'cuda' |
|
extra_args = dict(fused=True) if use_fused else dict() |
|
optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args) |
|
print(f"using fused AdamW: {use_fused}") |
|
|
|
return optimizer |
|
|
|
def estimate_mfu(self, fwdbwd_per_iter, T, dt, gpu_type='A40'): |
|
""" estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """ |
|
|
|
|
|
        # Use an externally set unsharded parameter count if a training wrapper provides
        # one; otherwise count the parameters directly.
        N = getattr(self, 'unsharded_param_count', self.get_num_params())
|
cfg = self.config |
|
L, H, Q = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head |
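        # Standard transformer FLOPs estimate (cf. PaLM Appendix B): ~6N per token for the
        # dense parameters plus 12*L*H*Q*T for attention.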
|
|
|
flops_per_token = 6*N + 12*L*H*Q*T |
|
flops_per_fwdbwd = flops_per_token * T |
|
flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter |
|
|
|
flops_achieved = flops_per_iter * (1.0/dt) |
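        # bfloat16 dense peak throughput of the supported accelerators.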
|
|
|
|
|
if gpu_type == 'A40': |
|
flops_promised = 149.7e12 |
|
elif gpu_type == 'A100': |
|
flops_promised = 312e12 |
|
elif gpu_type == 'H100': |
|
flops_promised = 756e12 |
|
elif gpu_type == 'TPUv4': |
|
flops_promised = 275e12 |
|
        elif gpu_type == 'TPUv5e':
            flops_promised = 197e12
        else:
            raise ValueError(f"unknown gpu_type: {gpu_type}")
|
|
|
mfu = flops_achieved / flops_promised |
|
return mfu |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Block(nn.Module): |
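    """ Pre-norm transformer block: RMSNorm -> causal self-attention -> residual add,
    then RMSNorm -> MLP -> residual add. """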
|
|
|
def __init__(self, config): |
|
super().__init__() |
|
self.attn = CausalSelfAttention(config) |
|
self.mlp = MLP(config) |
|
self.attn_scale = 1.0 |
|
self.norm1 = RMSNorm(config.n_embd, bias=config.bias) |
|
self.norm2 = RMSNorm(config.n_embd, bias=config.bias) |
|
|
|
def forward(self, x, return_kv=False, k_cache=None, v_cache=None): |
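        # Three paths: (1) single-token decoding against an existing KV cache,
        # (2) a full pass that also returns this block's keys/values (cache prefill),
        # (3) the standard pass used for training and plain inference.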
|
|
|
|
|
if k_cache is not None and v_cache is not None: |
|
|
|
x_attn, k, v = self.attn.kv_cache_forward(self.norm1(x), k_cache, v_cache) |
|
x = x + x_attn |
|
x = x + self.mlp(self.norm2(x)) |
|
return x, k, v |
|
|
|
|
|
elif return_kv: |
|
|
|
x_attn, k, v = self.attn(self.norm1(x), return_kv=True) |
|
x = x + x_attn |
|
x = x + self.mlp(self.norm2(x)) |
|
return x, k, v |
|
|
|
x = x + self.attn_scale * self.attn(self.norm1(x)) |
|
x = x + self.mlp(self.norm2(x)) |
|
return x |
|
|
|
|
|
class CausalSelfAttention(nn.Module): |
|
|
|
def __init__(self, config): |
|
super().__init__() |
|
self.n_head = config.n_head |
|
self.n_embd = config.n_embd |
|
self.head_dim = self.n_embd // self.n_head |
|
assert self.n_embd % self.n_head == 0 |
|
|
|
self.c_attn = nn.Linear(self.n_embd, 3 * self.n_embd, bias=False) |
|
|
|
self.c_proj = nn.Linear(self.n_embd, self.n_embd, bias=False) |
|
|
|
rope_theta = 500000 |
|
if hasattr(config, 'rope_theta') and config.rope_theta is not None: |
|
rope_theta = config.rope_theta |
|
|
|
self.rotary = Rotary(self.head_dim, base=rope_theta) |
|
|
|
if hasattr(config, 'use_rope') and not config.use_rope: |
|
self.rotary = None |
|
|
|
def forward(self, x, return_kv=False, return_attn_maps=False): |
|
|
|
B, T, C = x.size() |
|
|
|
qkv = self.c_attn(x) |
|
q, k, v = qkv.split(self.n_embd, dim=2) |
|
k = k.view(B, T, self.n_head, self.head_dim) |
|
q = q.view(B, T, self.n_head, self.head_dim) |
|
v = v.view(B, T, self.n_head, self.head_dim) |
|
|
|
if self.rotary is not None: |
|
cos, sin = self.rotary(q) |
|
q = apply_rotary_emb(q, cos, sin) |
|
k = apply_rotary_emb(k, cos, sin) |
|
|
|
if not return_kv and not return_attn_maps: |
|
y = F.scaled_dot_product_attention( |
|
q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), |
|
is_causal=True) |
|
else: |
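            # Manual attention path (used when keys/values or attention maps must be
            # exposed): compute the scores explicitly with a causal mask instead of
            # calling the fused kernel.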
|
|
|
q = q.transpose(1, 2) |
|
k = k.transpose(1, 2) |
|
v = v.transpose(1, 2) |
|
att = torch.einsum('bnsh,bnkh->bnsk', q, k) * (1.0 / math.sqrt(k.size(-1))) |
|
mask = torch.triu(torch.ones(T, T), diagonal=1).to(dtype=torch.bool).to(x.device) |
|
mask = mask.view(1, 1, T, T) |
|
masked_att = att.masked_fill(mask, float('-inf')) |
|
|
|
masked_att = F.softmax(masked_att, dim=-1, dtype=torch.float32).to(q.dtype) |
|
|
|
y = torch.einsum('bnsk,bnkh->bnsh', masked_att, v) |
|
|
|
y = y.transpose(1, 2).contiguous().view(B, T, C) |
|
|
|
|
|
y = self.c_proj(y) |
|
|
|
|
|
        if return_attn_maps:
            # masked_att already holds the causally masked, softmaxed attention weights
            return y, masked_att
|
|
|
|
|
if return_kv: |
|
return y, k, v |
|
|
|
return y |
|
|
|
def kv_cache_forward(self, x, k_cache=None, v_cache=None): |
|
B, T, C = x.size() |
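        # x holds only the newly generated position(s); project to q/k/v and append
        # the new keys/values to the caches.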
|
|
|
|
|
q, k, v = self.c_attn(x).split(self.n_embd, dim=2) |
|
k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) |
|
q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) |
|
v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) |
|
|
|
|
|
if k_cache is not None: |
|
k = torch.cat((k_cache, k), dim=2) |
|
if v_cache is not None: |
|
v = torch.cat((v_cache, v), dim=2) |
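        # No causal mask is needed here: as used in generate(), the query covers only the
        # newest position, which may attend to every cached position.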
|
|
|
|
|
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) |
|
att = F.softmax(att, dim=-1) |
|
y = att @ v |
|
|
|
y = y.transpose(1, 2).contiguous().view(B, T, C) |
|
|
|
|
|
y = self.c_proj(y) |
|
|
|
return y, k, v |
|
|
|
|
|
class MLP(nn.Module): |
|
|
|
def __init__(self, config): |
|
super().__init__() |
|
self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias) |
|
        self.act = nn.SiLU()
|
self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias) |
|
self.dropout = nn.Dropout(config.dropout) |
|
|
|
def forward(self, x): |
|
x = self.c_fc(x) |
|
        x = self.act(x)
|
x = self.c_proj(x) |
|
x = self.dropout(x) |
|
return x |
|
|
|
|
|
class Rotary(torch.nn.Module): |
|
def __init__(self, dim, base=500000, learned=True): |
|
super().__init__() |
|
|
|
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim)) |
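        # When learned=True, the standard RoPE frequency schedule computed above is
        # replaced by a trainable vector re-initialised from N(0, 0.02).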
|
|
|
if learned: |
|
|
|
self.inv_freq = torch.nn.Parameter(inv_freq) |
|
nn.init.normal_(self.inv_freq, mean=0.0, std=0.02) |
|
else: |
|
self.register_buffer("inv_freq", inv_freq) |
|
self.learned = learned |
|
|
|
def forward(self, x): |
|
seq_len = x.shape[1] |
|
|
|
t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq) |
|
|
|
freqs = torch.outer(t, self.inv_freq).to(x.device) |
|
        cos = freqs.cos()
        sin = freqs.sin()
        return cos[None, :, None, :], sin[None, :, None, :]
|
|
|
def apply_rotary_emb(x, cos, sin): |
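    # Rotate each (first-half, second-half) feature pair of the head dimension by the
    # per-position angles in cos/sin (half-split RoPE convention).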
|
assert x.ndim == 4 |
|
d = x.shape[3] // 2 |
|
x1 = x[..., :d] |
|
x2 = x[..., d:] |
|
y1 = x1 * cos + x2 * sin |
|
y2 = x1 * (-sin) + x2 * cos |
|
return torch.cat([y1, y2], dim=3) |
|
|
|
|
|
class RMSNorm(nn.Module): |
|
""" Root Mean Square Normalization """ |
|
def __init__(self, dim: int, weight: bool = True, bias: bool = False, eps: float = 1e-6): |
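        # `bias` is accepted for interface parity with other norm layers but is not used.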
|
super().__init__() |
|
self.eps = eps |
|
self.weight = nn.Parameter(torch.ones(dim)) if weight else None |
|
|
|
def _norm(self, x): |
|
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) |
|
|
|
def forward(self, x): |
|
output = self._norm(x.float()).type_as(x) |
|
if self.weight is not None: |
|
return output * self.weight |
|
return output |
|
|