lhallee committed (verified)
Commit 34d280d · 1 Parent(s): 7bdd97f

Update modeling_esm_plusplus.py

Files changed (1): modeling_esm_plusplus.py (+129 -28)
modeling_esm_plusplus.py CHANGED
@@ -49,6 +49,7 @@ class ESMplusplusConfig(PretrainedConfig):
         num_labels: int = 2,
         problem_type: str | None = None,
         dropout: float = 0.0,
+        initializer_range: float = 0.02,
         **kwargs,
     ):
         super().__init__(**kwargs)
@@ -59,6 +60,7 @@ class ESMplusplusConfig(PretrainedConfig):
         self.num_labels = num_labels
         self.problem_type = problem_type
         self.dropout = dropout
+        self.initializer_range = initializer_range
 
 
 ### Rotary Embeddings
@@ -398,9 +400,7 @@ class UnifiedTransformerBlock(nn.Module):
         attn_output, attn_weights = self.attn(x, attention_mask, output_attentions)
         x = x + self.dropout(attn_output) / self.scaling_factor
         x = x + self.dropout(self.ffn(x)) / self.scaling_factor
-        if output_attentions:
-            return x, attn_weights
-        return x
+        return x, attn_weights
 
 
 ### Model Outputs
@@ -452,6 +452,7 @@ class TransformerStack(nn.Module):
             ]
         )
         self.norm = nn.LayerNorm(d_model, bias=False)
+        self.gradient_checkpointing = False
 
     def forward(
         self,
@@ -478,12 +479,18 @@ class TransformerStack(nn.Module):
             attention_mask = attention_mask[:, None, None, :].expand(batch_size, 1, seq_len, seq_len).bool()
 
         for block in self.blocks:
-            if output_attentions:
-                x, attn_weights = block(x, attention_mask, output_attentions)
-                if attentions is not None:
-                    attentions += (attn_weights,)
+            if self.gradient_checkpointing and self.training:
+                x, attn_weights = self._gradient_checkpointing_func(
+                    block.__call__,
+                    x,
+                    attention_mask,
+                    output_attentions,
+                )
             else:
-                x = block(x, attention_mask, output_attentions)
+                x, attn_weights = block(x, attention_mask, output_attentions)
+
+            if attentions is not None:
+                attentions += (attn_weights,)
 
         if output_hidden_states:
             assert hidden_states is not None
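The new branch defers to self._gradient_checkpointing_func, which recent transformers releases attach to submodules when checkpointing is turned on through the standard PreTrainedModel hook; the pretrained base class added in the next hunk declares supports_gradient_checkpointing = True. A minimal sketch, assuming a recent transformers version and a model built from this file:

    model = ESMplusplusForMaskedLM(ESMplusplusConfig())
    model.gradient_checkpointing_enable()  # should set gradient_checkpointing and _gradient_checkpointing_func on the TransformerStack
    model.train()                          # the checkpointed path only runs while self.training is True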
@@ -509,25 +516,30 @@ class ProteinDataset(Dataset):
         return self.sequences[idx]
 
 
-### ESM++ Models
-class ESMplusplusForMaskedLM(PreTrainedModel):
-    """ESM++ model for masked language modeling.
-
-    Implements the base ESM++ architecture with a masked language modeling head.
+class PreTrainedESMplusplusModel(PreTrainedModel):
+    """
+    init weights for ESM++ models
     """
     config_class = ESMplusplusConfig
-    def __init__(self, config: ESMplusplusConfig, **kwargs):
-        super().__init__(config, **kwargs)
-        self.config = config
-        self.vocab_size = config.vocab_size
-        self.embed = nn.Embedding(self.vocab_size, config.hidden_size)
-        self.transformer = TransformerStack(config.hidden_size, config.num_attention_heads, config.num_hidden_layers, config.dropout)
-        self.sequence_head = RegressionHead(config.hidden_size, self.vocab_size)
-        self.ce_loss = nn.CrossEntropyLoss()
-        self.tokenizer = EsmSequenceTokenizer()
+    base_model_prefix = "esm++"
+    supports_gradient_checkpointing = True
+
+    def _init_weights(self, module):
+        """Initialize the weights"""
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
 
     @classmethod
-    def from_pretrained_esm(cls, model_name: str) -> "ESMplusplusForMaskedLM":
+    def from_pretrained_esm(cls, model_name: str):
         """Load a pretrained ESM++ model."""
         if '300' in model_name:
             return ESMplusplus_300M()
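Since the constructors added later in this diff end with self.init_weights(), freshly constructed (non-pretrained) models now draw nn.Linear and nn.Embedding weights from a normal distribution with std config.initializer_range via the _init_weights hook above. A small sketch of that effect, assuming default config values otherwise:

    config = ESMplusplusConfig(initializer_range=0.02)
    model = ESMplusplusForMaskedLM(config)    # __init__ finishes with self.init_weights()
    print(model.embed.weight.std().item())    # roughly 0.02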
@@ -548,6 +560,26 @@ class ESMplusplusForMaskedLM(PreTrainedModel):
         else:
             attention_mask = attention_mask.unsqueeze(-1)
             return (x * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)
+
+    def max_pooling(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+        """Apply max pooling to sequence outputs."""
+        if attention_mask is None:
+            return x.max(dim=1).values
+        else:
+            attention_mask = attention_mask.unsqueeze(-1)
+            return (x * attention_mask).max(dim=1).values
+
+    def min_pooling(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+        """Apply min pooling to sequence outputs."""
+        if attention_mask is None:
+            return x.min(dim=1).values
+        else:
+            attention_mask = attention_mask.unsqueeze(-1)
+            return (x * attention_mask).min(dim=1).values
+
+    def cls_pooling(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+        """Apply cls pooling to sequence outputs."""
+        return x[:, 0, :]
 
     def _collate_fn(self, sequences: list[str]) -> tuple[torch.Tensor, torch.Tensor]:
         """Collate function for batching sequences."""
@@ -606,8 +638,14 @@ class ESMplusplusForMaskedLM(PreTrainedModel):
                 return residue_embeddings
             elif pooling_type == 'mean':
                 return self.mean_pooling(residue_embeddings, attention_mask)
+            elif pooling_type == 'max':
+                return self.max_pooling(residue_embeddings, attention_mask)
+            elif pooling_type == 'min':
+                return self.min_pooling(residue_embeddings, attention_mask)
+            elif pooling_type == 'cls':
+                return self.cls_pooling(residue_embeddings, attention_mask)
             else:
-                return residue_embeddings[:, 0, :]
+                raise ValueError(f"Invalid pooling type: {pooling_type}")
 
         if sql:
             import sqlite3
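The embedding helper now dispatches pooling_type to the pooling methods added in the previous hunk and raises on unknown values instead of silently falling back to CLS pooling. A minimal sketch of the new options (model, residue_embeddings, and attention_mask are illustrative names, not part of the file):

    # residue_embeddings: (batch, seq_len, hidden); attention_mask: (batch, seq_len)
    mean_vec = model.mean_pooling(residue_embeddings, attention_mask)
    max_vec = model.max_pooling(residue_embeddings, attention_mask)   # new
    min_vec = model.min_pooling(residue_embeddings, attention_mask)   # new
    cls_vec = model.cls_pooling(residue_embeddings, attention_mask)   # new; returns residue_embeddings[:, 0, :]
    # pooling_type now accepts 'mean', 'max', 'min', or 'cls'; anything else raises ValueError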
@@ -653,6 +691,67 @@ class ESMplusplusForMaskedLM(PreTrainedModel):
 
         return embeddings_dict
 
+
+### ESM++ Models
+class ESMplusplusModel(PreTrainedESMplusplusModel):
+    """
+    ESM++ model. transformer model with no heads
+    """
+    config_class = ESMplusplusConfig
+    def __init__(self, config: ESMplusplusConfig, **kwargs):
+        super().__init__(config, **kwargs)
+        self.config = config
+        self.vocab_size = config.vocab_size
+        self.embed = nn.Embedding(self.vocab_size, config.hidden_size)
+        self.transformer = TransformerStack(config.hidden_size, config.num_attention_heads, config.num_hidden_layers, config.dropout)
+        self.tokenizer = EsmSequenceTokenizer()
+        self.init_weights()
+
+    def forward(
+        self,
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None, # to play nice with HF adjacent packages
+    ) -> TransformerOutput:
+        """Forward pass for masked language modeling.
+
+        Args:
+            input_ids: Input token IDs
+            attention_mask: Attention mask
+            inputs_embeds: Optional precomputed embeddings
+            output_hidden_states: Whether to return all hidden states
+            output_attentions: Whether to return attention weights
+
+        Returns:
+            TransformerOutput containing last hidden state and optionally all hidden states and attention weights
+        """
+        if inputs_embeds is None:
+            x = self.embed(input_ids)
+        else:
+            x = inputs_embeds
+        return self.transformer(x, attention_mask, output_hidden_states, output_attentions)
+
+
+class ESMplusplusForMaskedLM(PreTrainedESMplusplusModel):
+    """
+    ESM++ model for masked language modeling.
+    Implements the base ESM++ architecture with a masked language modeling head.
+    """
+    config_class = ESMplusplusConfig
+    def __init__(self, config: ESMplusplusConfig, **kwargs):
+        super().__init__(config, **kwargs)
+        self.config = config
+        self.vocab_size = config.vocab_size
+        self.embed = nn.Embedding(self.vocab_size, config.hidden_size)
+        self.transformer = TransformerStack(config.hidden_size, config.num_attention_heads, config.num_hidden_layers, config.dropout)
+        self.sequence_head = RegressionHead(config.hidden_size, self.vocab_size)
+        self.ce_loss = nn.CrossEntropyLoss()
+        self.tokenizer = EsmSequenceTokenizer()
+        self.init_weights()
+
     def forward(
         self,
         input_ids: Optional[torch.Tensor] = None,
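ESMplusplusModel is the new headless entry point: it embeds the input tokens and returns the TransformerStack output with no prediction head. A minimal usage sketch, assuming the tokenizer behaves like a standard Hugging Face tokenizer and that TransformerOutput exposes a last_hidden_state field:

    config = ESMplusplusConfig()
    model = ESMplusplusModel(config).eval()
    batch = model.tokenizer(["MSEQWENCE"], return_tensors="pt", padding=True)
    with torch.no_grad():
        out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
    hidden = out.last_hidden_state  # (batch, seq_len, hidden_size)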
@@ -696,8 +795,8 @@ class ESMplusplusForMaskedLM(PreTrainedModel):
 
 
 class ESMplusplusForSequenceClassification(ESMplusplusForMaskedLM):
-    """ESM++ model for sequence classification.
-
+    """
+    ESM++ model for sequence classification.
     Extends the base ESM++ model with a classification head.
     """
     def __init__(self, config: ESMplusplusConfig, **kwargs):
@@ -709,6 +808,7 @@ class ESMplusplusForSequenceClassification(ESMplusplusForMaskedLM):
         self.mse = nn.MSELoss()
         self.ce = nn.CrossEntropyLoss()
         self.bce = nn.BCEWithLogitsLoss()
+        self.init_weights()
 
     def forward(
         self,
@@ -776,8 +876,8 @@ class ESMplusplusForSequenceClassification(ESMplusplusForMaskedLM):
 
 
 class ESMplusplusForTokenClassification(ESMplusplusForMaskedLM):
-    """ESM++ model for token classification.
-
+    """
+    ESM++ model for token classification.
     Extends the base ESM++ model with a token classification head.
     """
     def __init__(self, config: ESMplusplusConfig):
@@ -787,6 +887,7 @@ class ESMplusplusForTokenClassification(ESMplusplusForMaskedLM):
         self.classifier = RegressionHead(config.hidden_size, config.num_labels, config.hidden_size * 4)
         # Large intermediate projections help with sequence classification tasks (*4)
         self.loss_fct = nn.CrossEntropyLoss()
+        self.init_weights()
 
     def forward(
         self,
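The classification subclasses now also finish their constructors with self.init_weights(), so their heads pick up the same initializer_range-based initialization. A brief sketch using the num_labels field from the config hunk at the top of this diff:

    config = ESMplusplusConfig(num_labels=3)
    clf = ESMplusplusForSequenceClassification(config)   # ends with self.init_weights()
    tagger = ESMplusplusForTokenClassification(config)   # likewise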
 