{
  "activation_function": "gelu",
  "allow_embedding_resizing": true,
  "architectures": [
    "ModernBertForMaskedLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "attention_layer": "rope",
  "attention_probs_dropout_prob": 0.0,
  "attn_out_bias": false,
  "attn_out_dropout_prob": 0.1,
  "attn_qkv_bias": false,
  "bert_layer": "prenorm",
  "bos_token_id": 50281,
  "classifier_activation": "gelu",
  "classifier_bias": false,
  "classifier_dropout": 0.0,
  "classifier_pooling": "cls",
  "cls_token_id": 50281,
  "compile_model": true,
  "decoder_bias": true,
  "deterministic_flash_attn": false,
  "embed_dropout_prob": 0.0,
  "embed_norm": true,
  "embedding_dropout": 0.0,
  "embedding_layer": "sans_pos",
  "eos_token_id": 50282,
  "final_norm": true,
  "global_attn_every_n_layers": 3,
  "global_rope_theta": 160000.0,
  "head_pred_act": "gelu",
  "hidden_act": "gelu",
  "hidden_activation": "gelu",
  "hidden_size": 384,
  "init_method": "full_megatron",
  "initializer_cutoff_factor": 2.0,
  "initializer_range": 0.02,
  "intermediate_size": 576,
  "local_attention": 128,
  "local_attn_rotary_emb_base": 10000.0,
  "local_rope_theta": 10000.0,
  "loss_function": "fa_cross_entropy",
  "loss_kwargs": {
    "reduction": "mean"
  },
  "masked_prediction": true,
  "max_position_embeddings": 8192,
  "mlp_bias": false,
  "mlp_dropout": 0.0,
  "mlp_dropout_prob": 0.0,
  "mlp_in_bias": false,
  "mlp_layer": "glu",
  "mlp_out_bias": false,
  "model_type": "modernbert",
  "norm_bias": false,
  "norm_eps": 1e-05,
  "norm_kwargs": {
    "bias": false,
    "eps": 1e-05
  },
  "normalization": "layernorm",
  "num_attention_heads": 6,
  "num_hidden_layers": 12,
  "pad_token_id": 50283,
  "padding": "unpadded",
  "reference_compile": null,
  "repad_logits_with_grad": false,
  "rotary_emb_base": 160000.0,
  "rotary_emb_dim": null,
  "rotary_emb_interleaved": false,
  "rotary_emb_scale_base": null,
  "sep_token_id": 50282,
  "skip_first_prenorm": true,
  "sliding_window": 128,
  "sparse_pred_ignore_index": -100,
  "sparse_prediction": false,
  "torch_dtype": "float32",
  "transformers_version": "4.48.1",
  "unpad_embeddings": true,
  "vocab_size": 50368
}