{
  "dropout": 0.101,
  "embed_dim": 768,
  "ff_dim": 2048,
  "max_seq_len": 1024,
  "num_decoder_layers": 12,
  "num_gqa_groups": 6,
  "num_heads": 12,
  "p": 0.14,
  "tie_weights": true,
  "vocab_size": 49152,
  "model_type": "fanformer",
  "architectures": [
    "MultiModalModel"
  ],
  "auto_map": {
    "AutoConfig": "model_architecture.FanConfig",
    "AutoModelForCausalLM": "model_architecture.MultiModalModel"
  }
}