skar0 committed
Commit ada7afa · verified · 1 Parent(s): eba1b9a

Upload tiny random Llama-3.3 model (seed 42)

Files changed (3)
  1. README.md +7 -7
  2. config.json +8 -14
  3. model.safetensors +2 -2
README.md CHANGED
@@ -16,24 +16,24 @@ This is a tiny random version of the meta-llama/Llama-3.3-70B-Instruct model, cr
 
  - **Base model**: meta-llama/Llama-3.3-70B-Instruct
  - **Seed**: 42
- - **Hidden size**: 128
- - **Number of layers**: 4
- - **Number of attention heads**: 4
+ - **Hidden size**: 768
+ - **Number of layers**: 12
+ - **Number of attention heads**: 12
  - **Vocabulary size**: 128256
  - **Max position embeddings**: 2048
 
  ## Parameters
 
- - **Total parameters**: ~62,031,104
- - **Trainable parameters**: ~62,031,104
+ - **Total parameters**: ~272,517,888
+ - **Trainable parameters**: ~272,517,888
 
  ## Usage
 
  ```python
- from transformers import AutoModelForSequenceClassification, AutoTokenizer
+ from transformers import AutoModelForCausalLM, AutoTokenizer
 
  # Load model and tokenizer
- model = AutoModelForSequenceClassification.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct")
+ model = AutoModelForCausalLM.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct")
  tokenizer = AutoTokenizer.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct")
 
  # Generate text (note: this model has random weights!)
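The README's usage snippet is cut off by the diff view. A minimal sketch of how the new loading calls might be combined with generation; the prompt and generation arguments are illustrative assumptions, not part of the commit:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tiny random model and its tokenizer (repo name taken from the README diff)
model = AutoModelForCausalLM.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct")
tokenizer = AutoTokenizer.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct")

# Generate text (note: the weights are random, so the output is meaningless)
inputs = tokenizer("Hello, world!", return_tensors="pt")  # illustrative prompt
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```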
config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "architectures": [
-    "LlamaForSequenceClassification"
+    "LlamaForCausalLM"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
@@ -10,23 +10,17 @@
     128008,
     128009
   ],
-  "head_dim": 128,
+  "head_dim": 64,
   "hidden_act": "silu",
-  "hidden_size": 128,
-  "id2label": {
-    "0": "LABEL_0"
-  },
+  "hidden_size": 768,
   "initializer_range": 0.02,
-  "intermediate_size": 28672,
-  "label2id": {
-    "LABEL_0": 0
-  },
+  "intermediate_size": 2048,
   "max_position_embeddings": 2048,
   "mlp_bias": false,
   "model_type": "llama",
-  "num_attention_heads": 4,
-  "num_hidden_layers": 4,
-  "num_key_value_heads": 8,
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "num_key_value_heads": 4,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
   "rope_scaling": {
@@ -38,7 +32,7 @@
   },
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "bfloat16",
+  "torch_dtype": "float32",
   "transformers_version": "4.54.1",
   "use_cache": true,
   "vocab_size": 128256
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01469a10977f03f5da4ae178136135866166b2b291237bddb6e7d8a569d823bc
-size 124066768
+oid sha256:51111d7052986501f4f8976ed86e6c838a273ddea1d9d8ce12d92d05c4d75c80
+size 1090083944
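The commit message describes the upload as a tiny random Llama-3.3 model with seed 42. A sketch of how such a checkpoint could be produced with transformers and torch; the actual generation script is not part of the commit, so treat the calls below as an assumption:

```python
import torch
from transformers import LlamaConfig, LlamaForCausalLM

# Dimensions copied from the updated config.json
# (rope_scaling and special token ids omitted for brevity)
config = LlamaConfig(
    vocab_size=128256,
    hidden_size=768,
    intermediate_size=2048,
    num_hidden_layers=12,
    num_attention_heads=12,
    num_key_value_heads=4,
    head_dim=64,
    max_position_embeddings=2048,
    rope_theta=500000.0,
    tie_word_embeddings=False,
)

torch.manual_seed(42)             # seed 42, as in the commit message
model = LlamaForCausalLM(config)  # randomly initialized weights
model.save_pretrained("Llama-3.3-Tiny-Instruct")  # writes model.safetensors + config.json
```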