skar0 committed on
Commit ee12b1f · verified · 1 Parent(s): 3dbe449

Upload tiny random Llama-3.3 model (seed 42)

Files changed (3):
  1. README.md +4 -4
  2. config.json +8 -2
  3. model.safetensors +2 -2
README.md CHANGED
@@ -24,16 +24,16 @@ This is a tiny random version of the meta-llama/Llama-3.3-70B-Instruct model, cr
 
 ## Parameters
 
-- **Total parameters**: ~75,110,656
-- **Trainable parameters**: ~75,110,656
+- **Total parameters**: ~42,277,376
+- **Trainable parameters**: ~42,277,376
 
 ## Usage
 
 ```python
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
 
 # Load model and tokenizer
-model = AutoModelForCausalLM.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct")
+model = AutoModelForSequenceClassification.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct")
 tokenizer = AutoTokenizer.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct")
 
 # Generate text (note: this model has random weights!)
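Taken together, the README changes repoint the usage snippet from a causal-LM head to a sequence-classification head (the trailing "# Generate text" comment predates the swap; a classification head returns logits, not generated tokens). Below is a minimal sketch of running the updated snippet end to end. The example sentence is arbitrary, and the pad-token wiring is an assumption: Llama tokenizers ship without a pad token, which `LlamaForSequenceClassification` needs in order to locate the last token in padded batches.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "AlignmentResearch/Llama-3.3-Tiny-Instruct"
model = AutoModelForSequenceClassification.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)

# Assumption: reuse EOS as pad, since the Llama tokenizer defines no pad token.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id

# Any text works; with num_labels == 1 the model emits a single LABEL_0 logit.
inputs = tokenizer("Hello, tiny random model!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1)

print(logits)  # value is meaningless: the weights are random (seed 42)
```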
config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "architectures": [
-    "LlamaForCausalLM"
+    "LlamaForSequenceClassification"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
@@ -13,8 +13,14 @@
   "head_dim": 64,
   "hidden_act": "silu",
   "hidden_size": 256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
   "initializer_range": 0.02,
   "intermediate_size": 768,
+  "label2id": {
+    "LABEL_0": 0
+  },
   "max_position_embeddings": 131072,
   "mlp_bias": false,
   "model_type": "llama",
@@ -32,7 +38,7 @@
   },
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "float32",
+  "torch_dtype": "bfloat16",
   "transformers_version": "4.54.1",
   "use_cache": true,
   "vocab_size": 128256
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:90be80471e3b86e6f1aaea7c053977f9aa0f720669ca9635b5f97bba661a0f2c
-size 300454912
+oid sha256:aadd7211b5c104ff62695304c0dbf8884790d7e99a148d2d2ca57b7fd077bdd4
+size 84567424
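The new checkpoint size is consistent with the two config changes above: fewer parameters, now stored as bfloat16 (2 bytes each) instead of float32 (4 bytes each). A quick sanity check; the roughly 12 KB left over in each case is plausibly the safetensors header, which this estimate ignores:

```python
old_params, new_params = 75_110_656, 42_277_376

print(old_params * 4)  # 300442624, close to the old size 300454912 (float32)
print(new_params * 2)  # 84554752, close to the new size 84567424 (bfloat16)
```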