freakreation committed
Commit 4231883 · verified · 1 parent: 27ee215

Upload model

README.md ADDED
@@ -0,0 +1,42 @@
+ ---
+ tags:
+ - llama
+ - adapter-transformers
+ datasets:
+ - mrpc
+ ---
+
+ # Adapter `freakreation/my_adapterfusion_adapters` for ../models/llama-3.2-1b/llama-3.2-1b-model
+
+ An [adapter](https://adapterhub.ml) for the `../models/llama-3.2-1b/llama-3.2-1b-model` model that was trained on the [mrpc](https://huggingface.co/datasets/mrpc/) dataset and includes a prediction head for classification.
+
+ This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library.
+
+ ## Usage
+
+ First, install `adapters`:
+
+ ```
+ pip install -U adapters
+ ```
+
+ Now, the adapter can be loaded and activated like this:
+
+ ```python
+ from adapters import AutoAdapterModel
+
+ model = AutoAdapterModel.from_pretrained("../models/llama-3.2-1b/llama-3.2-1b-model")
+ adapter_name = model.load_adapter("freakreation/my_adapterfusion_adapters", set_active=True)
+ ```
+
+ ## Architecture & Training
+
+ <!-- Add some description here -->
+
+ ## Evaluation results
+
+ <!-- Add some description here -->
+
+ ## Citation
+
+ <!-- Add some description here -->
adapter_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "config": {
+     "adapter_residual_before_ln": false,
+     "cross_adapter": false,
+     "dropout": 0.0,
+     "factorized_phm_W": true,
+     "factorized_phm_rule": false,
+     "hypercomplex_nonlinearity": "glorot-uniform",
+     "init_weights": "bert",
+     "inv_adapter": null,
+     "inv_adapter_reduction_factor": null,
+     "is_parallel": false,
+     "learn_phm": true,
+     "leave_out": [],
+     "ln_after": false,
+     "ln_before": false,
+     "mh_adapter": false,
+     "non_linearity": "relu",
+     "original_ln_after": true,
+     "original_ln_before": true,
+     "output_adapter": true,
+     "phm_bias": true,
+     "phm_c_init": "normal",
+     "phm_dim": 4,
+     "phm_init_range": 0.0001,
+     "phm_layer": false,
+     "phm_rank": 1,
+     "reduction_factor": 16,
+     "residual_before_ln": true,
+     "scaling": 1.0,
+     "shared_W_phm": false,
+     "shared_phm_rule": true,
+     "stochastic_depth": 0.0,
+     "use_gating": false
+   },
+   "hidden_size": 2048,
+   "model_class": "LlamaAdapterModel",
+   "model_name": "../models/llama-3.2-1b/llama-3.2-1b-model",
+   "model_type": "llama",
+   "name": "mrpc_seq_bn_upload",
+   "version": "adapters.1.1.0"
+ }
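
The values in `adapter_config.json` describe a sequential bottleneck ("seq_bn") adapter: only a post-FFN adapter per layer (`mh_adapter` false, `output_adapter` true), ReLU non-linearity, BERT-style weight init, and a reduction factor of 16, which bottlenecks the 2048-dimensional hidden states down to 128. These values appear to match the defaults of the Adapters library's `SeqBnConfig`. As a hedged sketch (not the author's actual training script), an equivalent adapter could be set up from scratch like this:

```python
from adapters import AutoAdapterModel, SeqBnConfig

# Assumption: creating an equivalent adapter from scratch instead of loading the uploaded weights.
model = AutoAdapterModel.from_pretrained("../models/llama-3.2-1b/llama-3.2-1b-model")

# Mirrors the key fields of adapter_config.json; the remaining fields are SeqBnConfig defaults.
config = SeqBnConfig(
    reduction_factor=16,   # 2048-dim hidden states are bottlenecked to 128
    non_linearity="relu",
    init_weights="bert",
)
model.add_adapter("mrpc_seq_bn_upload", config=config)
model.train_adapter("mrpc_seq_bn_upload")  # freeze the base model, train only the adapter
```
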
head_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "config": {
+     "activation_function": "tanh",
+     "bias": true,
+     "dropout_prob": null,
+     "head_type": "classification",
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layers": 2,
+     "num_labels": 2,
+     "use_pooler": false
+   },
+   "hidden_size": 2048,
+   "model_class": "LlamaAdapterModel",
+   "model_name": "../models/llama-3.2-1b/llama-3.2-1b-model",
+   "model_type": "llama",
+   "name": "mrpc_seq_bn_upload",
+   "version": "adapters.1.1.0"
+ }
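
`head_config.json` describes the prediction head that ships with the adapter: a two-layer classification head with tanh activation, two labels (generic `LABEL_0`/`LABEL_1`), and no pooler. When the adapter is loaded from the Hub as in the README, this head is restored automatically alongside the adapter weights; the sketch below shows how an equivalent head could be attached to a fresh model, as an illustration rather than the exact setup used here:

```python
from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("../models/llama-3.2-1b/llama-3.2-1b-model")

# Mirrors head_config.json: a 2-layer classification head with tanh activation,
# two labels (generic LABEL_0/LABEL_1), and no pooler.
model.add_classification_head(
    "mrpc_seq_bn_upload",
    num_labels=2,
    layers=2,
    activation_function="tanh",
    use_pooler=False,
)
```
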
pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92db96a25af62b5ab052674c51bb98fc59f645903df74fefcbe1d120e11e6372
+ size 33716982
pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2a235ff58288e245e8a3ae823813edda4502264d7d9542d1f6669787cc0d4f9
+ size 16803944