yujiepan committed
Commit 9851b99 · verified · 1 Parent(s): 4f4f1d7

Upload folder using huggingface_hub

Files changed (3)
  1. README.md +1 -46
  2. config.json +1 -5
  3. preprocessor_config.json +1 -1
README.md CHANGED
@@ -12,55 +12,10 @@ base_model:
  
  This tiny model is for debugging. It is randomly initialized with the config adapted from [google/gemma-3n-E4B-it](https://huggingface.co/google/gemma-3n-E4B-it).
  
- | Model ID | Notes |
- | ------------------------------------------------------------------------------------- | ------------------------------- |
- | [yujiepan/gemma-3n-tiny-random](https://huggingface.co/yujiepan/gemma-3n-tiny-random) | hidden size is 32 |
- | [yujiepan/gemma-3n-tiny-random-dim4](https://huggingface.co/yujiepan/gemma-3n-tiny-random-dim4) | hidden size is 4; potentially not supported in paged attention kernels |
-
  ### Example usage:
  
  ```python
- import torch
-
- from transformers import pipeline
-
- model_id = "yujiepan/gemma-3n-tiny-random-dim4"
- pipe = pipeline(
-     task="image-text-to-text",
-     model=model_id,
-     device=0,
-     torch_dtype=torch.bfloat16
- )
-
- # temporary patch for audio tower
- from accelerate.hooks import ModelHook, add_hook_to_module
-
- class EnsureDtype(ModelHook):
-     def pre_forward(self, module, *args, **kwargs):
-         args = list(args)
-         args[0] = args[0].to(module.dtype)
-         return super().pre_forward(module, *args, **kwargs)
- add_hook_to_module(pipe.model.audio_tower, EnsureDtype())
-
- messages = [
-     {
-         "role": "system",
-         "content": [
-             {"type": "text", "text": "You are a helpful assistant."}
-         ]
-     },
-     {
-         "role": "user",
-         "content": [
-             {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
-             # audio is buggy for now: bf16 x fp32
-             {"type": "audio", "url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Audio/glass-breaking-151256.mp3"},
-             {"type": "text", "text": "Which image is cuter?"},
-         ]
-     },
- ]
- result = pipe(messages, min_new_tokens=512, max_new_tokens=512, do_sample=True)
- print(result)
+ {code_to_run.strip()}
  ```
  
  ### Codes to create this repo:
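
Note that the new README's example block contains only the unrendered template placeholder `{code_to_run.strip()}`, so the example removed above remains the practical reference for loading the model. Below is a trimmed, hedged sketch adapted from it: image plus text only, with the repo id taken from the removed example (this commit page does not name the repo) and a transformers build with Gemma-3n support assumed.

```python
# Minimal sketch adapted from the example removed in this commit.
# Assumptions: the repo id below and a transformers version that supports Gemma-3n.
# Image + text only, so the audio-tower dtype hook from the removed example is not needed.
import torch
from transformers import pipeline

pipe = pipeline(
    task="image-text-to-text",
    model="yujiepan/gemma-3n-tiny-random-dim4",  # repo id from the removed example
    torch_dtype=torch.bfloat16,
)
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
            {"type": "text", "text": "Which image is cuter?"},
        ],
    },
]
result = pipe(messages, max_new_tokens=32, do_sample=True)
print(result)
```

As noted in the removed example, audio inputs currently hit a bf16/fp32 mismatch in the audio tower, which that example worked around with an `EnsureDtype` accelerate hook.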
config.json CHANGED
@@ -10,7 +10,6 @@
   "conf_conv_kernel_size": 5,
   "conf_num_attention_heads": 2,
   "conf_num_hidden_layers": 2,
- "conf_positional_bias_size": 256,
   "conf_reduction_factor": 4,
   "conf_residual_weight": 0.5,
   "gradient_clipping": 10000000000.0,
@@ -22,7 +21,6 @@
   128,
   32
   ],
- "sscp_conv_eps": 0.001,
   "sscp_conv_group_norm_eps": 0.001,
   "sscp_conv_kernel_size": [
   [
@@ -71,7 +69,6 @@
   "altup_active_idx": 0,
   "altup_coef_clip": 120.0,
   "altup_correct_scale": true,
- "altup_lr_multiplier": 1.0,
   "altup_num_inputs": 4,
   "attention_bias": false,
   "attention_dropout": 0.0,
@@ -100,7 +97,6 @@
   "num_hidden_layers": 4,
   "num_key_value_heads": 1,
   "num_kv_shared_layers": 2,
- "query_pre_attn_scalar": 256,
   "rms_norm_eps": 1e-06,
   "rope_local_base_freq": 10000.0,
   "rope_scaling": null,
@@ -115,7 +111,7 @@
   "transformers_version": "4.54.0.dev0",
   "vision_config": {
   "architecture": "mobilenetv5_300m_enc",
- "do_pooling": true,
+ "do_pooling": false,
   "hidden_size": 2048,
   "initializer_range": 0.02,
   "label_names": [
preprocessor_config.json CHANGED
@@ -41,7 +41,7 @@
   "processor_class": "Gemma3nProcessor",
   "resample": 2,
   "rescale_factor": 0.00392156862745098,
- "return_attention_mask": false,
+ "return_attention_mask": true,
   "return_tensors": null,
   "sampling_rate": 16000,
   "size": {