---
library_name: transformers
pipeline_tag: image-text-to-text
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- openbmb/MiniCPM-V-4
---
This tiny model is for debugging. It is randomly initialized, using a config adapted from [openbmb/MiniCPM-V-4](https://huggingface.co/openbmb/MiniCPM-V-4).
### Example usage:
```python
import numpy as np
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
model_id = "yujiepan/minicpm-v-4-tiny-random"
model = AutoModel.from_pretrained(
    model_id,
    trust_remote_code=True,
    attn_implementation='sdpa',
    torch_dtype=torch.bfloat16,
)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8), 'RGB')
question = "What is the landform in the picture?"
msgs = [{'role': 'user', 'content': [image, question]}]
answer = model.chat(
    msgs=msgs,
    image=image,
    tokenizer=tokenizer,
    max_new_tokens=32,
)
print(answer)

# Second round of chat: pass the history of the multi-turn conversation.
msgs.append({"role": "assistant", "content": [answer]})
msgs.append({"role": "user", "content": [
    "What should I pay attention to when traveling here?"]})
answer = model.chat(
    msgs=msgs,
    image=None,
    tokenizer=tokenizer,
    max_new_tokens=32,
)
print(answer)
print(answer)
```
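Since the weights are random, the generated text is meaningless; the point is that the forward pass runs end to end. A quick parameter count (a small sketch reusing the `model` loaded above, not part of the original card) confirms the checkpoint is tiny:

```python
# The shrunken config should yield a model far smaller than the original MiniCPM-V-4.
num_params = sum(p.numel() for p in model.parameters())
print(f"Total parameters: {num_params / 1e6:.2f}M")
```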
### Code to create this repo:
```python
import json
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from transformers import (
    AutoConfig,
    AutoModel,
    AutoProcessor,
    GenerationConfig,
    set_seed,
)
source_model_id = "openbmb/MiniCPM-V-4"
save_folder = "/tmp/yujiepan/minicpm-v-4-tiny-random"
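# Copy the processor files over; save_pretrained also creates save_folder on disk.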
processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)

# Point auto_map entries at the source repo so the remote modeling code is fetched from there.
for k, v in config_json['auto_map'].items():
    config_json['auto_map'][k] = f'{source_model_id}--{v}'
config_json['head_dim'] = 32
config_json["hidden_size"] = 128 # required by Sampler -- num_heads=embed_dim // 128
config_json['intermediate_size'] = 128
config_json['num_attention_heads'] = 2
config_json['num_key_value_heads'] = 1
config_json['num_hidden_layers'] = 2
config_json['tie_word_embeddings'] = True
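# Keep only the first head_dim // 2 = 16 rope scaling factors: longrope rescales each
# rotary dimension pair, so the factor length must match the new head_dim of 32.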
factor = config_json['rope_scaling']['long_factor']
config_json['rope_scaling']['long_factor'] = factor[:16]
config_json['rope_scaling']['short_factor'] = factor[:16]
config_json['vision_config']['intermediate_size'] = 128
config_json['vision_config']['hidden_size'] = 64
config_json['vision_config']['num_attention_heads'] = 2
config_json['vision_config']['num_hidden_layers'] = 2
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
json.dump(config_json, f, indent=2)
config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
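# Temporarily switch the default dtype so the freshly created weights are bfloat16,
# then restore float32 for any later tensor creation.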
torch.set_default_dtype(torch.bfloat16)
model = AutoModel.from_config(config, trust_remote_code=True)
torch.set_default_dtype(torch.float32)
model.generation_config = GenerationConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)
set_seed(42)
num_params = sum(p.numel() for p in model.parameters())
with torch.no_grad():
    # Re-initialize every parameter with small random values (seeded for reproducibility).
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape, p.dtype, p.device, f'{p.numel() / num_params * 100: .2f}%')
model.save_pretrained(save_folder)


def modify_automap(path, source_model_id):
    """Rewrite auto_map entries in a saved JSON config to point at the source repo."""
    with open(path, 'r', encoding='utf-8') as f:
        content = json.load(f)
    automap = {}
    if content.get('auto_map', None) is not None:
        for key, value in content.get('auto_map').items():
            if isinstance(value, str):
                value = source_model_id + '--' + value.split('--')[-1]
            else:
                value = [(source_model_id + '--' + v.split('--')[-1]) for v in value]
            automap[key] = value
        with open(path, 'w', encoding='utf-8') as f:
            json.dump({**content, 'auto_map': automap}, f, indent=2)


modify_automap(f"{save_folder}/config.json", source_model_id)
modify_automap(f'{save_folder}/processor_config.json', source_model_id)
modify_automap(f'{save_folder}/preprocessor_config.json', source_model_id)
modify_automap(f'{save_folder}/tokenizer_config.json', source_model_id)

# Remove the local modeling .py files: they will be fetched from the source repo via auto_map.
for f in Path(save_folder).glob('*.py'):
    f.unlink()
``` |
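As a rough smoke test (a sketch, assuming the script above has already written the checkpoint to `save_folder`), the saved folder can be reloaded to verify that the remapped `auto_map` still resolves the modeling code from the source repo:

```python
import torch
from transformers import AutoModel, AutoProcessor

save_folder = "/tmp/yujiepan/minicpm-v-4-tiny-random"

# trust_remote_code fetches the modeling code from openbmb/MiniCPM-V-4
# via the rewritten auto_map entries.
model = AutoModel.from_pretrained(
    save_folder, trust_remote_code=True, torch_dtype=torch.bfloat16
)
processor = AutoProcessor.from_pretrained(save_folder, trust_remote_code=True)
print(sum(p.numel() for p in model.parameters()), "parameters")
```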