Update README.md
## Training Details
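The snippet below shows how this checkpoint is loaded as a `VisionEncoderDecoderModel` (using the same repository for both encoder and decoder) and how a captioning loss is computed for a single image–caption pair: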
```python
from transformers import ViTImageProcessor, AutoTokenizer, VisionEncoderDecoderModel
from datasets import load_dataset

# Load the image processor, tokenizer, and encoder-decoder model from the checkpoint
image_processor = ViTImageProcessor.from_pretrained("LeroyDyer/Mixtral_AI_Cyber_Q_Vision")
tokenizer = AutoTokenizer.from_pretrained("LeroyDyer/Mixtral_AI_Cyber_Q_Vision")
model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "LeroyDyer/Mixtral_AI_Cyber_Q_Vision", "LeroyDyer/Mixtral_AI_Cyber_Q_Vision"
)

# The decoder needs an explicit start token and pad token for training
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

# Load a sample image and prepare the pixel values
dataset = load_dataset("huggingface/cats-image")
image = dataset["test"]["image"][0]
pixel_values = image_processor(image, return_tensors="pt").pixel_values

# Tokenize the target caption
labels = tokenizer(
    "an image of two cats chilling on a couch",
    return_tensors="pt",
).input_ids

# the forward function automatically creates the correct decoder_input_ids
loss = model(pixel_values=pixel_values, labels=labels).loss
```
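To turn the loss above into an actual fine-tuning step, it would typically be wrapped in an optimizer loop. A minimal sketch, assuming a standard PyTorch setup (the `AdamW` optimizer and learning rate here are illustrative choices, not values documented for this model):

```python
import torch

# Illustrative hyperparameters; not taken from this model card
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

model.train()
outputs = model(pixel_values=pixel_values, labels=labels)
outputs.loss.backward()  # backpropagate the captioning loss
optimizer.step()
optimizer.zero_grad()
```
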
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->