Add AIBOM (#13), opened by sabato-nocera
liuhaotian_llava-v1.6-mistral-7b.json
ADDED
@@ -0,0 +1,61 @@
{
  "bomFormat": "CycloneDX",
  "specVersion": "1.6",
  "serialNumber": "urn:uuid:41ae32a7-2fc0-431a-b154-8d65e12c9b29",
  "version": 1,
  "metadata": {
    "timestamp": "2025-06-05T09:41:42.475961+00:00",
    "component": {
      "type": "machine-learning-model",
      "bom-ref": "liuhaotian/llava-v1.6-mistral-7b-847e8f91-dade-52f2-9352-ab32573990ab",
      "name": "liuhaotian/llava-v1.6-mistral-7b",
      "externalReferences": [
        {
          "url": "https://huggingface.co/liuhaotian/llava-v1.6-mistral-7b",
          "type": "documentation"
        }
      ],
      "modelCard": {
        "modelParameters": {
          "task": "image-text-to-text",
          "architectureFamily": "llava_mistral",
          "modelArchitecture": "LlavaMistralForCausalLM"
        },
        "properties": [
          {
            "name": "library_name",
            "value": "transformers"
          }
        ],
        "considerations": {
          "useCases": "**Primary intended uses:**The primary use of LLaVA is research on large multimodal models and chatbots.**Primary intended users:**The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence."
        }
      },
      "authors": [
        {
          "name": "liuhaotian"
        }
      ],
      "licenses": [
        {
          "license": {
            "id": "Apache-2.0",
            "url": "https://spdx.org/licenses/Apache-2.0.html"
          }
        }
      ],
      "description": "**Model type:**LLaVA is an open-source chatbot trained by fine-tuning LLM on multimodal instruction-following data.It is an auto-regressive language model, based on the transformer architecture.Base LLM: [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)**Model date:**LLaVA-v1.6-Mistral-7B was trained in December 2023.**Paper or resources for more information:**https://llava-vl.github.io/",
      "tags": [
        "transformers",
        "safetensors",
        "llava_mistral",
        "text-generation",
        "image-text-to-text",
        "conversational",
        "license:apache-2.0",
        "autotrain_compatible",
        "region:us"
      ]
    }
  }
}