Update README.md
Browse files
README.md
CHANGED
|
@@ -16,9 +16,9 @@ tags:
|
|
| 16 |
|
| 17 |
# Mini-InternVL-Chat-2B-V1-5
|
| 18 |
|
| 19 |
-
[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[
|
| 20 |
|
| 21 |
-
[\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
|
| 22 |
|
| 23 |
## Introduction
|
| 24 |
|
|
@@ -547,6 +547,12 @@ This project is released under the MIT License. This project uses the pre-traine
|
|
| 547 |
If you find this project useful in your research, please consider citing:
|
| 548 |
|
| 549 |
```BibTeX
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 550 |
@article{gao2024mini,
|
| 551 |
title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
|
| 552 |
author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
|
|
@@ -559,10 +565,11 @@ If you find this project useful in your research, please consider citing:
|
|
| 559 |
journal={arXiv preprint arXiv:2404.16821},
|
| 560 |
year={2024}
|
| 561 |
}
|
| 562 |
-
@inproceedings{chen2024internvl,
|
| 563 |
-
title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
|
| 564 |
-
author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
|
| 565 |
-
|
| 566 |
-
|
|
|
|
| 567 |
}
|
| 568 |
```
|
|
|
|
| 16 |
|
| 17 |
# Mini-InternVL-Chat-2B-V1-5
|
| 18 |
|
| 19 |
+
[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[📜 InternVL 1.0\]](https://huggingface.co/papers/2312.14238) [\[📜 InternVL 1.5\]](https://huggingface.co/papers/2404.16821) [\[📜 Mini-InternVL\]](https://arxiv.org/abs/2410.16261) [\[📜 InternVL 2.5\]](https://huggingface.co/papers/2412.05271)
|
| 20 |
|
| 21 |
+
[\[🆕 Blog\]](https://internvl.github.io/blog/) [\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
|
| 22 |
|
| 23 |
## Introduction
|
| 24 |
|
|
|
|
| 547 |
If you find this project useful in your research, please consider citing:
|
| 548 |
|
| 549 |
```BibTeX
|
| 550 |
+
@article{chen2024expanding,
|
| 551 |
+
title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling},
|
| 552 |
+
author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others},
|
| 553 |
+
journal={arXiv preprint arXiv:2412.05271},
|
| 554 |
+
year={2024}
|
| 555 |
+
}
|
| 556 |
@article{gao2024mini,
|
| 557 |
title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
|
| 558 |
author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
|
|
|
|
| 565 |
journal={arXiv preprint arXiv:2404.16821},
|
| 566 |
year={2024}
|
| 567 |
}
|
| 568 |
+
@inproceedings{chen2024internvl,
|
| 569 |
+
title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
|
| 570 |
+
author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
|
| 571 |
+
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
|
| 572 |
+
pages={24185--24198},
|
| 573 |
+
year={2024}
|
| 574 |
}
|
| 575 |
```
|