LXT committed
Commit 3bf5ce3 · verified · 1 Parent(s): e7e9303

Update README.md

Files changed (1)
  1. README.md +10 -12
README.md CHANGED
@@ -1,21 +1,19 @@
 ---
-license: mit
+license: apache-2.0
 pipeline_tag: image-text-to-text
 library_name: transformers
 base_model:
-- OpenGVLab/InternVL2-1B
-- OpenGVLab/InternVL2_5-8B
-- OpenGVLab/InternVL2_5-4B
-- OpenGVLab/InternViT-300M-448px-V2_5
-- internlm/internlm2_5-7b-chat
-- Qwen/Qwen2-0.5B-Instruct
-- Qwen/Qwen2.5-3B-Instruct
+- OpenGVLab/InternVL2-1B
+- OpenGVLab/InternVL2_5-8B
+- OpenGVLab/InternVL2_5-4B
+- OpenGVLab/InternViT-300M-448px-V2_5
+- internlm/internlm2_5-7b-chat
+- Qwen/Qwen2.5-3B-Instruct
 base_model_relation: merge
 language:
-- multilingual
+- multilingual
 tags:
-- Sa2VA
-- custom_code
+- Sa2VA
 ---
 
 # Sa2VA: Marrying SAM2 with LLaVA for Dense Grounded Understanding of Images and Videos
@@ -160,4 +158,4 @@ If you find this project useful in your research, please consider citing:
 journal={arXiv preprint},
 year={2025}
 }
-```
+```
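
For reference, the README front matter after this commit reads as follows; this is reconstructed purely from the `+` side of the diff above, with nothing added:

```yaml
---
license: apache-2.0
pipeline_tag: image-text-to-text
library_name: transformers
base_model:
- OpenGVLab/InternVL2-1B
- OpenGVLab/InternVL2_5-8B
- OpenGVLab/InternVL2_5-4B
- OpenGVLab/InternViT-300M-448px-V2_5
- internlm/internlm2_5-7b-chat
- Qwen/Qwen2.5-3B-Instruct
base_model_relation: merge
language:
- multilingual
tags:
- Sa2VA
---
```

Net effect of the commit: the license moves from `mit` to `apache-2.0`, `Qwen/Qwen2-0.5B-Instruct` is dropped from `base_model`, and the `custom_code` tag is removed.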