Update preprocessor_config.json

parent 78d6bca530
commit 2c6d9ffdae
@@ -7,8 +7,6 @@ pipeline_tag: image-text-to-text
 tags:
 - multimodal
 library_name: transformers
-base_model:
-- Qwen/Qwen2.5-VL-7B-Instruct
 ---
 
 # Qwen2.5-VL-7B-Instruct
@@ -142,13 +140,10 @@ Here we show a code snippet to show you how to use the chat model with `transformers`:
 ```python
 from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
 from qwen_vl_utils import process_vision_info
-from modelscope import snapshot_download
-
-model_dir=snapshot_download("Qwen/Qwen2.5-VL-7B-Instruct")
 
 # default: Load the model on the available device(s)
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    model_dir, torch_dtype="auto", device_map="auto"
+    "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
 )
 
 # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
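The net effect of this hunk is that the snippet no longer routes the download through ModelScope's `snapshot_download`; it passes the Hugging Face repo id straight to `from_pretrained`. A minimal runnable sketch of the loading code after the change; the commented FlashAttention-2 variant is an assumption based on the README's recommendation above, since its exact kwargs are not shown in this diff:

```python
from transformers import Qwen2_5_VLForConditionalGeneration

# Resolve weights directly from the Hub repo id; torch_dtype="auto" keeps the
# checkpoint's native dtype and device_map="auto" spreads layers across devices.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
)

# Assumed variant, per the recommendation in the snippet: enable FlashAttention-2
# (requires the flash-attn package) for multi-image and video workloads.
# import torch
# model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
#     "Qwen/Qwen2.5-VL-7B-Instruct",
#     torch_dtype=torch.bfloat16,
#     attn_implementation="flash_attention_2",
#     device_map="auto",
# )
```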
@@ -160,7 +155,7 @@ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
 # )
 
 # default processor
-processor = AutoProcessor.from_pretrained(model_dir)
+processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
 
 # The default range for the number of visual tokens per image in the model is 4-16384.
 # You can set min_pixels and max_pixels according to your needs, such as a token range of 256-1280, to balance performance and cost.
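The two trailing comments describe the processor's tunable visual-token budget. As a sketch of what that tuning could look like, assuming (as the upstream Qwen2.5-VL model card documents) that each visual token corresponds to roughly a 28x28-pixel patch and that the processor accepts `min_pixels`/`max_pixels` keyword arguments:

```python
from transformers import AutoProcessor

# Target a budget of 256-1280 visual tokens per image,
# at roughly 28x28 pixels per visual token.
min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28

processor = AutoProcessor.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    min_pixels=min_pixels,
    max_pixels=max_pixels,
)
```

Narrowing the range trades image detail for speed and memory; the 4-16384 default accepts almost any input resolution.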