Update preprocessor_config.json

ai-modelscope 2025-02-26 20:05:09 +08:00
parent 78d6bca530
commit 2c6d9ffdae


@@ -7,8 +7,6 @@ pipeline_tag: image-text-to-text
 tags:
 - multimodal
 library_name: transformers
-base_model:
-- Qwen/Qwen2.5-VL-7B-Instruct
 ---
 # Qwen2.5-VL-7B-Instruct
@@ -142,13 +140,10 @@ Here we show a code snippet to show you how to use the chat model with `transformers`
 ```python
 from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
 from qwen_vl_utils import process_vision_info
-from modelscope import snapshot_download
-model_dir=snapshot_download("Qwen/Qwen2.5-VL-7B-Instruct")
 # default: Load the model on the available device(s)
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    model_dir, torch_dtype="auto", device_map="auto"
+    "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
 )
 # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
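
This hunk drops the ModelScope `snapshot_download` indirection and passes the Hugging Face model ID to `from_pretrained` directly. For reference, here is a minimal runnable sketch of the loading code as it reads after the change, including the `flash_attention_2` variant the comment refers to; the `transformers >= 4.49` floor and an installed `flash-attn` package for the second variant are assumptions, not part of the diff.

```python
# Minimal sketch of the loading code after this change.
# Assumes transformers >= 4.49 (which ships Qwen2_5_VLForConditionalGeneration)
# and, for the second variant, an installed flash-attn package.
import torch
from transformers import Qwen2_5_VLForConditionalGeneration

# Default: let transformers pick the dtype and device placement.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
)

# Optional: enable flash_attention_2, as recommended for multi-image
# and video workloads in the comment above.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
```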
@@ -160,7 +155,7 @@ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
 # )
 # default processor
-processor = AutoProcessor.from_pretrained(model_dir)
+processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
 # The default range for the number of visual tokens per image in the model is 4-16384.
 # You can set min_pixels and max_pixels according to your needs, such as a token range of 256-1280, to balance performance and cost.
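
The `min_pixels` and `max_pixels` knobs mentioned in the last comment are passed straight to `AutoProcessor.from_pretrained`, as in the model card's own snippet. A short sketch of the 256-1280 token range suggested above; the bounds are expressed in pixels because each visual token covers a 28x28 pixel area in Qwen2.5-VL preprocessing.

```python
# Sketch: constrain the per-image visual-token budget to roughly 256-1280 tokens.
# One visual token corresponds to a 28x28 pixel area, so the limits are in pixels.
from transformers import AutoProcessor

min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28
processor = AutoProcessor.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    min_pixels=min_pixels,
    max_pixels=max_pixels,
)
```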