diff --git a/README.md b/README.md
index 29542c0..5f16903 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,6 @@ pipeline_tag: image-text-to-text
 tags:
 - multimodal
 library_name: transformers
-base_model:
-- Qwen/Qwen2.5-VL-7B-Instruct
 ---
 
 # Qwen2.5-VL-7B-Instruct
@@ -142,13 +140,10 @@ Here we show a code snippet to show you how to use the chat model with `transfor
 ```python
 from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
 from qwen_vl_utils import process_vision_info
-from modelscope import snapshot_download
-
-model_dir=snapshot_download("Qwen/Qwen2.5-VL-7B-Instruct")
 
 # default: Load the model on the available device(s)
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-    model_dir, torch_dtype="auto", device_map="auto"
+    "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
 )
 
 # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
@@ -160,7 +155,7 @@ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
 # )
 
 # default processer
-processor = AutoProcessor.from_pretrained(model_dir)
+processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
 
 # The default range for the number of visual tokens per image in the model is 4-16384.
 # You can set min_pixels and max_pixels according to your needs, such as a token range of 256-1280, to balance performance and cost.
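
For context, a minimal sketch of what the README snippet looks like after this change: the model and processor are loaded directly by their Hugging Face repo id instead of going through a `modelscope` snapshot directory. It assumes a `transformers` release that ships `Qwen2_5_VLForConditionalGeneration`; the `min_pixels`/`max_pixels` values are only illustrative, per the comments in the snippet above.

```python
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

# Load the model directly by repo id (no modelscope snapshot_download step).
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto"
)

# Optionally bound the number of visual tokens per image; these values are
# illustrative and can be tuned to trade quality against cost.
processor = AutoProcessor.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    min_pixels=256 * 28 * 28,
    max_pixels=1280 * 28 * 28,
)
```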