diff --git a/.gitattributes b/.gitattributes index 53d7257..9737fd8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -44,4 +44,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.tar filter=lfs diff=lfs merge=lfs -text *.wasm filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text -*tfevents* filter=lfs diff=lfs merge=lfs -text \ No newline at end of file +*tfevents* filter=lfs diff=lfs merge=lfs -text + +merges.txt filter=lfs diff=lfs merge=lfs -text +vocab.json filter=lfs diff=lfs merge=lfs -text +tokenizer.json filter=lfs diff=lfs merge=lfs -text \ No newline at end of file diff --git a/README.md b/README.md index 6a7b083..76692e6 100644 --- a/README.md +++ b/README.md @@ -1,47 +1,1226 @@ --- -license: Apache License 2.0 - -#model-type: -##如 gpt、phi、llama、chatglm、baichuan 等 -#- gpt - -#domain: -##如 nlp、cv、audio、multi-modal -#- nlp - -#language: -##语言代码列表 https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa -#- cn - -#metrics: -##如 CIDEr、Blue、ROUGE 等 -#- CIDEr - -#tags: -##各种自定义,包括 pretrained、fine-tuned、instruction-tuned、RL-tuned 等训练方法和其他 -#- pretrained - -#tools: -##如 vllm、fastchat、llamacpp、AdaSeq 等 -#- vllm +license: mit +library_name: dots_ocr +pipeline_tag: image-text-to-text +tags: +- ocr +language: +- en +- zh +- multilingual --- -### 当前模型的贡献者未提供更加详细的模型介绍。模型文件和权重,可浏览“模型文件”页面获取。 -#### 您可以通过如下git clone命令,或者ModelScope SDK来下载模型 -SDK下载 -```bash -#安装ModelScope -pip install modelscope +
# dots.ocr: Multilingual Document Layout Parsing in a Single Vision-Language Model

[![arXiv](https://img.shields.io/badge/Arxiv-dots.ocr-b31b1b.svg?logo=arXiv)]()
[![HuggingFace](https://img.shields.io/badge/HuggingFace%20Weights-black.svg?logo=HuggingFace)](https://huggingface.co/rednote-hilab/dots.ocr)

🖥️ Live Demo | 💬 WeChat | 📕 rednote
## Introduction

**dots.ocr** is a powerful, multilingual document parser that unifies layout detection and content recognition within a single vision-language model while maintaining good reading order. Despite its compact 1.7B-parameter LLM foundation, it achieves state-of-the-art (SOTA) performance.

1. **Powerful Performance:** **dots.ocr** achieves SOTA performance for text, tables, and reading order on [OmniDocBench](https://github.com/opendatalab/OmniDocBench), while delivering formula recognition results comparable to much larger models such as Doubao-1.5 and Gemini2.5-Pro.
2. **Multilingual Support:** **dots.ocr** demonstrates robust parsing capabilities for low-resource languages, achieving decisive advantages in both layout detection and content recognition on our in-house multilingual documents benchmark.
3. **Unified and Simple Architecture:** By leveraging a single vision-language model, **dots.ocr** offers a significantly more streamlined architecture than conventional methods that rely on complex, multi-model pipelines. Switching between tasks is accomplished simply by altering the input prompt, proving that a VLM can achieve detection results competitive with traditional detection models like DocLayout-YOLO.
4. **Efficient and Fast Performance:** Built upon a compact 1.7B LLM, **dots.ocr** provides faster inference speeds than many other high-performing models built on larger foundations.


### Performance Comparison: dots.ocr vs. Competing Models

> **Notes:**
> - The EN and ZH metrics are the end-to-end evaluation results of [OmniDocBench](https://github.com/opendatalab/OmniDocBench), and the Multilingual metric is the end-to-end evaluation result of dots.ocr-bench.


## News
* ```2025.07.30``` 🚀 We release [dots.ocr](https://github.com/rednote-hilab/dots.ocr), a multilingual document parsing model built on a 1.7B LLM, with SOTA performance.



## Benchmark Results

### 1. OmniDocBench

#### The end-to-end evaluation results of different tasks.
| Model Type | Methods | OverallEdit (EN) | OverallEdit (ZH) | TextEdit (EN) | TextEdit (ZH) | FormulaEdit (EN) | FormulaEdit (ZH) | TableTEDS (EN) | TableTEDS (ZH) | TableEdit (EN) | TableEdit (ZH) | Read OrderEdit (EN) | Read OrderEdit (ZH) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Pipeline Tools | MinerU | 0.150 | 0.357 | 0.061 | 0.215 | 0.278 | 0.577 | 78.6 | 62.1 | 0.180 | 0.344 | 0.079 | 0.292 |
| Pipeline Tools | Marker | 0.336 | 0.556 | 0.080 | 0.315 | 0.530 | 0.883 | 67.6 | 49.2 | 0.619 | 0.685 | 0.114 | 0.340 |
| Pipeline Tools | Mathpix | 0.191 | 0.365 | 0.105 | 0.384 | 0.306 | 0.454 | 77.0 | 67.1 | 0.243 | 0.320 | 0.108 | 0.304 |
| Pipeline Tools | Docling | 0.589 | 0.909 | 0.416 | 0.987 | 0.999 | 1 | 61.3 | 25.0 | 0.627 | 0.810 | 0.313 | 0.837 |
| Pipeline Tools | Pix2Text | 0.320 | 0.528 | 0.138 | 0.356 | 0.276 | 0.611 | 73.6 | 66.2 | 0.584 | 0.645 | 0.281 | 0.499 |
| Pipeline Tools | Unstructured | 0.586 | 0.716 | 0.198 | 0.481 | 0.999 | 1 | 0 | 0.06 | 1 | 0.998 | 0.145 | 0.387 |
| Pipeline Tools | OpenParse | 0.646 | 0.814 | 0.681 | 0.974 | 0.996 | 1 | 64.8 | 27.5 | 0.284 | 0.639 | 0.595 | 0.641 |
| Pipeline Tools | PPStruct-V3 | 0.145 | 0.206 | 0.058 | 0.088 | 0.295 | 0.535 | - | - | 0.159 | 0.109 | 0.069 | 0.091 |
| Expert VLMs | GOT-OCR | 0.287 | 0.411 | 0.189 | 0.315 | 0.360 | 0.528 | 53.2 | 47.2 | 0.459 | 0.520 | 0.141 | 0.280 |
| Expert VLMs | Nougat | 0.452 | 0.973 | 0.365 | 0.998 | 0.488 | 0.941 | 39.9 | 0 | 0.572 | 1.000 | 0.382 | 0.954 |
| Expert VLMs | Mistral OCR | 0.268 | 0.439 | 0.072 | 0.325 | 0.318 | 0.495 | 75.8 | 63.6 | 0.600 | 0.650 | 0.083 | 0.284 |
| Expert VLMs | OLMOCR-sglang | 0.326 | 0.469 | 0.097 | 0.293 | 0.455 | 0.655 | 68.1 | 61.3 | 0.608 | 0.652 | 0.145 | 0.277 |
| Expert VLMs | SmolDocling-256M | 0.493 | 0.816 | 0.262 | 0.838 | 0.753 | 0.997 | 44.9 | 16.5 | 0.729 | 0.907 | 0.227 | 0.522 |
| Expert VLMs | Dolphin | 0.206 | 0.306 | 0.107 | 0.197 | 0.447 | 0.580 | 77.3 | 67.2 | 0.180 | 0.285 | 0.091 | 0.162 |
| Expert VLMs | MinerU 2 | 0.139 | 0.240 | 0.047 | 0.109 | 0.297 | 0.536 | 82.5 | 79.0 | 0.141 | 0.195 | 0.069 | 0.118 |
| Expert VLMs | OCRFlux | 0.195 | 0.281 | 0.064 | 0.183 | 0.379 | 0.613 | 71.6 | 81.3 | 0.253 | 0.139 | 0.086 | 0.187 |
| Expert VLMs | MonkeyOCR-pro-3B | 0.138 | 0.206 | 0.067 | 0.107 | 0.246 | 0.421 | 81.5 | 87.5 | 0.139 | 0.111 | 0.100 | 0.185 |
| General VLMs | GPT4o | 0.233 | 0.399 | 0.144 | 0.409 | 0.425 | 0.606 | 72.0 | 62.9 | 0.234 | 0.329 | 0.128 | 0.251 |
| General VLMs | Qwen2-VL-72B | 0.252 | 0.327 | 0.096 | 0.218 | 0.404 | 0.487 | 76.8 | 76.4 | 0.387 | 0.408 | 0.119 | 0.193 |
| General VLMs | Qwen2.5-VL-72B | 0.214 | 0.261 | 0.092 | 0.18 | 0.315 | 0.434 | 82.9 | 83.9 | 0.341 | 0.262 | 0.106 | 0.168 |
| General VLMs | Gemini2.5-Pro | 0.148 | 0.212 | 0.055 | 0.168 | 0.356 | 0.439 | 85.8 | 86.4 | 0.13 | 0.119 | 0.049 | 0.121 |
| General VLMs | doubao-1-5-thinking-vision-pro-250428 | 0.140 | 0.162 | 0.043 | 0.085 | 0.295 | 0.384 | 83.3 | 89.3 | 0.165 | 0.085 | 0.058 | 0.094 |
| Expert VLMs | dots.ocr | 0.125 | 0.160 | 0.032 | 0.066 | 0.329 | 0.416 | 88.6 | 89.0 | 0.099 | 0.092 | 0.040 | 0.067 |
#### The end-to-end text recognition performance across 9 PDF page types.
| Model Type | Models | Book | Slides | Financial Report | Textbook | Exam Paper | Magazine | Academic Papers | Notes | Newspaper | Overall |
|---|---|---|---|---|---|---|---|---|---|---|---|
| Pipeline Tools | MinerU | 0.055 | 0.124 | 0.033 | 0.102 | 0.159 | 0.072 | 0.025 | 0.984 | 0.171 | 0.206 |
| Pipeline Tools | Marker | 0.074 | 0.340 | 0.089 | 0.319 | 0.452 | 0.153 | 0.059 | 0.651 | 0.192 | 0.274 |
| Pipeline Tools | Mathpix | 0.131 | 0.220 | 0.202 | 0.216 | 0.278 | 0.147 | 0.091 | 0.634 | 0.690 | 0.300 |
| Expert VLMs | GOT-OCR | 0.111 | 0.222 | 0.067 | 0.132 | 0.204 | 0.198 | 0.179 | 0.388 | 0.771 | 0.267 |
| Expert VLMs | Nougat | 0.734 | 0.958 | 1.000 | 0.820 | 0.930 | 0.830 | 0.214 | 0.991 | 0.871 | 0.806 |
| Expert VLMs | Dolphin | 0.091 | 0.131 | 0.057 | 0.146 | 0.231 | 0.121 | 0.074 | 0.363 | 0.307 | 0.177 |
| Expert VLMs | OCRFlux | 0.068 | 0.125 | 0.092 | 0.102 | 0.119 | 0.083 | 0.047 | 0.223 | 0.536 | 0.149 |
| Expert VLMs | MonkeyOCR-pro-3B | 0.084 | 0.129 | 0.060 | 0.090 | 0.107 | 0.073 | 0.050 | 0.171 | 0.107 | 0.100 |
| General VLMs | GPT4o | 0.157 | 0.163 | 0.348 | 0.187 | 0.281 | 0.173 | 0.146 | 0.607 | 0.751 | 0.316 |
| General VLMs | Qwen2.5-VL-7B | 0.148 | 0.053 | 0.111 | 0.137 | 0.189 | 0.117 | 0.134 | 0.204 | 0.706 | 0.205 |
| General VLMs | InternVL3-8B | 0.163 | 0.056 | 0.107 | 0.109 | 0.129 | 0.100 | 0.159 | 0.150 | 0.681 | 0.188 |
| General VLMs | doubao-1-5-thinking-vision-pro-250428 | 0.048 | 0.048 | 0.024 | 0.062 | 0.085 | 0.051 | 0.039 | 0.096 | 0.181 | 0.073 |
| Expert VLMs | dots.ocr | 0.031 | 0.047 | 0.011 | 0.082 | 0.079 | 0.028 | 0.029 | 0.109 | 0.056 | 0.055 |
> **Notes:**
> - The metrics are from [MonkeyOCR](https://github.com/Yuliang-Liu/MonkeyOCR), [OmniDocBench](https://github.com/opendatalab/OmniDocBench), and our own internal evaluations.
> - We remove the Page-header and Page-footer cells from the result markdown.
> - We use the tikz_preprocess pipeline to upsample the images to 200 DPI.


### 2. **dots.ocr-bench**

This is an in-house benchmark containing 1,493 PDF images across 100 languages.

#### The end-to-end evaluation results of different tasks.
| Methods | OverallEdit | TextEdit | FormulaEdit | TableTEDS | TableEdit | Read OrderEdit |
|---|---|---|---|---|---|---|
| MonkeyOCR-3B | 0.483 | 0.445 | 0.627 | 50.93 | 0.452 | 0.409 |
| doubao-1-5-thinking-vision-pro-250428 | 0.291 | 0.226 | 0.440 | 71.2 | 0.260 | 0.238 |
| doubao-1-6 | 0.299 | 0.270 | 0.417 | 71.0 | 0.258 | 0.253 |
| Gemini2.5-Pro | 0.251 | 0.163 | 0.402 | 77.1 | 0.236 | 0.202 |
| dots.ocr | 0.177 | 0.075 | 0.297 | 79.2 | 0.186 | 0.152 |
> **Notes:**
> - We use the same metric calculation pipeline as [OmniDocBench](https://github.com/opendatalab/OmniDocBench).
> - We remove the Page-header and Page-footer cells from the result markdown.

#### Layout Detection
**F1@IoU=.50:.05:.95↑**

| Method | Overall | Text | Formula | Table | Picture |
|---|---|---|---|---|---|
| DocLayout-YOLO-DocStructBench | 0.733 | 0.694 | 0.480 | 0.803 | 0.619 |
| dots.ocr-parse all | 0.831 | 0.801 | 0.654 | 0.838 | 0.748 |
| dots.ocr-detection only | 0.845 | 0.816 | 0.716 | 0.875 | 0.765 |

**F1@IoU=.50↑**

| Method | Overall | Text | Formula | Table | Picture |
|---|---|---|---|---|---|
| DocLayout-YOLO-DocStructBench | 0.806 | 0.779 | 0.620 | 0.858 | 0.678 |
| dots.ocr-parse all | 0.922 | 0.909 | 0.770 | 0.888 | 0.831 |
| dots.ocr-detection only | 0.930 | 0.917 | 0.832 | 0.918 | 0.843 |
> **Notes:**
> - We use prompt_layout_all_en for **parse all** and prompt_layout_only_en for **detection only**; please refer to [prompts](https://github.com/rednote-hilab/dots.ocr/blob/master/dots_ocr/utils/prompts.py).


### 3. olmOCR-bench
| Model | ArXiv | Old Scans Math | Tables | Old Scans | Headers and Footers | Multi column | Long Tiny Text | Base | Overall |
|---|---|---|---|---|---|---|---|---|---|
| GOT OCR | 52.7 | 52.0 | 0.2 | 22.1 | 93.6 | 42.0 | 29.9 | 94.0 | 48.3 ± 1.1 |
| Marker | 76.0 | 57.9 | 57.6 | 27.8 | 84.9 | 72.9 | 84.6 | 99.1 | 70.1 ± 1.1 |
| MinerU | 75.4 | 47.4 | 60.9 | 17.3 | 96.6 | 59.0 | 39.1 | 96.6 | 61.5 ± 1.1 |
| Mistral OCR | 77.2 | 67.5 | 60.6 | 29.3 | 93.6 | 71.3 | 77.1 | 99.4 | 72.0 ± 1.1 |
| Nanonets OCR | 67.0 | 68.6 | 77.7 | 39.5 | 40.7 | 69.9 | 53.4 | 99.3 | 64.5 ± 1.1 |
| GPT-4o (No Anchor) | 51.5 | 75.5 | 69.1 | 40.9 | 94.2 | 68.9 | 54.1 | 96.7 | 68.9 ± 1.1 |
| GPT-4o (Anchored) | 53.5 | 74.5 | 70.0 | 40.7 | 93.8 | 69.3 | 60.6 | 96.8 | 69.9 ± 1.1 |
| Gemini Flash 2 (No Anchor) | 32.1 | 56.3 | 61.4 | 27.8 | 48.0 | 58.7 | 84.4 | 94.0 | 57.8 ± 1.1 |
| Gemini Flash 2 (Anchored) | 54.5 | 56.1 | 72.1 | 34.2 | 64.7 | 61.5 | 71.5 | 95.6 | 63.8 ± 1.2 |
| Qwen 2 VL (No Anchor) | 19.7 | 31.7 | 24.2 | 17.1 | 88.9 | 8.3 | 6.8 | 55.5 | 31.5 ± 0.9 |
| Qwen 2.5 VL (No Anchor) | 63.1 | 65.7 | 67.3 | 38.6 | 73.6 | 68.3 | 49.1 | 98.3 | 65.5 ± 1.2 |
| olmOCR v0.1.75 (No Anchor) | 71.5 | 71.4 | 71.4 | 42.8 | 94.1 | 77.7 | 71.0 | 97.8 | 74.7 ± 1.1 |
| olmOCR v0.1.75 (Anchored) | 74.9 | 71.2 | 71.0 | 42.2 | 94.5 | 78.3 | 73.3 | 98.3 | 75.5 ± 1.0 |
| MonkeyOCR-pro-3B | 83.8 | 68.8 | 74.6 | 36.1 | 91.2 | 76.6 | 80.1 | 95.3 | 75.8 ± 1.0 |
| dots.ocr | 82.1 | 64.2 | 88.3 | 40.9 | 94.1 | 82.4 | 81.2 | 99.5 | 79.1 ± 1.0 |
> **Note:**
> - The metrics are from [MonkeyOCR](https://github.com/Yuliang-Liu/MonkeyOCR), [olmocr](https://github.com/allenai/olmocr), and our own internal evaluations.
> - We remove the Page-header and Page-footer cells from the result markdown.



# Quick Start
## 1. Installation
### Install dots.ocr
```shell
conda create -n dots_ocr python=3.12
conda activate dots_ocr

git clone https://github.com/rednote-hilab/dots.ocr.git
cd dots.ocr

# Install pytorch, see https://pytorch.org/get-started/previous-versions/ for your cuda version
pip install torch==2.7.0 torchvision==0.22.0 torchaudio==2.7.0 --index-url https://download.pytorch.org/whl/cu128
pip install -e .
```

If you have trouble with the installation, try our [Docker Image](https://hub.docker.com/r/rednotehilab/dots.ocr) for an easier setup, and follow these steps:
```shell
git clone https://github.com/rednote-hilab/dots.ocr.git
cd dots.ocr
pip install -e .
```


### Download Model Weights
> 💡**Note:** Please use a directory name without periods (e.g., `DotsOCR` instead of `dots.ocr`) for the model save path. This is a temporary workaround pending our integration with Transformers.
```shell
python tools/download_model.py
```


## 2. Deployment
### vLLM inference
We highly recommend using vLLM for deployment and inference. All of our evaluation results are based on vLLM version 0.9.1.
The [Docker Image](https://hub.docker.com/r/rednotehilab/dots.ocr) is based on the official vLLM image. You can also follow the [Dockerfile](https://github.com/rednote-hilab/dots.ocr/blob/master/docker/Dockerfile) to build the deployment environment yourself.

```shell
# You need to register the model with vllm first
export hf_model_path=./weights/DotsOCR  # Path to your downloaded model weights
export PYTHONPATH=$(dirname "$hf_model_path"):$PYTHONPATH
sed -i '/^from vllm\.entrypoints\.cli\.main import main$/a\
from DotsOCR import modeling_dots_ocr_vllm' `which vllm`

# launch vllm server
CUDA_VISIBLE_DEVICES=0 vllm serve ${hf_model_path} --tensor-parallel-size 1 --gpu-memory-utilization 0.95 --chat-template-content-format string --served-model-name model --trust-remote-code

# vllm api demo
python3 ./demo/demo_vllm.py --prompt_mode prompt_layout_all_en
```

A minimal OpenAI-compatible client sketch is shown after the Hugging Face inference details below.

### Hugging Face inference
```shell
python3 demo/demo_hf.py
```
Hugging Face inference details

```python
import torch
from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer
from qwen_vl_utils import process_vision_info
from dots_ocr.utils import dict_promptmode_to_prompt

model_path = "./weights/DotsOCR"
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    attn_implementation="flash_attention_2",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True
)
processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)

image_path = "demo/demo_image1.jpg"
prompt = """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox.

1. Bbox format: [x1, y1, x2, y2]

2. Layout Categories: The possible categories are ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'].

3. Text Extraction & Formatting Rules:
    - Picture: For the 'Picture' category, the text field should be omitted.
    - Formula: Format its text as LaTeX.
    - Table: Format its text as HTML.
    - All Others (Text, Title, etc.): Format their text as Markdown.

4. Constraints:
    - The output text must be the original text from the image, with no translation.
    - All layout elements must be sorted according to human reading order.

5. Final Output: The entire output must be a single JSON object.
"""

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": image_path
            },
            {"type": "text", "text": prompt}
        ]
    }
]

# Preparation for inference
text = processor.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)

inputs = inputs.to("cuda")

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=24000)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
```
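The demo above hard-codes the layout prompt; the `dict_promptmode_to_prompt` mapping it imports can be used to switch tasks instead. A minimal sketch, assuming the key names match the prompt modes listed in [prompts](https://github.com/rednote-hilab/dots.ocr/blob/master/dots_ocr/utils/prompts.py):

```python
from dots_ocr.utils import dict_promptmode_to_prompt

# Swap tasks by prompt mode instead of hard-coding the prompt string.
# The key below is assumed to be one of the modes defined in
# dots_ocr/utils/prompts.py (e.g. prompt_layout_all_en, prompt_ocr).
prompt = dict_promptmode_to_prompt["prompt_layout_all_en"]
```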

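If you are serving with vLLM instead, the server above exposes an OpenAI-compatible API. Below is a minimal client sketch, assuming the server from the deployment step is listening on localhost:8000 and was launched with `--served-model-name model`:

```python
import base64
from openai import OpenAI

# Minimal sketch of querying the vLLM server launched above; the port and
# model name mirror the `vllm serve` command, so adjust them to your setup.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

with open("demo/demo_image1.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

response = client.chat.completions.create(
    model="model",  # matches --served-model-name
    messages=[{
        "role": "user",
        "content": [
            {"type": "image_url",
             "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}},
            # Use one of the prompts from dots_ocr/utils/prompts.py here.
            {"type": "text", "text": "Please output the layout information from the PDF image..."},
        ],
    }],
    max_tokens=8192,
)
print(response.choices[0].message.content)
```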
## 3. Document Parse
**Based on the vLLM server**, you can parse an image or a PDF file using the following commands:
```bash
# Parse all layout info, both detection and recognition
# Parse a single image
python3 dots_ocr/parser.py demo/demo_image1.jpg
# Parse a single PDF
python3 dots_ocr/parser.py demo/demo_pdf1.pdf --num_threads 64  # try a bigger num_threads for PDFs with many pages

# Layout detection only
python3 dots_ocr/parser.py demo/demo_image1.jpg --prompt prompt_layout_only_en

# Parse text only, except Page-header and Page-footer
python3 dots_ocr/parser.py demo/demo_image1.jpg --prompt prompt_ocr

# Parse layout info by bbox
python3 dots_ocr/parser.py demo/demo_image1.jpg --prompt prompt_grounding_ocr --bbox 163 241 1536 705
```
Output Results

1. **Structured Layout Data** (`demo_image1.json`): A JSON file containing the detected layout elements, including their bounding boxes, categories, and extracted text (see the sketch after this list for one way to consume it).
2. **Processed Markdown File** (`demo_image1.md`): A Markdown file generated from the concatenated text of all detected cells.
   * An additional version, `demo_image1_nohf.md`, is also provided, which excludes page headers and footers for compatibility with benchmarks like OmniDocBench and olmOCR-bench.
3. **Layout Visualization** (`demo_image1.jpg`): The original image with the detected layout bounding boxes drawn on it.
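As a rough illustration of how the structured output can be consumed, the sketch below rebuilds a no-header/footer markdown body from the JSON file. The field names (`category`, `text`) follow the prompt specification shown earlier; treat the exact schema as an assumption:

```python
import json

# A minimal sketch of consuming demo_image1.json; the schema
# (list of cells with "category", "bbox", "text") is assumed from
# the prompt specification above, not a documented guarantee.
with open("demo_image1.json", "r", encoding="utf-8") as f:
    cells = json.load(f)

# Drop page furniture, mirroring how demo_image1_nohf.md is built.
body = [c for c in cells if c.get("category") not in ("Page-header", "Page-footer")]

# Cells arrive in reading order; join their text into a rough markdown body.
markdown = "\n\n".join(c["text"] for c in body if "text" in c)  # Picture cells omit "text"
print(markdown)
```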
## 4. Demo
You can run the demo with the following command, or try it directly at the [live demo](https://dotsocr.xiaohongshu.com/):
```bash
python demo/demo_gradio.py
```

We also provide a demo for grounding OCR:
```bash
python demo/demo_gradio_annotion.py
```


### Example for formula document
(images: formula1.png, formula2.png, formula3.png)

### Example for table document
(images: table1.png, table2.png, table3.png)

### Example for multilingual document
(images: Tibetan.png, tradition_zh.png, nl.png, kannada.png, russian.png)

### Example for reading order
(image: reading_order.png)

### Example for grounding ocr
(image: grounding.png)


## Acknowledgments
We would like to thank [Qwen2.5-VL](https://github.com/QwenLM/Qwen2.5-VL), [aimv2](https://github.com/apple/ml-aim), [MonkeyOCR](https://github.com/Yuliang-Liu/MonkeyOCR), [OmniDocBench](https://github.com/opendatalab/OmniDocBench), and [PyMuPDF](https://github.com/pymupdf/PyMuPDF) for providing code and models.

We also thank [DocLayNet](https://github.com/DS4SD/DocLayNet), [M6Doc](https://github.com/HCIILAB/M6Doc), [CDLA](https://github.com/buptlihang/CDLA), and [D4LA](https://github.com/AlibabaResearch/AdvancedLiterateMachinery) for providing valuable datasets.

## Limitation & Future Work

- **Complex Document Elements:**
  - **Table & Formula**: dots.ocr is not yet perfect for high-complexity table and formula extraction.
  - **Picture**: Pictures in documents are currently not parsed.

- **Parsing Failures:** The model may fail to parse under certain conditions:
  - When the character-to-pixel ratio is excessively high. Try enlarging the image or increasing the PDF parsing DPI (a setting of 200 is recommended). However, please note that the model performs optimally on images with a resolution under 11289600 pixels (a resize sketch follows at the end of this section).
  - Continuous special characters, such as ellipses (`...`) and underscores (`_`), may cause the prediction output to repeat endlessly. In such scenarios, consider using alternative prompts like `prompt_layout_only_en`, `prompt_ocr`, or `prompt_grounding_ocr` ([details here](https://github.com/rednote-hilab/dots.ocr/blob/master/dots_ocr/utils/prompts.py)).

- **Performance Bottleneck:** Despite its 1.7B-parameter LLM foundation, **dots.ocr** is not yet optimized for high-throughput processing of large PDF volumes.

We are committed to achieving more accurate table and formula parsing, as well as enhancing the model's OCR capabilities for broader generalization, all while aiming for **a more powerful, more efficient model**. Furthermore, we are actively considering the development of **a more general-purpose perception model** based on vision-language models (VLMs), which would integrate general detection, image captioning, and OCR tasks into a unified framework. **Parsing the content of the pictures in documents** is also a key priority for our future work.
We believe that collaboration is the key to tackling these exciting challenges. If you are passionate about advancing the frontiers of document intelligence and are interested in contributing to these future endeavors, we would love to hear from you. Please reach out to us via email at: yanqing4@xiaohongshu.com.
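As a rough companion to the resolution guidance above, here is a small preprocessing sketch (a hypothetical helper using Pillow; the 11289600-pixel cap comes from the note in the limitations list):

```python
from PIL import Image

MAX_PIXELS = 11289600  # optimal-resolution cap noted in the limitations above

def fit_resolution(path: str, out_path: str) -> None:
    """Downscale an image that exceeds the pixel cap, keeping aspect ratio."""
    img = Image.open(path)
    pixels = img.width * img.height
    if pixels > MAX_PIXELS:
        scale = (MAX_PIXELS / pixels) ** 0.5
        img = img.resize((int(img.width * scale), int(img.height * scale)), Image.LANCZOS)
    img.save(out_path)

fit_resolution("demo/demo_image1.jpg", "demo_image1_resized.jpg")
```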
diff --git a/chat_template.json b/chat_template.json new file mode 100644 index 0000000..87a662f --- /dev/null +++ b/chat_template.json @@ -0,0 +1,3 @@ +{ + "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{%- for m in messages %}{%- if m.role == 'system' %}{{- '<|system|>' + m.content + '<|endofsystem|>\n' }}{%- elif m.role == 'user' %}{% if m.content is string %}{{- '<|user|>' + m.content + '<|endofuser|>' }}{% else %} {% for content in m.content %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|img|><|imgpad|><|endofimg|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|img|><|video_pad|><|endofimg|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{%- endif %}{%- elif m.role == 'assistant' %}{{- '<|assistant|>' + m.content }}{%- if not loop.last %}{{- '<|endofassistant|>' }}{%- endif %}{%- endif %}{%- endfor %}{%- if messages[-1].role != 'assistant' %}{{- '<|assistant|>' }}{%- endif %}" +} \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000..7ea2c09 --- /dev/null +++ b/config.json @@ -0,0 +1,51 @@ +{ + "architectures": [ + "DotsOCRForCausalLM" + ], + "model_type": "dots_ocr", + "auto_map": { + "AutoConfig": "configuration_dots.DotsOCRConfig", + "AutoModelForCausalLM": "modeling_dots_ocr.DotsOCRForCausalLM" + }, + "attention_bias": true, + "attention_dropout": 0.0, + "hidden_act": "silu", + "hidden_size": 1536, + "initializer_range": 0.02, + "intermediate_size": 8960, + "max_position_embeddings": 131072, + "max_window_layers": 28, + "num_attention_heads": 12, + "num_hidden_layers": 28, + "num_key_value_heads": 2, + "rms_norm_eps": 1e-06, + "rope_scaling": null, + "rope_theta": 1000000, + "sliding_window": 131072, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.51.0", + "use_cache": true, + "use_sliding_window": false, + "vocab_size": 151936, + "image_token_id": 151665, + "video_token_id": 151656, + "vision_config": { + "embed_dim": 1536, + "hidden_size": 1536, + "intermediate_size": 4224, + "num_hidden_layers": 42, + "num_attention_heads": 12, + "num_channels": 3, + "patch_size": 14, + "post_norm": true, + "rms_norm_eps": 1e-05, + "spatial_merge_size": 2, + "temporal_patch_size": 1, + "use_bias": false, + "attn_implementation": "flash_attention_2", + "init_merger_std": 0.02, + "initializer_range": 0.02, + "is_causal": false + } +} \ No newline at end of file diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..4aef15d --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{"framework": "pytorch", "task": "image-text-to-text", "allow_remote": true} \ No newline at end of file diff --git a/configuration_dots.py b/configuration_dots.py new file mode 100644 index 0000000..55901c2 --- /dev/null +++ b/configuration_dots.py @@ -0,0 +1,76 @@ +from typing import Any, Optional +from transformers.configuration_utils import PretrainedConfig +from transformers.models.qwen2 import Qwen2Config +from transformers import Qwen2_5_VLProcessor, AutoProcessor +from transformers.models.auto.configuration_auto import CONFIG_MAPPING + + +class DotsVisionConfig(PretrainedConfig): + model_type: str = "dots_vit" + + def __init__( + self, + 
embed_dim: int = 1536, # vision encoder embed size + hidden_size: int = 1536, # after merger hidden size + intermediate_size: int = 4224, + num_hidden_layers: int = 42, + num_attention_heads: int = 12, + num_channels: int = 3, + patch_size: int = 14, + spatial_merge_size: int = 2, + temporal_patch_size: int = 1, + rms_norm_eps: float = 1e-5, + use_bias: bool = False, + attn_implementation="flash_attention_2", # "eager","sdpa","flash_attention_2" + initializer_range=0.02, + init_merger_std=0.02, + is_causal=False, # ve causal forward + post_norm=True, + gradient_checkpointing=False, + **kwargs: Any, + ): + super().__init__(**kwargs) + self.embed_dim = embed_dim + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.spatial_merge_size = spatial_merge_size + self.temporal_patch_size = temporal_patch_size + self.rms_norm_eps = rms_norm_eps + self.use_bias = use_bias + self.attn_implementation = attn_implementation + self.initializer_range = initializer_range + self.init_merger_std = init_merger_std + self.is_causal = is_causal + self.post_norm = post_norm + self.gradient_checkpointing = gradient_checkpointing + + + +class DotsOCRConfig(Qwen2Config): + model_type = "dots_ocr" + def __init__(self, + image_token_id = 151665, + video_token_id = 151656, + vision_config: Optional[dict] = None, *args, **kwargs): + super().__init__(*args, **kwargs) + self.image_token_id = image_token_id + self.video_token_id = video_token_id + self.vision_config = DotsVisionConfig(**(vision_config or {})) + + def save_pretrained(self, save_directory, **kwargs): + self._auto_class = None + super().save_pretrained(save_directory, **kwargs) + + +class DotsVLProcessor(Qwen2_5_VLProcessor): + def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs): + super().__init__(image_processor, tokenizer, chat_template=chat_template) + self.image_token = "<|imgpad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token + + +AutoProcessor.register("dots_ocr", DotsVLProcessor) +CONFIG_MAPPING.register("dots_ocr", DotsOCRConfig) diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..c6f6906 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,7 @@ +{ + "max_length": 32768, + "eos_token_id": [ + 151643, + 151673 + ] +} diff --git a/merges.txt b/merges.txt new file mode 100644 index 0000000..7ce1d95 --- /dev/null +++ b/merges.txt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:599bab54075088774b1733fde865d5bd747cbcc7a547c5bc12610e874e26f5e3 +size 1671839 diff --git a/model-00001-of-00002.safetensors b/model-00001-of-00002.safetensors new file mode 100644 index 0000000..6a5014f --- /dev/null +++ b/model-00001-of-00002.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:686da8b6a33f88d4fc83092a1f601dbb12ca4e639942190771a61f5cc287aa24 +size 135 diff --git a/model-00002-of-00002.safetensors b/model-00002-of-00002.safetensors new file mode 100644 index 0000000..09e3c4a --- /dev/null +++ b/model-00002-of-00002.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d00568ee09e48f30b2ebd2b2245f6fa07bb2e1afcc3c0300f8298d6a52abf49 +size 135 diff --git a/model.safetensors.index.json b/model.safetensors.index.json new file mode 100644 index 0000000..8a62c07 --- /dev/null +++ 
b/model.safetensors.index.json @@ -0,0 +1,650 @@ +{ + "metadata": { + "total_size": 6078358528 + }, + "weight_map": { + "lm_head.weight": "model-00001-of-00002.safetensors", + "model.embed_tokens.weight": "model-00001-of-00002.safetensors", + "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.k_proj.bias": 
"model-00001-of-00002.safetensors", + "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.down_proj.weight": 
"model-00001-of-00002.safetensors", + "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.q_proj.weight": 
"model-00001-of-00002.safetensors", + "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + 
"model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.24.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.24.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.24.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.24.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.24.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.24.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.25.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + 
"model.layers.25.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.25.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.25.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.25.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.25.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.26.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.26.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.26.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.26.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.26.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.26.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.27.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.27.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.27.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.27.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.27.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.27.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.27.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.27.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.27.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.27.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.27.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.27.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + 
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.k_proj.weight": 
"model-00001-of-00002.safetensors", + "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors", + "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors", + "model.norm.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.0.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.0.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.0.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.0.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.0.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.0.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.0.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.1.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.1.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.1.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.1.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.1.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.1.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.1.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.10.attn.proj.weight": 
"model-00001-of-00002.safetensors", + "vision_tower.blocks.10.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.10.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.10.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.10.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.10.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.10.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.11.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.11.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.11.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.11.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.11.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.11.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.11.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.12.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.12.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.12.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.12.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.12.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.12.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.12.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.13.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.13.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.13.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.13.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.13.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.13.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.13.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.14.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.14.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.14.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.14.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.14.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.14.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.14.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.15.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.15.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.15.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.15.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.15.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.15.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.15.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.16.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.16.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.16.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.16.mlp.fc2.weight": 
"model-00001-of-00002.safetensors", + "vision_tower.blocks.16.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.16.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.16.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.17.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.17.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.17.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.17.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.17.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.17.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.17.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.18.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.18.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.18.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.18.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.18.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.18.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.18.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.19.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.19.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.19.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.19.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.19.mlp.fc3.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.19.norm1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.19.norm2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.2.attn.proj.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.2.attn.qkv.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.2.mlp.fc1.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.2.mlp.fc2.weight": "model-00001-of-00002.safetensors", + "vision_tower.blocks.2.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.2.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.2.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.20.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.20.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.20.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.20.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.20.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.20.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.20.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.21.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.21.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.21.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.21.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.21.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.21.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.21.norm2.weight": 
"model-00002-of-00002.safetensors", + "vision_tower.blocks.22.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.22.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.22.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.22.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.22.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.22.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.22.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.23.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.23.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.23.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.23.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.23.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.23.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.23.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.24.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.24.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.24.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.24.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.24.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.24.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.24.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.25.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.25.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.25.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.25.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.25.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.25.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.25.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.26.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.26.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.26.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.26.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.26.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.26.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.26.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.27.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.27.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.27.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.27.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.27.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.27.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.27.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.28.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.28.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.28.mlp.fc1.weight": 
"model-00002-of-00002.safetensors", + "vision_tower.blocks.28.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.28.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.28.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.28.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.29.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.29.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.29.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.29.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.29.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.29.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.29.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.3.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.3.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.3.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.3.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.3.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.3.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.3.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.30.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.30.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.30.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.30.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.30.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.30.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.30.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.31.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.31.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.31.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.31.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.31.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.31.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.31.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.32.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.32.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.32.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.32.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.32.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.32.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.32.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.33.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.33.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.33.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.33.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.33.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.33.norm1.weight": 
"model-00002-of-00002.safetensors", + "vision_tower.blocks.33.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.34.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.34.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.34.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.34.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.34.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.34.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.34.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.35.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.35.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.35.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.35.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.35.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.35.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.35.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.36.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.36.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.36.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.36.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.36.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.36.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.36.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.37.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.37.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.37.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.37.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.37.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.37.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.37.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.38.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.38.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.38.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.38.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.38.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.38.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.38.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.39.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.39.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.39.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.39.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.39.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.39.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.39.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.4.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.4.attn.qkv.weight": 
"model-00002-of-00002.safetensors", + "vision_tower.blocks.4.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.4.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.4.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.4.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.4.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.40.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.40.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.40.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.40.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.40.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.40.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.40.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.41.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.41.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.41.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.41.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.41.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.41.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.41.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.5.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.5.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.5.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.5.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.5.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.5.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.5.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.6.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.6.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.6.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.6.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.6.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.6.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.6.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.7.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.7.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.7.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.7.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.7.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.7.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.7.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.8.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.8.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.8.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.8.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.8.mlp.fc3.weight": "model-00002-of-00002.safetensors", 
+ "vision_tower.blocks.8.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.8.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.9.attn.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.9.attn.qkv.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.9.mlp.fc1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.9.mlp.fc2.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.9.mlp.fc3.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.9.norm1.weight": "model-00002-of-00002.safetensors", + "vision_tower.blocks.9.norm2.weight": "model-00002-of-00002.safetensors", + "vision_tower.merger.ln_q.bias": "model-00002-of-00002.safetensors", + "vision_tower.merger.ln_q.weight": "model-00002-of-00002.safetensors", + "vision_tower.merger.mlp.0.bias": "model-00002-of-00002.safetensors", + "vision_tower.merger.mlp.0.weight": "model-00002-of-00002.safetensors", + "vision_tower.merger.mlp.2.bias": "model-00002-of-00002.safetensors", + "vision_tower.merger.mlp.2.weight": "model-00002-of-00002.safetensors", + "vision_tower.patch_embed.patchifier.norm.weight": "model-00002-of-00002.safetensors", + "vision_tower.patch_embed.patchifier.proj.bias": "model-00002-of-00002.safetensors", + "vision_tower.patch_embed.patchifier.proj.weight": "model-00002-of-00002.safetensors", + "vision_tower.post_trunk_norm.weight": "model-00002-of-00002.safetensors" + } +} \ No newline at end of file diff --git a/modeling_dots_ocr.py b/modeling_dots_ocr.py new file mode 100644 index 0000000..79d1c25 --- /dev/null +++ b/modeling_dots_ocr.py @@ -0,0 +1,131 @@ +from typing import List, Optional, Tuple, Union + +import torch +from transformers.modeling_outputs import CausalLMOutputWithPast +from transformers.models.qwen2 import Qwen2ForCausalLM + +from .configuration_dots import DotsVisionConfig, DotsOCRConfig +from .modeling_dots_vision import DotsVisionTransformer + + +DOTS_VLM_MAX_IMAGES = 200 + + +class DotsOCRForCausalLM(Qwen2ForCausalLM): + config_class = DotsOCRConfig + + def __init__(self, config: DotsOCRConfig): + super().__init__(config) + + if isinstance(self.config.vision_config, dict): + vision_config = DotsVisionConfig(**self.config.vision_config) + self.config.vision_config = vision_config + else: + vision_config = self.config.vision_config + + self.vision_tower = DotsVisionTransformer(vision_config) + + def prepare_inputs_embeds( + self, + input_ids: torch.LongTensor, + pixel_values: Optional[torch.FloatTensor] = None, + grid_thw: Optional[torch.FloatTensor] = None, + img_mask: Optional[torch.BoolTensor] = None, + ) -> torch.Tensor: + inputs_embeds = self.get_input_embeddings()(input_ids) + + if pixel_values is not None: + assert img_mask is not None + if grid_thw.shape[0] > DOTS_VLM_MAX_IMAGES: + print( + f"Num image exceeded: {grid_thw.shape[0]} > {DOTS_VLM_MAX_IMAGES}, which may cause FSDP hang" + ) + + vision_embeddings = self.vision_tower(pixel_values, grid_thw) + + true_indices = torch.nonzero(img_mask).squeeze() + if len(true_indices) > vision_embeddings.size(0): + print( + f"img_mask sum > VE and will be truncated, mask.sum()={len(true_indices)} {vision_embeddings.size(0)=}" + ) + true_indices = true_indices[: vision_embeddings.size(0)] + new_img_mask = torch.zeros_like(img_mask, device=img_mask.device) + new_img_mask[true_indices[:, 0], true_indices[:, 1]] = True + else: + new_img_mask = img_mask + + assert ( + vision_embeddings.size(0) == new_img_mask.sum() + ), 
f"{vision_embeddings.size(0)=}, {new_img_mask.sum()=}" + + inputs_embeds = inputs_embeds.masked_scatter( + new_img_mask.to(inputs_embeds.device).unsqueeze(-1).expand_as(inputs_embeds), + vision_embeddings.to(inputs_embeds.device).type(inputs_embeds.dtype), + ) + + return inputs_embeds + + def forward( + self, + input_ids: torch.LongTensor, + pixel_values: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + use_cache: Optional[bool] = None, + logits_to_keep: int = 0, + **loss_kwargs, + ) -> Union[Tuple, CausalLMOutputWithPast]: + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + assert len(input_ids) >= 1, f"empty input_ids {input_ids.shape=} will cause gradnorm nan" + if inputs_embeds is None: + img_mask = input_ids == self.config.image_token_id + inputs_embeds = self.prepare_inputs_embeds(input_ids, pixel_values, image_grid_thw, img_mask) + + outputs = super().forward( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + labels=labels, + use_cache=use_cache if use_cache is not None else self.config.use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + # return_dict=return_dict, + logits_to_keep=logits_to_keep, + **loss_kwargs, + ) + + return outputs + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + inputs_embeds=None, + pixel_values=None, + attention_mask=None, + cache_position=None, + num_logits_to_keep=None, + **kwargs, + ): + model_inputs = super().prepare_inputs_for_generation( + input_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + cache_position=cache_position, + num_logits_to_keep=num_logits_to_keep, + **kwargs, + ) + + if cache_position[0] == 0: + model_inputs["pixel_values"] = pixel_values + + return model_inputs diff --git a/modeling_dots_ocr_vllm.py b/modeling_dots_ocr_vllm.py new file mode 100644 index 0000000..07195e8 --- /dev/null +++ b/modeling_dots_ocr_vllm.py @@ -0,0 +1,429 @@ +from functools import cached_property +from typing import Iterable, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union + +import torch +import torch.nn as nn +from transformers.models.qwen2_vl import Qwen2VLImageProcessor, Qwen2VLProcessor +from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize +from vllm import ModelRegistry +from vllm.config import VllmConfig +from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler +from vllm.model_executor.models.interfaces import MultiModalEmbeddings, SupportsMultiModal +from vllm.model_executor.models.qwen2 import Qwen2ForCausalLM +from vllm.model_executor.models.qwen2_5_vl import ( + Qwen2_5_VLMultiModalProcessor, + Qwen2_5_VLProcessingInfo, +) +from vllm.model_executor.models.qwen2_vl import Qwen2VLDummyInputsBuilder +from vllm.model_executor.models.utils import ( + AutoWeightsLoader, + WeightsMapper, + init_vllm_registered_model, + maybe_prefix, + merge_multimodal_embeddings, +) +from vllm.model_executor.sampling_metadata import SamplingMetadata 
+from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import MultiModalDataDict +from vllm.multimodal.parse import ImageSize +from vllm.sequence import IntermediateTensors + +from .configuration_dots import DotsVisionConfig +from .configuration_dots import DotsOCRConfig +from .modeling_dots_vision import DotsVisionTransformer + + +class DotsOCRImagePixelInputs(TypedDict): + type: Literal["pixel_values", "image_grid_thw"] + + pixel_values: torch.Tensor + image_grid_thw: torch.Tensor + + +class DotsOCRImageEmbeddingInputs(TypedDict): + type: Literal["image_embeds", "image_grid_thw"] + image_embeds: torch.Tensor + """Supported types: + - List[`torch.Tensor`]: A list of tensors holding all images' features. + Each tensor holds an image's features. + - `torch.Tensor`: A tensor holding all images' features + (concatenation of all images' feature tensors). + + Tensor shape: `(num_image_features, hidden_size)` + - `num_image_features` varies based on + the number and resolution of the images. + - `hidden_size` must match the hidden size of language model backbone. + """ + + image_grid_thw: torch.Tensor + + +DotsOCRImageInputs = Union[DotsOCRImagePixelInputs, DotsOCRImageEmbeddingInputs] + + +class DotsOCRMultiModalProcessor(Qwen2_5_VLMultiModalProcessor): + pass + + +class DotsOCRDummyInputsBuilder(Qwen2VLDummyInputsBuilder): + def get_dummy_mm_data( + self, + seq_len: int, + mm_counts: Mapping[str, int], + ) -> MultiModalDataDict: + num_images = mm_counts.get("image", 0) + + target_width, target_height = self.info.get_image_size_with_most_features() + + return { + "image": self._get_dummy_images(width=target_width, height=target_height, num_images=num_images), + } + + +class DotsOCRProcessingInfo(Qwen2_5_VLProcessingInfo): + def get_hf_config(self) -> DotsOCRConfig: + config = self.ctx.get_hf_config() + if not config.__class__.__name__ == 'DotsOCRConfig': + raise TypeError(f"Expected DotsOCRConfig, got {type(config)}") + + if hasattr(config, "vision_config") and isinstance(config.vision_config, dict): + config.vision_config = DotsVisionConfig(**config.vision_config) + + return config + + def get_hf_processor( + self, + *, + min_pixels: Optional[int] = None, + max_pixels: Optional[int] = None, + size: Optional[dict[str, int]] = None, + **kwargs: object, + ) -> Qwen2VLProcessor: + processor = self.ctx.get_hf_processor( + Qwen2VLProcessor, + image_processor=self.get_image_processor(min_pixels=min_pixels, max_pixels=max_pixels, size=size), + **kwargs, + ) + processor.image_token = "<|imgpad|>" + processor.video_token = "<|video_pad|>" + return processor + + def _get_vision_info( + self, + *, + image_width: int, + image_height: int, + num_frames: int = 1, + do_resize: bool = True, + image_processor: Optional[Qwen2VLImageProcessor], + ) -> tuple[ImageSize, int]: + if image_processor is None: + image_processor = self.get_image_processor() + + hf_config: DotsOCRConfig = self.get_hf_config() + vision_config = hf_config.vision_config + patch_size = vision_config.patch_size + merge_size = vision_config.spatial_merge_size + temporal_patch_size = vision_config.temporal_patch_size + + if do_resize: + resized_height, resized_width = smart_resize( + height=image_height, + width=image_width, + factor=patch_size * merge_size, + min_pixels=image_processor.min_pixels, + max_pixels=image_processor.max_pixels, + ) + preprocessed_size = ImageSize(width=resized_width, height=resized_height) + else: + preprocessed_size = ImageSize(width=image_width, height=image_height) + + # NOTE: Frames are 
padded to be divisible by `temporal_patch_size` + # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py#L294 + padded_num_frames = num_frames + num_frames % temporal_patch_size + + grid_t = max(padded_num_frames // temporal_patch_size, 1) + grid_h = preprocessed_size.height // patch_size + grid_w = preprocessed_size.width // patch_size + + num_patches = grid_t * grid_h * grid_w + num_vision_tokens = num_patches // (merge_size**2) + + return preprocessed_size, num_vision_tokens + + +@MULTIMODAL_REGISTRY.register_processor( + Qwen2_5_VLMultiModalProcessor, + info=DotsOCRProcessingInfo, + dummy_inputs=DotsOCRDummyInputsBuilder, +) +class DotsOCRForCausalLM(nn.Module, SupportsMultiModal): + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + "lm_head.": "language_model.lm_head.", + "model.": "language_model.model.", + } + ) + _tp_plan = {} + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + + self.config: DotsOCRConfig = vllm_config.model_config.hf_config + self.quant_config = vllm_config.quant_config + self.multimodal_config = vllm_config.model_config.multimodal_config + + if isinstance(self.config.vision_config, dict): + vision_config = DotsVisionConfig(**self.config.vision_config) + self.config.vision_config = vision_config + else: + vision_config = self.config.vision_config + + self.vision_tower = DotsVisionTransformer(vision_config) + self.language_model: Qwen2ForCausalLM = init_vllm_registered_model( + vllm_config=vllm_config, + hf_config=self.config, + prefix=maybe_prefix(prefix, "language_model"), + architectures=["Qwen2ForCausalLM"], + ) + + @cached_property + def sampler(self): + if hasattr(self.language_model, "sampler"): + return self.language_model.sampler + + return get_sampler() + + def _validate_and_reshape_mm_tensor(self, mm_input: object, name: str) -> torch.Tensor: + if not isinstance(mm_input, (torch.Tensor, list)): + raise ValueError(f"Incorrect type of {name}. " f"Got type: {type(mm_input)}") + if isinstance(mm_input, torch.Tensor): + if mm_input.ndim == 2: + return mm_input + if mm_input.ndim != 3: + raise ValueError( + f"{name} should be 2D or batched 3D tensor. " + f"Got ndim: {mm_input.ndim} " + f"(shape={mm_input.shape})" + ) + return torch.concat(list(mm_input)) + else: + return torch.concat(mm_input) + + def _parse_and_validate_image_input(self, **kwargs: object) -> Optional[DotsOCRImageInputs]: + pixel_values = kwargs.pop("pixel_values", None) + image_embeds = kwargs.pop("image_embeds", None) + image_grid_thw = kwargs.pop("image_grid_thw", None) + + if pixel_values is None and image_embeds is None: + return None + + if pixel_values is not None: + pixel_values = self._validate_and_reshape_mm_tensor(pixel_values, "image pixel values") + image_grid_thw = self._validate_and_reshape_mm_tensor(image_grid_thw, "image grid_thw") + + if not isinstance(pixel_values, (torch.Tensor, list)): + raise ValueError("Incorrect type of image pixel values. " f"Got type: {type(pixel_values)}") + + return DotsOCRImagePixelInputs( + type="pixel_values", pixel_values=pixel_values, image_grid_thw=image_grid_thw + ) + + if image_embeds is not None: + image_embeds = self._validate_and_reshape_mm_tensor(image_embeds, "image embeds") + image_grid_thw = self._validate_and_reshape_mm_tensor(image_grid_thw, "image grid_thw") + + if not isinstance(image_embeds, torch.Tensor): + raise ValueError("Incorrect type of image embeddings. 
" f"Got type: {type(image_embeds)}") + return DotsOCRImageEmbeddingInputs( + type="image_embeds", image_embeds=image_embeds, image_grid_thw=image_grid_thw + ) + + def vision_forward(self, pixel_values: torch.Tensor, image_grid_thw: torch.Tensor): + from vllm.distributed import ( + get_tensor_model_parallel_group, + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + ) + + assert self.vision_tower is not None + + tp_rank = get_tensor_model_parallel_rank() + tp = get_tensor_model_parallel_world_size() + + image_grid_thw_chunk = image_grid_thw.chunk(tp) + image_sizes_consum = torch.tensor([i.prod(-1).sum() for i in image_grid_thw_chunk]).cumsum(dim=0) + merge_size_square = self.vision_tower.config.spatial_merge_size**2 + image_embedding = torch.zeros( + ( + pixel_values.shape[0] // merge_size_square, + self.vision_tower.config.hidden_size, + ), + device=pixel_values.device, + dtype=pixel_values.dtype, + ) + + if tp_rank < len(image_sizes_consum): + idx_start = 0 if tp_rank == 0 else image_sizes_consum[tp_rank - 1].item() + idx_end = image_sizes_consum[tp_rank].item() + pixel_values_part = pixel_values[idx_start:idx_end] + image_grid_thw_part = image_grid_thw_chunk[tp_rank] + image_embedding_part = self.vision_tower(pixel_values_part, image_grid_thw_part) + image_embedding[idx_start // merge_size_square : idx_end // merge_size_square] = image_embedding_part + + group = get_tensor_model_parallel_group().device_group + torch.distributed.all_reduce(image_embedding, group=group) + return image_embedding + + def _process_image_input(self, image_input: DotsOCRImageInputs) -> tuple[torch.Tensor, ...]: + grid_thw = image_input["image_grid_thw"] + assert grid_thw.ndim == 2 + + if image_input["type"] == "image_embeds": + image_embeds = image_input["image_embeds"].type(self.vision_tower.dtype) + else: + pixel_values = image_input["pixel_values"].type(self.vision_tower.dtype) + image_embeds = self.vision_forward(pixel_values, grid_thw)[ + :, : self.config.hidden_size + ] + + # Split concatenated embeddings for each image item. + merge_size = self.vision_tower.config.spatial_merge_size + sizes = grid_thw.prod(-1) // merge_size // merge_size + + return image_embeds.split(sizes.tolist()) + + def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict: + modalities = {} + + # Preserve the order of modalities if there are multiple of them + # from the order of kwargs. + for input_key in kwargs: + if input_key in ("pixel_values", "image_embeds") and "images" not in modalities: + modalities["images"] = self._parse_and_validate_image_input(**kwargs) + return modalities + + def get_language_model(self) -> torch.nn.Module: + return self.language_model + + def get_multimodal_embeddings(self, **kwargs: object) -> Optional[MultiModalEmbeddings]: + modalities = self._parse_and_validate_multimodal_inputs(**kwargs) + if not modalities: + return None + + # The result multimodal_embeddings is tuple of tensors, with each + # tensor correspoending to a multimodal data item (image or video). + multimodal_embeddings: tuple[torch.Tensor, ...] = () + + # NOTE: It is important to iterate over the keys in this dictionary + # to preserve the order of the modalities. 
+ for modality in modalities: + if modality == "images": + image_input = modalities["images"] + vision_embeddings = self._process_image_input(image_input) + multimodal_embeddings += vision_embeddings + + return multimodal_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[MultiModalEmbeddings] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, + inputs_embeds, + multimodal_embeddings, + [self.config.image_token_id, self.config.video_token_id], + ) + + return inputs_embeds + + def get_input_embeddings_v0( + self, + input_ids: torch.Tensor, + image_input: Optional[DotsOCRImagePixelInputs] = None, + ) -> torch.Tensor: + inputs_embeds = self.get_input_embeddings(input_ids) + if image_input is not None: + image_embeds = self._process_image_input(image_input) + inputs_embeds = merge_multimodal_embeddings( + input_ids, + inputs_embeds, + image_embeds, + placeholder_token_id=self.config.image_token_id, + ) + return inputs_embeds + + def forward( + self, + input_ids: Optional[torch.Tensor], + positions: torch.Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, + **kwargs, + ) -> Union[torch.Tensor, IntermediateTensors]: + if intermediate_tensors is not None: + inputs_embeds = None + elif inputs_embeds is None and kwargs.get("pixel_values") is not None: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + inputs_embeds = None + else: + assert input_ids is not None + inputs_embeds = self.get_input_embeddings_v0( + input_ids, + image_input=image_input, + ) + input_ids = None + + hidden_states = self.language_model( + input_ids=input_ids, + positions=positions, + intermediate_tensors=intermediate_tensors, + inputs_embeds=inputs_embeds, + ) + + return hidden_states + + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[torch.Tensor]: + return self.language_model.compute_logits(hidden_states, sampling_metadata) + + def sample( + self, + logits: Optional[torch.Tensor], + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: + loader = AutoWeightsLoader(self) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) + + +def patch_vllm_chat_placeholder(): + from vllm.entrypoints.chat_utils import BaseMultiModalItemTracker + + ori = BaseMultiModalItemTracker._placeholder_str + + def _placeholder_str(self, modality, current_count: int) -> Optional[str]: + hf_config = self._model_config.hf_config + model_type = hf_config.model_type + if modality in ("image",) and model_type in ["dots_ocr"]: + return "<|img|><|imgpad|><|endofimg|>" + return ori(self, modality, current_count) + + BaseMultiModalItemTracker._placeholder_str = _placeholder_str + +ModelRegistry.register_model( + "DotsOCRForCausalLM", DotsOCRForCausalLM, +) + +patch_vllm_chat_placeholder() \ No newline at end of file diff --git a/modeling_dots_vision.py b/modeling_dots_vision.py new file mode 100644 index 0000000..62a1bdf --- /dev/null +++ b/modeling_dots_vision.py @@ -0,0 +1,405 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint 
+from flash_attn import flash_attn_varlen_func +from torch.nn import LayerNorm +from transformers.modeling_utils import PreTrainedModel +from .configuration_dots import DotsVisionConfig + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb_vision(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor: + orig_dtype = tensor.dtype + tensor = tensor.float() + + cos = freqs.cos() + sin = freqs.sin() + + cos = cos.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float() + sin = sin.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float() + + output = (tensor * cos) + (rotate_half(tensor) * sin) + + output = output.to(orig_dtype) + + return output + + +class VisionRotaryEmbedding(nn.Module): + def __init__(self, dim: int, theta: float = 10000.0) -> None: + super().__init__() + inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + def forward(self, seqlen: int) -> torch.Tensor: + seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype) + freqs = torch.outer(seq, self.inv_freq) + return freqs + + +class PatchMerger(nn.Module): + def __init__( + self, + dim: int, + context_dim: int, + spatial_merge_size: int = 2, + pre_norm="layernorm", + init_merger_std=None, + ) -> None: + super().__init__() + self.hidden_size = context_dim * (spatial_merge_size**2) + self.pre_norm = pre_norm + if self.pre_norm == "layernorm": + self.ln_q = LayerNorm(context_dim, eps=1e-6) + elif self.pre_norm == "rmsnorm": + self.ln_q = RMSNorm(context_dim, eps=1e-6) + else: + print("no norm in patch merger") + + self.mlp = nn.Sequential( + nn.Linear(self.hidden_size, self.hidden_size), + nn.GELU(), + nn.Linear(self.hidden_size, dim), + ) + + if init_merger_std is not None: + nn.init.normal_(self.mlp[0].weight, mean=0.0, std=init_merger_std) + nn.init.zeros_(self.mlp[0].bias) + nn.init.normal_(self.mlp[2].weight, mean=0.0, std=init_merger_std) + nn.init.zeros_(self.mlp[2].bias) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.pre_norm: + x = self.mlp(self.ln_q(x).view(-1, self.hidden_size)) + else: + x = self.mlp(x.view(-1, self.hidden_size)) + return x + + +class VisionAttention(nn.Module): + def __init__(self, config, dim: int, num_heads: int = 16, bias=True) -> None: + super().__init__() + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.qkv = nn.Linear(dim, dim * 3, bias=bias) + self.proj = nn.Linear(dim, dim, bias=bias) + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: torch.Tensor = None, + ) -> torch.Tensor: + seq_length = hidden_states.shape[0] + + q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0) + k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0) + + attention_mask = torch.full( + [1, seq_length, seq_length], torch.finfo(q.dtype).min, device=q.device, dtype=q.dtype + ) + for i in range(1, len(cu_seqlens)): + attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0 + + q = q.transpose(0, 1) + k = k.transpose(0, 1) + v = v.transpose(0, 1) + attn_weights = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(self.head_dim) + attn_weights = attn_weights + attention_mask + attn_weights = 
nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype) + attn_output = torch.matmul(attn_weights, v) + attn_output = attn_output.transpose(0, 1) + attn_output = attn_output.reshape(seq_length, -1) + attn_output = self.proj(attn_output) + return attn_output + + +class VisionFlashAttention2(nn.Module): + def __init__(self, config, dim: int, num_heads: int = 16, bias=True) -> None: + super().__init__() + self.num_heads = num_heads + self.qkv = nn.Linear(dim, dim * 3, bias=bias) + self.proj = nn.Linear(dim, dim, bias=bias) + self.config = config + self.is_causal = config.is_causal + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: torch.Tensor = None, + ) -> torch.Tensor: + seq_length = hidden_states.shape[0] + q, k, v = ( + self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + ) # 'shd' + q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0) + k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0) + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() + attn_output = flash_attn_varlen_func( + q, k, v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen, causal=self.is_causal + ).reshape(seq_length, -1) + attn_output = self.proj(attn_output) + + return attn_output + + +class VisionSdpaAttention(nn.Module): + def __init__(self, config, dim: int, num_heads: int = 16, bias=True) -> None: + super().__init__() + self.num_heads = num_heads + self.qkv = nn.Linear(dim, dim * 3, bias=bias) + self.proj = nn.Linear(dim, dim, bias=bias) + self.config = config + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: torch.Tensor = None, + ) -> torch.Tensor: + seq_length = hidden_states.shape[0] + q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + + q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0) + k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0) + + attention_mask = torch.zeros([1, seq_length, seq_length], device=q.device, dtype=torch.bool) + for i in range(1, len(cu_seqlens)): + attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = True + + q = q.transpose(0, 1) + k = k.transpose(0, 1) + v = v.transpose(0, 1) + + attn_output = F.scaled_dot_product_attention(q, k, v, attention_mask, dropout_p=0.0) + attn_output = attn_output.transpose(0, 1) + attn_output = attn_output.reshape(seq_length, -1) + + attn_output = self.proj(attn_output) + return attn_output + + +DOTS_VISION_ATTENTION_CLASSES = { + "eager": VisionAttention, + "flash_attention_2": VisionFlashAttention2, + "sdpa": VisionSdpaAttention, +} + + +class RMSNorm(nn.Module): + def __init__(self, dim: int, eps: float = 1e-6): + super().__init__() + self.weight = nn.Parameter(torch.ones(dim)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + output = self._norm(x.float()).type_as(x) + return output * self.weight + + def extra_repr(self) -> str: + return f"{tuple(self.weight.shape)}, eps={self.eps}" + + def _norm(self, x: torch.Tensor) -> torch.Tensor: + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + +class DotsSwiGLUFFN(nn.Module): + def __init__(self, config): + super().__init__() + hidden_features = config.intermediate_size + in_features = config.embed_dim + bias = config.use_bias + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias) + self.fc2 
= nn.Linear(hidden_features, in_features, bias=bias) + self.fc3 = nn.Linear(in_features, hidden_features, bias=bias) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.silu(self.fc1(x)) * self.fc3(x) + x = self.fc2(x) + return x + + + +class DotsPatchEmbed(nn.Module): + def __init__(self, config): + super().__init__() + self.num_channels = config.num_channels + self.patch_size = config.patch_size + self.temporal_patch_size = config.temporal_patch_size + self.embed_dim = config.embed_dim + self.config = config + self.proj = nn.Conv2d( + config.num_channels, + config.embed_dim, + kernel_size=(config.patch_size, config.patch_size), + stride=(config.patch_size, config.patch_size), + ) + self.norm = RMSNorm(config.embed_dim, eps=config.rms_norm_eps) + + def forward(self, x: torch.Tensor, grid_thw=None) -> torch.Tensor: + x = x.view(-1, self.num_channels, self.temporal_patch_size, self.patch_size, self.patch_size)[:, :, 0] + x = self.proj(x).view(-1, self.embed_dim) + x = self.norm(x) + return x + + +class DotsViTPreprocessor(nn.Module): + def __init__(self, config): + super().__init__() + self.patch_h = config.patch_size + self.patch_w = config.patch_size + self.embed_dim = config.embed_dim + self.config = config + self.patchifier = DotsPatchEmbed(config) + + def forward(self, x: torch.Tensor, grid_thw=None) -> torch.Tensor: + tokens = self.patchifier(x, grid_thw) + return tokens + + +class DotsVisionBlock(nn.Module): + def __init__(self, config, attn_implementation: str = "flash_attention_2"): + super().__init__() + self.attn = DOTS_VISION_ATTENTION_CLASSES[attn_implementation]( + config, config.embed_dim, num_heads=config.num_attention_heads, bias=config.use_bias + ) + self.norm1 = RMSNorm(config.embed_dim, eps=config.rms_norm_eps) + self.mlp = DotsSwiGLUFFN(config) + self.norm2 = RMSNorm(config.embed_dim, eps=config.rms_norm_eps) + + def forward(self, hidden_states, cu_seqlens, rotary_pos_emb) -> torch.Tensor: + hidden_states = hidden_states + self.attn( + self.norm1(hidden_states), cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb + ) + hidden_states = hidden_states + self.mlp(self.norm2(hidden_states)) + return hidden_states + + +class DotsVisionTransformer(PreTrainedModel): + def __init__(self, config: DotsVisionConfig) -> None: + super().__init__(config) + self.config = config + self.spatial_merge_size = config.spatial_merge_size + + self.patch_embed = DotsViTPreprocessor(config) + self._init_weights(self.patch_embed.patchifier.proj) + + head_dim = config.embed_dim // config.num_attention_heads + + self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2) + + _num_hidden_layers = config.num_hidden_layers + self.blocks = nn.ModuleList( + [DotsVisionBlock(config, config.attn_implementation) for _ in range(_num_hidden_layers)] + ) + + if self.config.post_norm: + self.post_trunk_norm = RMSNorm(config.embed_dim, eps=config.rms_norm_eps) + + self.merger = PatchMerger( + dim=config.hidden_size, + context_dim=config.embed_dim, + spatial_merge_size=config.spatial_merge_size, + init_merger_std=self.config.init_merger_std, + ) + + self.gradient_checkpointing = False + self._gradient_checkpointing_func = torch.utils.checkpoint.checkpoint + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, (nn.Linear, nn.Conv3d)): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not 
None: + module.weight.data[module.padding_idx].zero_() + + @property + def dtype(self) -> torch.dtype: + return self.blocks[0].mlp.fc2.weight.dtype + + @property + def device(self) -> torch.device: + return self.blocks[0].mlp.fc2.weight.device + + def get_pos_ids_by_grid(self, grid_thw): + pos_ids = [] + for t, h, w in grid_thw: + hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) + hpos_ids = hpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + hpos_ids = hpos_ids.permute(0, 2, 1, 3) + hpos_ids = hpos_ids.flatten() + + wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) + wpos_ids = wpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + wpos_ids = wpos_ids.permute(0, 2, 1, 3) + wpos_ids = wpos_ids.flatten() + pos_ids.append( + torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1) + ) + + return pos_ids + + def rot_pos_emb(self, grid_thw): + pos_ids = self.get_pos_ids_by_grid(grid_thw) + pos_ids = torch.cat(pos_ids, dim=0) + max_grid_size = grid_thw[:, 1:].max() + rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) + rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) + return rotary_pos_emb + + def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, bf16=True) -> torch.Tensor: + if bf16: + hidden_states = hidden_states.bfloat16() + hidden_states = self.patch_embed(hidden_states, grid_thw) + + rotary_pos_emb = self.rot_pos_emb(grid_thw) + + cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( + dim=0, + dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, + ) + cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) + + for blk in self.blocks: + if self.gradient_checkpointing and self.training: + hidden_states = self._gradient_checkpointing_func( + blk.__call__, + hidden_states, + cu_seqlens, + rotary_pos_emb, + use_reentrant=(self.config.ckpt_use_reentrant or self.config.ve_ckpt_use_reentrant), + ) + else: + hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb) + + if self.config.post_norm: + hidden_states = self.post_trunk_norm(hidden_states) + + hidden_states = self.merger(hidden_states) + return hidden_states \ No newline at end of file diff --git a/preprocessor_config.json b/preprocessor_config.json new file mode 100644 index 0000000..5786e21 --- /dev/null +++ b/preprocessor_config.json @@ -0,0 +1,19 @@ +{ + "min_pixels": 3136, + "max_pixels": 11289600, + "patch_size": 14, + "temporal_patch_size": 1, + "merge_size": 2, + "image_mean": [ + 0.48145466, + 0.4578275, + 0.40821073 + ], + "image_std": [ + 0.26862954, + 0.26130258, + 0.27577711 + ], + "image_processor_type": "Qwen2VLImageProcessor", + "processor_class": "DotsVLProcessor" +} diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..ec36c2f --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,25 @@ +{ + "additional_special_tokens": [ + "<|im_start|>", + "<|im_end|>", + "<|object_ref_start|>", + "<|object_ref_end|>", + "<|box_start|>", + "<|box_end|>", + "<|quad_start|>", + "<|quad_end|>", + "<|vision_start|>", + "<|vision_end|>", + "<|vision_pad|>", + "<|image_pad|>", + "<|video_pad|>" + ], + "eos_token": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "[PAD]" +} diff --git a/tokenizer.json b/tokenizer.json new file 
mode 100644 index 0000000..2a7818e --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:386545eb05f08c51352cde2fcc2c867f1592bb330f305efd1c6a57a93b1244cd +size 7036028 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..a7055e5 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,391 @@ +{ + "add_bos_token": false, + "add_prefix_space": false, + "added_tokens_decoder": { + "151643": { + "content": "<|endoftext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151644": { + "content": "<|im_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151645": { + "content": "<|im_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151646": { + "content": "<|object_ref_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151647": { + "content": "<|object_ref_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151648": { + "content": "<|box_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151649": { + "content": "<|box_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151650": { + "content": "<|quad_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151651": { + "content": "<|quad_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151652": { + "content": "<|vision_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151653": { + "content": "<|vision_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151654": { + "content": "<|vision_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151655": { + "content": "<|image_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151656": { + "content": "<|video_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151657": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151658": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151659": { + "content": "<|fim_prefix|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151660": { + "content": "<|fim_middle|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151661": { + "content": "<|fim_suffix|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151662": { + "content": "<|fim_pad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151663": { + "content": "<|repo_name|>", + 
"lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151664": { + "content": "<|file_sep|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151665": { + "content": "<|imgpad|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151666": { + "content": "<|img|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151667": { + "content": "<|endofimg|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151668": { + "content": "<|systemprompt|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151669": { + "content": "<|endofsystemprompt|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151670": { + "content": "<|user|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151671": { + "content": "<|endofuser|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151672": { + "content": "<|assistant|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151673": { + "content": "<|endofassistant|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151674": { + "content": "<|ref_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151675": { + "content": "<|ref_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151676": { + "content": "[SEP]", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151677": { + "content": "<|pic|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151678": { + "content": "<|text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151679": { + "content": "<|pictotext|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151680": { + "content": "[PAD]", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151681": { + "content": "<|slice|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151682": { + "content": "<|endofslice|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151683": { + "content": "<|imgrowend|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151684": { + "content": "<|polygon_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151685": { + "content": "<|polygon_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "151686": { + "content": "<|image_gen_start|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "151687": { + "content": "<|image_gen_end|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "<|im_start|>", + "<|im_end|>", + "<|object_ref_start|>", + "<|object_ref_end|>", + "<|box_start|>", + "<|box_end|>", + "<|quad_start|>", + "<|quad_end|>", + "<|vision_start|>", + "<|vision_end|>", + "<|vision_pad|>", + "<|image_pad|>", + "<|video_pad|>" + ], + "bos_token": null, + "chat_template": "{%- for m in messages %}\n {%- if m.role == 'system' %}\n {{- '<|system|>' + m.content + '<|endofsystem|>\\n' }}\n {%- elif m.role == 'user' %}\n {{- '<|user|>' + m.content + '<|endofuser|>' }}\n {%- elif m.role == 'assistant' %}\n {{- '<|assistant|>' + m.content }}\n {%- if not loop.last %}\n {{- '<|endofassistant|>' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if messages[-1].role != 'assistant' %}\n {{- '<|assistant|>' }}\n{%- endif %}", + "clean_up_tokenization_spaces": false, + "eos_token": "<|endoftext|>", + "errors": "replace", + "model_max_length": 131072, + "pad_token": "[PAD]", + "split_special_tokens": false, + "tokenizer_class": "Qwen2Tokenizer", + "unk_token": null +} diff --git a/vocab.json b/vocab.json new file mode 100644 index 0000000..6c49fc6 --- /dev/null +++ b/vocab.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca10d7e9fb3ed18575dd1e277a2579c16d108e32f27439684afa0e10b1440910 +size 2776833