Upload README.md with huggingface_hub

This commit is contained in:
ai-modelscope 2025-08-01 07:39:29 +08:00
parent f8b98820e4
commit 9c1b7c2e61

View File

@ -3,7 +3,12 @@ license: mit
library_name: dots_ocr library_name: dots_ocr
pipeline_tag: image-text-to-text pipeline_tag: image-text-to-text
tags: tags:
- image-to-text
- ocr - ocr
- document-parse
- layout
- table
- formula
language: language:
- en - en
- zh - zh
@ -20,7 +25,7 @@ language:
dots.ocr: Multilingual Document Layout Parsing in a Single Vision-Language Model dots.ocr: Multilingual Document Layout Parsing in a Single Vision-Language Model
</h1> </h1>
[![arXiv](https://img.shields.io/badge/Arxiv-dots.ocr-b31b1b.svg?logo=arXiv)]() [![Blog](https://img.shields.io/badge/Blog-View_on_GitHub-333.svg?logo=github)](https://github.com/rednote-hilab/dots.ocr/blob/master/assets/blog.md)
[![HuggingFace](https://img.shields.io/badge/HuggingFace%20Weights-black.svg?logo=HuggingFace)](https://huggingface.co/rednote-hilab/dots.ocr) [![HuggingFace](https://img.shields.io/badge/HuggingFace%20Weights-black.svg?logo=HuggingFace)](https://huggingface.co/rednote-hilab/dots.ocr)
@ -861,7 +866,7 @@ This is an in-house benchmark which contains 1493 PDF images with 100 languages.
<td>Nanonets OCR</td> <td>Nanonets OCR</td>
<td>67.0</td> <td>67.0</td>
<td>68.6</td> <td>68.6</td>
<td><strong>77.7</strong></td> <td>77.7</td>
<td>39.5</td> <td>39.5</td>
<td>40.7</td> <td>40.7</td>
<td>69.9</td> <td>69.9</td>
@ -1026,7 +1031,7 @@ pip install -e .
### Download Model Weights ### Download Model Weights
> 💡**Note:** Please use a directory name without periods (e.g., `DotsOCR` instead of `dots.ocr`) for the model save path. This is a temporary workaround pending our integration with Transformers. > 💡**Note:** Please use a directory name without periods (e.g., `DotsOCR` instead of `dots.ocr`) for the model save path. This is a temporary workaround pending our integration with Transformers.
```shell ```shell
python tools/download_model.py python3 tools/download_model.py
``` ```
@ -1037,14 +1042,17 @@ The [Docker Image](https://hub.docker.com/r/rednotehilab/dots.ocr) is based on t
```shell ```shell
# You need to register the model with vLLM first # You need to register the model with vLLM first
export hf_model_path=./weights/DotsOCR # Path to your downloaded model weights python3 tools/download_model.py
export hf_model_path=./weights/DotsOCR # Path to your downloaded model weights. Please use a directory name without periods (e.g., `DotsOCR` instead of `dots.ocr`) for the model save path. This is a temporary workaround pending our integration with Transformers.
export PYTHONPATH=$(dirname "$hf_model_path"):$PYTHONPATH export PYTHONPATH=$(dirname "$hf_model_path"):$PYTHONPATH
sed -i '/^from vllm\.entrypoints\.cli\.main import main$/a\ sed -i '/^from vllm\.entrypoints\.cli\.main import main$/a\
from DotsOCR import modeling_dots_ocr_vllm' `which vllm` # If you downloaded the model weights yourself, replace `DotsOCR` with the directory name where you saved the model, and remember to use a directory name without periods (e.g., `DotsOCR` instead of `dots.ocr`)
# launch vllm server # launch vllm server
CUDA_VISIBLE_DEVICES=0 vllm serve ${hf_model_path} --tensor-parallel-size 1 --gpu-memory-utilization 0.95 --chat-template-content-format string --served-model-name model --trust-remote-code CUDA_VISIBLE_DEVICES=0 vllm serve ${hf_model_path} --tensor-parallel-size 1 --gpu-memory-utilization 0.95 --chat-template-content-format string --served-model-name model --trust-remote-code
# If you get a ModuleNotFoundError: No module named 'DotsOCR', please check the note above on the saved model directory name.
# vllm api demo # vllm api demo
python3 ./demo/demo_vllm.py --prompt_mode prompt_layout_all_en python3 ./demo/demo_vllm.py --prompt_mode prompt_layout_all_en
``` ```