Compare commits


17 Commits

Author SHA1 Message Date
4pdadmin
48de788604 'solveConflict' 2026-04-13 14:50:52 +08:00
ai-modelscope
fcd9221ff0 Create LICENSE 2025-07-08 00:26:27 +08:00
ai-modelscope
2809627603 Update README.md with TEI support (#16)
- Update README.md (3e955468f0e8204455096de62f1ae222bee9702f)

Co-authored-by: Alvaro Bartolome <alvarobartt@users.noreply.huggingface.co>
2025-06-23 00:14:59 +08:00
yingda
9d1483d7db Update README.md 2025-06-22 14:22:43 +00:00
yingda
94eb80727c Update README.md 2025-06-22 14:22:41 +00:00
ai-modelscope
8a49ec4a26 Update README.md with TEI support (#16)
- Update README.md (3e955468f0e8204455096de62f1ae222bee9702f)

Co-authored-by: Alvaro Bartolome <alvarobartt@users.noreply.huggingface.co>
2025-06-21 00:10:54 +08:00
ai-modelscope
56f8ecd8be add use case for vllm & modify Citation (#5)
- add use case for vllm & modify Citation (7c0a850d147a83b59e7422f38fb6a672df0c52d5)

Co-authored-by: yanzhao <zyznull@users.noreply.huggingface.co>
2025-06-12 00:23:52 +08:00
Cherrytest
90b66b21a0 Update README.md 2025-06-11 12:20:04 +00:00
ai-modelscope
6e41de8b06 add use case for vllm & modify Citation (#5)
- add use case for vllm & modify Citation (7c0a850d147a83b59e7422f38fb6a672df0c52d5)

Co-authored-by: yanzhao <zyznull@users.noreply.huggingface.co>
2025-06-08 00:14:53 +08:00
ai-modelscope
807d9e22a8 Automatically add EOS via Tokenizer, integrate Sentence Transformers (#1)
- Automatically add EOS via Tokenizer, integrate Sentence Transformers (fd17b9cd89d6cc5b416d4b66ea25da0bea7f2bb0)
- Remove eod_id line from README (7bd6fbe3c54b9ec2b4b1cc3a052720a76fcf0d90)
2025-06-07 00:12:29 +08:00
ai-modelscope
9b8853c96a Update README.md 2025-06-06 00:03:06 +08:00
ai-modelscope
98a780ea18 Delete special_tokens_map.json 2025-06-05 20:57:10 +08:00
ai-modelscope
97c301461c Update README.md 2025-06-05 17:56:07 +08:00
Cherrytest
6d1603f428 Upload folder using ModelScope SDK 2025-06-04 06:15:52 +00:00
Cherrytest
c0cad47273 Upload folder using ModelScope SDK 2025-06-04 06:15:51 +00:00
Cherrytest
7330a56b96 System update meta information 2025-06-04 06:12:44 +00:00
Cherrytest
711cd83c8d System init .gitattributes 2025-06-04 06:12:43 +00:00
18 changed files with 152625 additions and 1 deletion

47
.gitattributes vendored

@@ -1,22 +1,66 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
<<<<<<< HEAD
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
=======
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
>>>>>>> 39c791a222aad8db18054d37f07b55953499906d
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
<<<<<<< HEAD
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
=======
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
>>>>>>> 39c791a222aad8db18054d37f07b55953499906d
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
<<<<<<< HEAD
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.gguf* filter=lfs diff=lfs merge=lfs -text
*.ggml filter=lfs diff=lfs merge=lfs -text
*.llamafile* filter=lfs diff=lfs merge=lfs -text
*.pt2 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
=======
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
@@ -33,4 +77,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
*.EncryptBy4pd filter=lfs diff=lfs merge=lfs -text
*.EncryptBy4pd filter=lfs diff=lfs merge=lfs -text
>>>>>>> 39c791a222aad8db18054d37f07b55953499906d

10
1_Pooling/config.json Normal file

@@ -0,0 +1,10 @@
{
"word_embedding_dimension": 4096,
"pooling_mode_cls_token": false,
"pooling_mode_mean_tokens": false,
"pooling_mode_max_tokens": false,
"pooling_mode_mean_sqrt_len_tokens": false,
"pooling_mode_weightedmean_tokens": false,
"pooling_mode_lasttoken": true,
"include_prompt": true
}

202
LICENSE Normal file

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2024 Alibaba Cloud
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

285
README.md Normal file

@@ -0,0 +1,285 @@
---
license: apache-2.0
base_model:
- Qwen/Qwen3-8B-Base
tags:
- transformers
- sentence-transformers
- sentence-similarity
- feature-extraction
- text-embeddings-inference
---
# Qwen3-Embedding-8B
<p align="center">
<img src="https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/logo_qwen3.png" width="400"/>
</p>
## Highlights
The Qwen3 Embedding model series is the latest proprietary model of the Qwen family, specifically designed for text embedding and ranking tasks. Building upon the dense foundational models of the Qwen3 series, it provides a comprehensive range of text embeddings and reranking models in various sizes (0.6B, 4B, and 8B). This series inherits the exceptional multilingual capabilities, long-text understanding, and reasoning skills of its foundational model. The Qwen3 Embedding series represents significant advancements in multiple text embedding and ranking tasks, including text retrieval, code retrieval, text classification, text clustering, and bitext mining.
**Exceptional Versatility**: The embedding model has achieved state-of-the-art performance across a wide range of downstream application evaluations. The 8B size embedding model ranks **No.1** in the MTEB multilingual leaderboard (as of June 5, 2025, score **70.58**), while the reranking model excels in various text retrieval scenarios.
**Comprehensive Flexibility**: The Qwen3 Embedding series offers a full spectrum of sizes (from 0.6B to 8B) for both embedding and reranking models, catering to diverse use cases that prioritize efficiency and effectiveness. Developers can seamlessly combine these two modules. Additionally, the embedding model allows for flexible vector definitions across all dimensions, and both embedding and reranking models support user-defined instructions to enhance performance for specific tasks, languages, or scenarios.
**Multilingual Capability**: The Qwen3 Embedding series offers support for over 100 languages, thanks to the multilingual capabilities of Qwen3 models. This includes various programming languages, and provides robust multilingual, cross-lingual, and code retrieval capabilities.
**Qwen3-Embedding-8B** has the following features:
- Model Type: Text Embedding
- Supported Languages: 100+ Languages
- Number of Parameters: 8B
- Context Length: 32k
- Embedding Dimension: Up to 4096, supports user-defined output dimensions ranging from 32 to 4096
For more details, including benchmark evaluation, hardware requirements, and inference performance, please refer to our [blog](https://qwenlm.github.io/blog/qwen3-embedding/), [GitHub](https://github.com/QwenLM/Qwen3-Embedding).
## Qwen3 Embedding Series Model list
| Model Type | Models | Size | Layers | Sequence Length | Embedding Dimension | MRL Support | Instruction Aware |
|------------------|----------------------|------|--------|-----------------|---------------------|-------------|----------------|
| Text Embedding | [Qwen3-Embedding-0.6B](https://huggingface.co/Qwen/Qwen3-Embedding-0.6B) | 0.6B | 28 | 32K | 1024 | Yes | Yes |
| Text Embedding | [Qwen3-Embedding-4B](https://huggingface.co/Qwen/Qwen3-Embedding-4B) | 4B | 36 | 32K | 2560 | Yes | Yes |
| Text Embedding | [Qwen3-Embedding-8B](https://huggingface.co/Qwen/Qwen3-Embedding-8B) | 8B | 36 | 32K | 4096 | Yes | Yes |
| Text Reranking | [Qwen3-Reranker-0.6B](https://huggingface.co/Qwen/Qwen3-Reranker-0.6B) | 0.6B | 28 | 32K | - | - | Yes |
| Text Reranking | [Qwen3-Reranker-4B](https://huggingface.co/Qwen/Qwen3-Reranker-4B) | 4B | 36 | 32K | - | - | Yes |
| Text Reranking | [Qwen3-Reranker-8B](https://huggingface.co/Qwen/Qwen3-Reranker-8B) | 8B | 36 | 32K | - | - | Yes |
> **Note**:
> - `MRL Support` indicates whether the embedding model supports custom dimensions for the final embedding.
> - `Instruction Aware` notes whether the embedding or reranking model supports customizing the input instruction according to different tasks.
> - Our evaluation indicates that, for most downstream tasks, using instructions (instruct) typically yields an improvement of 1% to 5% compared to not using them. Therefore, we recommend that developers create tailored instructions specific to their tasks and scenarios. In multilingual contexts, we also advise users to write their instructions in English, as most instructions utilized during the model training process were originally written in English.
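To make the `MRL Support` and `Instruction Aware` notes above concrete, here is a minimal, illustrative sketch (not part of the official examples): it loads the model with Sentence Transformers, truncates the output embeddings to an arbitrary 256 dimensions via `truncate_dim`, and uses the bundled `query` prompt as the task instruction.
```python
# Illustrative sketch only: MRL-style truncation plus an instruction-aware
# query prompt with Sentence Transformers (requires sentence-transformers>=2.7.0).
from sentence_transformers import SentenceTransformer

# `truncate_dim` keeps only the first 256 dimensions of each embedding;
# any value in the supported 32-4096 range could be used instead.
model = SentenceTransformer("Qwen/Qwen3-Embedding-8B", truncate_dim=256)

queries = ["What is the capital of China?"]
documents = ["The capital of China is Beijing."]

# The "query" prompt shipped with the model prepends the retrieval instruction;
# documents are encoded without a prompt.
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)

print(query_embeddings.shape)  # (1, 256) after truncation
print(model.similarity(query_embeddings, document_embeddings))
```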
## Usage
With Transformers versions earlier than 4.51.0, you may encounter the following error:
```
KeyError: 'qwen3'
```
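To fail fast with a clearer message, you can add a small version guard before loading the model; this is a suggested sketch rather than part of the original instructions:
```python
# Optional version guard (illustrative; not from the original README).
from importlib.metadata import version
from packaging.version import parse

if parse(version("transformers")) < parse("4.51.0"):
    raise RuntimeError(
        "transformers>=4.51.0 is required for the 'qwen3' architecture; "
        "upgrade with `pip install -U transformers`."
    )
```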
### Sentence Transformers Usage
```python
# Requires transformers>=4.51.0
# Requires sentence-transformers>=2.7.0
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer("Qwen/Qwen3-Embedding-8B")
# We recommend enabling flash_attention_2 for better acceleration and memory saving,
# together with setting `padding_side` to "left":
# model = SentenceTransformer(
# "Qwen/Qwen3-Embedding-8B",
# model_kwargs={"attn_implementation": "flash_attention_2", "device_map": "auto"},
# tokenizer_kwargs={"padding_side": "left"},
# )
# The queries and documents to embed
queries = [
"What is the capital of China?",
"Explain gravity",
]
documents = [
"The capital of China is Beijing.",
"Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun.",
]
# Encode the queries and documents. Note that queries benefit from using a prompt
# Here we use the prompt called "query" stored under `model.prompts`, but you can
# also pass your own prompt via the `prompt` argument
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.7493, 0.0751],
# [0.0880, 0.6318]])
```
### Transformers Usage
```python
# Requires transformers>=4.51.0
import torch
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel
def last_token_pool(last_hidden_states: Tensor,
attention_mask: Tensor) -> Tensor:
left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
if left_padding:
return last_hidden_states[:, -1]
else:
sequence_lengths = attention_mask.sum(dim=1) - 1
batch_size = last_hidden_states.shape[0]
return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
def get_detailed_instruct(task_description: str, query: str) -> str:
return f'Instruct: {task_description}\nQuery:{query}'
# Each query must come with a one-sentence instruction that describes the task
task = 'Given a web search query, retrieve relevant passages that answer the query'
queries = [
get_detailed_instruct(task, 'What is the capital of China?'),
get_detailed_instruct(task, 'Explain gravity')
]
# No need to add instruction for retrieval documents
documents = [
"The capital of China is Beijing.",
"Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun."
]
input_texts = queries + documents
tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen3-Embedding-8B', padding_side='left')
model = AutoModel.from_pretrained('Qwen/Qwen3-Embedding-8B')
# We recommend enabling flash_attention_2 for better acceleration and memory saving.
# model = AutoModel.from_pretrained('Qwen/Qwen3-Embedding-8B', attn_implementation="flash_attention_2", torch_dtype=torch.float16).cuda()
max_length = 8192
# Tokenize the input texts
batch_dict = tokenizer(
input_texts,
padding=True,
truncation=True,
max_length=max_length,
return_tensors="pt",
)
batch_dict.to(model.device)
outputs = model(**batch_dict)
embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
# normalize embeddings
embeddings = F.normalize(embeddings, p=2, dim=1)
scores = (embeddings[:2] @ embeddings[2:].T)
print(scores.tolist())
# [[0.7493016123771667, 0.0750647559762001], [0.08795969933271408, 0.6318399906158447]]
```
### vLLM Usage
```python
# Requires vllm>=0.8.5
import torch
import vllm
from vllm import LLM
def get_detailed_instruct(task_description: str, query: str) -> str:
return f'Instruct: {task_description}\nQuery:{query}'
# Each query must come with a one-sentence instruction that describes the task
task = 'Given a web search query, retrieve relevant passages that answer the query'
queries = [
get_detailed_instruct(task, 'What is the capital of China?'),
get_detailed_instruct(task, 'Explain gravity')
]
# No need to add instruction for retrieval documents
documents = [
"The capital of China is Beijing.",
"Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun."
]
input_texts = queries + documents
model = LLM(model="Qwen/Qwen3-Embedding-8B", task="embed")
outputs = model.embed(input_texts)
embeddings = torch.tensor([o.outputs.embedding for o in outputs])
scores = (embeddings[:2] @ embeddings[2:].T)
print(scores.tolist())
# [[0.7482624650001526, 0.07556197047233582], [0.08875375241041183, 0.6300010681152344]]
```
📌 **Tip**: We recommend that developers customize the `instruct` according to their specific scenarios, tasks, and languages. Our tests have shown that in most retrieval scenarios, not using an `instruct` on the query side can lead to a drop in retrieval performance by approximately 1% to 5%.
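For example, different retrieval scenarios can simply use different task descriptions with the same `get_detailed_instruct` pattern; the helper is redefined here so the snippet is self-contained, and the two task descriptions are hypothetical illustrations rather than prescribed prompts.
```python
# Hypothetical task descriptions, shown for illustration only.
def get_detailed_instruct(task_description: str, query: str) -> str:
    return f'Instruct: {task_description}\nQuery:{query}'

code_task = 'Given a natural-language question, retrieve relevant code snippets'
qa_task = 'Given a question, retrieve Wikipedia passages that answer the question'

queries = [
    get_detailed_instruct(code_task, 'how to reverse a list in python'),
    get_detailed_instruct(qa_task, 'Who wrote "The Old Man and the Sea"?'),
]
print(queries[0])
# Documents are still embedded without any instruction, as in the examples above.
```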
### Text Embeddings Inference (TEI) Usage
You can either run / deploy TEI on NVIDIA GPUs as:
```bash
docker run --gpus all -p 8080:80 -v hf_cache:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.7.2 --model-id Qwen/Qwen3-Embedding-8B --dtype float16
```
Or on CPU devices as:
```bash
docker run -p 8080:80 -v hf_cache:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.7.2 --model-id Qwen/Qwen3-Embedding-8B --dtype float16
```
Then generate the embeddings by sending an HTTP POST request:
```bash
curl http://localhost:8080/embed \
-X POST \
-d '{"inputs": ["Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery: What is the capital of China?", "Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery: Explain gravity"]}' \
-H "Content-Type: application/json"
```
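Equivalently, the same endpoint can be called from Python; the sketch below assumes the TEI container above is running locally on port 8080 and that the `requests` package is installed.
```python
# Minimal sketch of querying the TEI /embed endpoint started above.
import requests

task = 'Given a web search query, retrieve relevant passages that answer the query'
payload = {
    "inputs": [
        f"Instruct: {task}\nQuery: What is the capital of China?",
        f"Instruct: {task}\nQuery: Explain gravity",
    ]
}

response = requests.post("http://localhost:8080/embed", json=payload, timeout=60)
response.raise_for_status()
embeddings = response.json()  # one embedding vector per input string
print(len(embeddings), len(embeddings[0]))
```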
## Evaluation
### MTEB (Multilingual)
| Model | Size | Mean (Task) | Mean (Type) | Bitext Mining | Class. | Clust. | Inst. Retri. | Multi. Class. | Pair. Class. | Rerank | Retri. | STS |
|----------------------------------|:-------:|:-------------:|:-------------:|:--------------:|:--------:|:--------:|:--------------:|:---------------:|:--------------:|:--------:|:--------:|:------:|
| NV-Embed-v2 | 7B | 56.29 | 49.58 | 57.84 | 57.29 | 40.80 | 1.04 | 18.63 | 78.94 | 63.82 | 56.72 | 71.10|
| GritLM-7B | 7B | 60.92 | 53.74 | 70.53 | 61.83 | 49.75 | 3.45 | 22.77 | 79.94 | 63.78 | 58.31 | 73.33|
| BGE-M3 | 0.6B | 59.56 | 52.18 | 79.11 | 60.35 | 40.88 | -3.11 | 20.1 | 80.76 | 62.79 | 54.60 | 74.12|
| multilingual-e5-large-instruct | 0.6B | 63.22 | 55.08 | 80.13 | 64.94 | 50.75 | -0.40 | 22.91 | 80.86 | 62.61 | 57.12 | 76.81|
| gte-Qwen2-1.5B-instruct | 1.5B | 59.45 | 52.69 | 62.51 | 58.32 | 52.05 | 0.74 | 24.02 | 81.58 | 62.58 | 60.78 | 71.61|
| gte-Qwen2-7b-Instruct | 7B | 62.51 | 55.93 | 73.92 | 61.55 | 52.77 | 4.94 | 25.48 | 85.13 | 65.55 | 60.08 | 73.98|
| text-embedding-3-large | - | 58.93 | 51.41 | 62.17 | 60.27 | 46.89 | -2.68 | 22.03 | 79.17 | 63.89 | 59.27 | 71.68|
| Cohere-embed-multilingual-v3.0 | - | 61.12 | 53.23 | 70.50 | 62.95 | 46.89 | -1.89 | 22.74 | 79.88 | 64.07 | 59.16 | 74.80|
| gemini-embedding-exp-03-07 | - | 68.37 | 59.59 | 79.28 | 71.82 | 54.59 | 5.18 | **29.16** | 83.63 | 65.58 | 67.71 | 79.40|
| **Qwen3-Embedding-0.6B** | 0.6B | 64.33 | 56.00 | 72.22 | 66.83 | 52.33 | 5.09 | 24.59 | 80.83 | 61.41 | 64.64 | 76.17|
| **Qwen3-Embedding-4B** | 4B | 69.45 | 60.86 | 79.36 | 72.33 | 57.15 | **11.56** | 26.77 | 85.05 | 65.08 | 69.60 | 80.86|
| **Qwen3-Embedding-8B** | 8B | **70.58** | **61.69** | **80.89** | **74.00** | **57.65** | 10.06 | 28.66 | **86.40** | **65.63** | **70.88** | **81.08** |
> **Note**: For compared models, the scores are retrieved from MTEB online [leaderboard](https://huggingface.co/spaces/mteb/leaderboard) on May 24th, 2025.
### MTEB (Eng v2)
| MTEB English / Models | Param. | Mean(Task) | Mean(Type) | Class. | Clust. | Pair Class. | Rerank. | Retri. | STS | Summ. |
|--------------------------------|:--------:|:------------:|:------------:|:--------:|:--------:|:-------------:|:---------:|:--------:|:-------:|:-------:|
| multilingual-e5-large-instruct | 0.6B | 65.53 | 61.21 | 75.54 | 49.89 | 86.24 | 48.74 | 53.47 | 84.72 | 29.89 |
| NV-Embed-v2 | 7.8B | 69.81 | 65.00 | 87.19 | 47.66 | 88.69 | 49.61 | 62.84 | 83.82 | 35.21 |
| GritLM-7B | 7.2B | 67.07 | 63.22 | 81.25 | 50.82 | 87.29 | 49.59 | 54.95 | 83.03 | 35.65 |
| gte-Qwen2-1.5B-instruct | 1.5B | 67.20 | 63.26 | 85.84 | 53.54 | 87.52 | 49.25 | 50.25 | 82.51 | 33.94 |
| stella_en_1.5B_v5 | 1.5B | 69.43 | 65.32 | 89.38 | 57.06 | 88.02 | 50.19 | 52.42 | 83.27 | 36.91 |
| gte-Qwen2-7B-instruct | 7.6B | 70.72 | 65.77 | 88.52 | 58.97 | 85.9 | 50.47 | 58.09 | 82.69 | 35.74 |
| gemini-embedding-exp-03-07 | - | 73.3 | 67.67 | 90.05 | **59.39** | **87.7** | 48.59 | 64.35 | 85.29 | **38.28** |
| **Qwen3-Embedding-0.6B** | 0.6B | 70.70 | 64.88 | 85.76 | 54.05 | 84.37 | 48.18 | 61.83 | 86.57 | 33.43 |
| **Qwen3-Embedding-4B** | 4B | 74.60 | 68.10 | 89.84 | 57.51 | 87.01 | 50.76 | 68.46 | **88.72** | 34.39 |
| **Qwen3-Embedding-8B** | 8B | **75.22** | **68.71** | **90.43** | 58.57 | 87.52 | **51.56** | **69.44** | 88.58 | 34.83 |
### C-MTEB (MTEB Chinese)
| C-MTEB | Param. | Mean(Task) | Mean(Type) | Class. | Clust. | Pair Class. | Rerank. | Retr. | STS |
|------------------|--------|------------|------------|--------|--------|-------------|---------|-------|-------|
| multilingual-e5-large-instruct | 0.6B | 58.08 | 58.24 | 69.80 | 48.23 | 64.52 | 57.45 | 63.65 | 45.81 |
| bge-multilingual-gemma2 | 9B | 67.64 |68.52 | 75.31 | 59.30 | 86.67 | 68.28 | 73.73 | 55.19 |
| gte-Qwen2-1.5B-instruct | 1.5B | 67.12 | 67.79 | 72.53 | 54.61 | 79.5 | 68.21 | 71.86 | 60.05 |
| gte-Qwen2-7B-instruct | 7.6B | 71.62 | 72.19 | 75.77 | 66.06 | 81.16 | 69.24 | 75.70 | 65.20 |
| ritrieve_zh_v1 | 0.3B | 72.71 | 73.85 | 76.88 | 66.5 | **85.98** | **72.86** | 76.97 | **63.92** |
| **Qwen3-Embedding-0.6B** | 0.6B | 66.33 | 67.45 | 71.40 | 68.74 | 76.42 | 62.58 | 71.03 | 54.52 |
| **Qwen3-Embedding-4B** | 4B | 72.27 | 73.51 | 75.46 | 77.89 | 83.34 | 66.05 | 77.03 | 61.26 |
| **Qwen3-Embedding-8B** | 8B | **73.84** | **75.00** | **76.97** | **80.08** | 84.23 | 66.99 | **78.21** | 63.53 |
## Citation
If you find our work helpful, feel free to cite it.
```
@article{qwen3embedding,
title={Qwen3 Embedding: Advancing Text Embedding and Reranking Through Foundation Models},
author={Zhang, Yanzhao and Li, Mingxin and Long, Dingkun and Zhang, Xin and Lin, Huan and Yang, Baosong and Xie, Pengjun and Yang, An and Liu, Dayiheng and Lin, Junyang and Huang, Fei and Zhou, Jingren},
journal={arXiv preprint arXiv:2506.05176},
year={2025}
}
```

30
config.json Normal file

@@ -0,0 +1,30 @@
{
"architectures": [
"Qwen3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 151643,
"eos_token_id": 151645,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"initializer_range": 0.02,
"intermediate_size": 12288,
"max_position_embeddings": 40960,
"max_window_layers": 36,
"model_type": "qwen3",
"num_attention_heads": 32,
"num_hidden_layers": 36,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 1000000,
"sliding_window": null,
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.51.2",
"use_cache": true,
"use_sliding_window": false,
"vocab_size": 151665
}

8
config_sentence_transformers.json Normal file

@@ -0,0 +1,8 @@
{
"prompts": {
"query": "Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery:",
"document": ""
},
"default_prompt_name": null,
"similarity_fn_name": "cosine"
}

1
configuration.json Normal file

@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}

6
generation_config.json Normal file

@@ -0,0 +1,6 @@
{
"bos_token_id": 151643,
"eos_token_id": 151643,
"max_new_tokens": 2048,
"transformers_version": "4.51.3"
}

151388
merges.txt Normal file

File diff suppressed because it is too large

BIN
model-00001-of-00004.safetensors (Stored with Git LFS) Normal file

Binary file not shown.

BIN
model-00002-of-00004.safetensors (Stored with Git LFS) Normal file

Binary file not shown.

BIN
model-00003-of-00004.safetensors (Stored with Git LFS) Normal file

Binary file not shown.

BIN
model-00004-of-00004.safetensors (Stored with Git LFS) Normal file

Binary file not shown.

405
model.safetensors.index.json Normal file

@@ -0,0 +1,405 @@
{
"metadata": {
"total_size": 15134590976
},
"weight_map": {
"embed_tokens.weight": "model-00001-of-00004.safetensors",
"layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.21.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.21.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.22.self_attn.k_norm.weight": "model-00002-of-00004.safetensors",
"layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"layers.22.self_attn.q_norm.weight": "model-00002-of-00004.safetensors",
"layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.32.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.32.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.32.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.32.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.32.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.32.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.32.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.32.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.32.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.33.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.33.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.33.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.33.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.33.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.33.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.33.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.33.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.33.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.34.input_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.34.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"layers.34.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"layers.34.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"layers.34.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"layers.34.self_attn.k_norm.weight": "model-00003-of-00004.safetensors",
"layers.34.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.34.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"layers.34.self_attn.q_norm.weight": "model-00003-of-00004.safetensors",
"layers.34.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.34.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
"layers.35.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"layers.35.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"layers.35.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"layers.35.self_attn.k_norm.weight": "model-00004-of-00004.safetensors",
"layers.35.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"layers.35.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"layers.35.self_attn.q_norm.weight": "model-00004-of-00004.safetensors",
"layers.35.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"layers.35.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"layers.9.self_attn.k_norm.weight": "model-00001-of-00004.safetensors",
"layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"layers.9.self_attn.q_norm.weight": "model-00001-of-00004.safetensors",
"layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"norm.weight": "model-00004-of-00004.safetensors"
}
}
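
The map above assigns every parameter name to the shard file that stores it, so a loader only has to open the shard that actually contains the tensor it needs. Below is a minimal sketch of resolving one weight through this index; it assumes the standard Hugging Face index layout with a top-level "weight_map" key and a local checkout of the shards listed above (file and parameter names taken verbatim from the entries shown).

```python
# Minimal sketch: look up a single parameter in model.safetensors.index.json
# and read it from the shard it lives in. Assumes the shards and the index
# file sit in the current directory.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "layers.35.mlp.down_proj.weight"
shard = index["weight_map"][name]  # e.g. "model-00004-of-00004.safetensors"

with safe_open(shard, framework="pt") as shard_file:
    tensor = shard_file.get_tensor(name)

print(name, tuple(tensor.shape), "->", shard)
```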

20
modules.json Normal file
View File

@ -0,0 +1,20 @@
[
{
"idx": 0,
"name": "0",
"path": "",
"type": "sentence_transformers.models.Transformer"
},
{
"idx": 1,
"name": "1",
"path": "1_Pooling",
"type": "sentence_transformers.models.Pooling"
},
{
"idx": 2,
"name": "2",
"path": "2_Normalize",
"type": "sentence_transformers.models.Normalize"
}
]
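
modules.json declares the Sentence Transformers inference pipeline for this repository: a Transformer backbone, followed by a Pooling module (configured under 1_Pooling), followed by L2 normalization (2_Normalize). A minimal sketch of how the file is consumed is shown below; the model path is a placeholder for a local checkout or the hub id of this repository, and a reasonably recent sentence-transformers release is assumed.

```python
# Minimal sketch: SentenceTransformer reads modules.json to assemble the
# Transformer -> Pooling -> Normalize pipeline declared above.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("path/or/repo-id")  # placeholder for this repo
print(model)  # prints the three modules in the order listed in modules.json

embeddings = model.encode(["What is the capital of China?"])
# The trailing Normalize module makes each embedding unit-length, so a dot
# product between two embeddings equals their cosine similarity.
print(embeddings.shape)
```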

BIN
tokenizer.json (Stored with Git LFS) Normal file

Binary file not shown.

208
tokenizer_config.json Normal file
View File

@ -0,0 +1,208 @@
{
"add_bos_token": false,
"add_prefix_space": false,
"added_tokens_decoder": {
"151643": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151644": {
"content": "<|im_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151645": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151646": {
"content": "<|object_ref_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151647": {
"content": "<|object_ref_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151648": {
"content": "<|box_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151649": {
"content": "<|box_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151650": {
"content": "<|quad_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151651": {
"content": "<|quad_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151652": {
"content": "<|vision_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151653": {
"content": "<|vision_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151654": {
"content": "<|vision_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151655": {
"content": "<|image_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151656": {
"content": "<|video_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151657": {
"content": "<tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151658": {
"content": "</tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151659": {
"content": "<|fim_prefix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151660": {
"content": "<|fim_middle|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151661": {
"content": "<|fim_suffix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151662": {
"content": "<|fim_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151663": {
"content": "<|repo_name|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151664": {
"content": "<|file_sep|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
}
},
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>",
"<|object_ref_start|>",
"<|object_ref_end|>",
"<|box_start|>",
"<|box_end|>",
"<|quad_start|>",
"<|quad_end|>",
"<|vision_start|>",
"<|vision_end|>",
"<|vision_pad|>",
"<|image_pad|>",
"<|video_pad|>"
],
"bos_token": null,
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
"clean_up_tokenization_spaces": false,
"eos_token": "<|im_end|>",
"errors": "replace",
"extra_special_tokens": {},
"model_max_length": 131072,
"pad_token": "<|endoftext|>",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}
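
This configuration sets "<|im_end|>" as the EOS token, "<|endoftext|>" as the padding token, a 131072-token context limit, and a chat_template that renders messages into the <|im_start|>/<|im_end|> format defined by the special tokens above. A minimal sketch of loading and exercising it with transformers follows; the local path is a placeholder for a checkout of this repository.

```python
# Minimal sketch: load the tokenizer described by tokenizer_config.json and
# render a message list through its chat_template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path
print(tok.eos_token, tok.pad_token, tok.model_max_length)
# expected per the config: <|im_end|> <|endoftext|> 131072

text = tok.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(text)  # wrapped in <|im_start|>...<|im_end|> markers as templated above
```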

1
vocab.json Normal file

File diff suppressed because one or more lines are too long