diff --git a/.gitattributes b/.gitattributes
index 15ba2c6..b2dba1d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -45,3 +45,138 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+
+merges.txt filter=lfs diff=lfs merge=lfs -text
+model-00003-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00004-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00006-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00005-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00002-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00015-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00001-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00016-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00008-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00011-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00013-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00025-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00017-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00019-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00023-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00021-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00024-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00122-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00022-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00012-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00026-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00027-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00039-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00009-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00007-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00010-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00041-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00029-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00031-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00018-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00028-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00032-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00030-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00037-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00036-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00000-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00038-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00046-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00033-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00035-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00043-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00055-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00050-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00040-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00057-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00042-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00044-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00045-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00047-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00051-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00049-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00048-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00034-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00058-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00053-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00052-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00059-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00066-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00062-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00075-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00054-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00063-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00067-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00064-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00065-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00074-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00068-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00071-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00081-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00061-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00069-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00082-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00076-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00087-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00072-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00079-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00078-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00073-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00056-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00077-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00088-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00080-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00060-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00014-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00020-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00083-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00070-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00099-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00097-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00091-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00089-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00086-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00085-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00084-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00105-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00111-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00110-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00113-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00117-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00094-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00093-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00090-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00096-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00095-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00101-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00103-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00092-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00098-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00107-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00102-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00119-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00109-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00100-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00115-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00121-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00106-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00104-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+model-00116-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00118-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00112-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00126-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00114-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00125-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00108-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00129-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00127-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
+vocab.json filter=lfs diff=lfs merge=lfs -text
+model-00124-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00128-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00123-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00120-of-00130.safetensors filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
diff --git a/README.md b/README.md
index 8f922aa..880d2d9 100644
--- a/README.md
+++ b/README.md
@@ -1,48 +1,190 @@
---
-license: Apache License 2.0
-tags: []
-
-#model-type:
-##如 gpt、phi、llama、chatglm、baichuan 等
-#- gpt
-
-#domain:
-##如 nlp、cv、audio、multi-modal
-#- nlp
-
-#language:
-##语言代码列表 https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-##如 CIDEr、Blue、ROUGE 等
-#- CIDEr
-
-#tags:
-##各种自定义,包括 pretrained、fine-tuned、instruction-tuned、RL-tuned 等训练方法和其他
-#- pretrained
-
-#tools:
-##如 vllm、fastchat、llamacpp、AdaSeq 等
-#- vllm
+pipeline_tag: text-generation
+license: other
+license_name: modified-mit
+license_link: https://github.com/MiniMax-AI/MiniMax-M2.1/blob/main/LICENSE
+library_name: transformers
---
-### 当前模型的贡献者未提供更加详细的模型介绍。模型文件和权重,可浏览“模型文件”页面获取。
-#### 您可以通过如下git clone命令,或者ModelScope SDK来下载模型
-SDK下载
-```bash
-#安装ModelScope
-pip install modelscope
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Meet MiniMax-M2.1
+
+Today, we are handing **MiniMax-M2.1** over to the open-source community. This release is more than just a parameter update; it is a significant step toward democratizing top-tier agentic capabilities.
+
+M2.1 was built to shatter the stereotype that high-performance agents must remain behind closed doors. We have optimized the model specifically for robustness in coding, tool use, instruction following, and long-horizon planning. From automating multilingual software development to executing complex, multi-step office workflows, MiniMax-M2.1 empowers developers to build the next generation of autonomous applications—all while being fully transparent, controllable, and accessible.
+
+We believe true intelligence should be within reach. M2.1 is our commitment to the future, and a powerful new tool in your hands.
+
+
+
+
+
+## How to Use
+
+- The MiniMax-M2.1 API is now live on the MiniMax Open Platform: https://platform.minimax.io/docs/guides/text-generation
+- Our product MiniMax Agent, built on MiniMax-M2.1, is now publicly available: https://agent.minimax.io/
+- The MiniMax-M2.1 model weights are now open-source, allowing for local deployment and use: https://huggingface.co/MiniMaxAI/MiniMax-M2.1
+
+## Benchmarks
+
+MiniMax-M2.1 delivers a significant leap over M2 on core software engineering leaderboards. It shines particularly bright in multilingual scenarios, where it outperforms Claude Sonnet 4.5 and closely approaches Claude Opus 4.5.
+
+| Benchmark | MiniMax-M2.1 | MiniMax-M2 | Claude Sonnet 4.5 | Claude Opus 4.5 | Gemini 3 Pro | GPT-5.2 (thinking) | DeepSeek V3.2 |
+| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- |
+| SWE-bench Verified | 74.0 | 69.4 | 77.2 | 80.9 | 78.0 | 80.0 | 73.1 |
+| Multi-SWE-bench | 49.4 | 36.2 | 44.3 | 50.0 | 42.7 | x | 37.4 |
+| SWE-bench Multilingual | 72.5 | 56.5 | 68.0 | 77.5 | 65.0 | 72.0 | 70.2 |
+| Terminal-bench 2.0 | 47.9 | 30.0 | 50.0 | 57.8 | 54.2 | 54.0 | 46.4 |
+
+We also evaluated MiniMax-M2.1 on SWE-bench Verified across a variety of coding agent frameworks. The results highlight the model's exceptional framework generalization and robust stability.
+
+Furthermore, across specific benchmarks—including test case generation, code performance optimization, code review, and instruction following—MiniMax-M2.1 demonstrates comprehensive improvements over M2. In these specialized domains, it consistently matches or exceeds the performance of Claude Sonnet 4.5.
+
+| Benchmark | MiniMax-M2.1 | MiniMax-M2 | Claude Sonnet 4.5 | Claude Opus 4.5 | Gemini 3 Pro | GPT-5.2 (thinking) | DeepSeek V3.2 |
+| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- |
+| SWE-bench Verified (Droid) | 71.3 | 68.1 | 72.3 | 75.2 | x | x | 67.0 |
+| SWE-bench Verified (mini-swe-agent) | 67.0 | 61.0 | 70.6 | 74.4 | 71.8 | 74.2 | 60.0 |
+| SWT-bench | 69.3 | 32.8 | 69.5 | 80.2 | 79.7 | 80.7 | 62.0 |
+| SWE-Perf | 3.1 | 1.4 | 3.0 | 4.7 | 6.5 | 3.6 | 0.9 |
+| SWE-Review | 8.9 | 3.4 | 10.5 | 16.2 | x | x | 6.4 |
+| OctoCodingbench | 26.1 | 13.3 | 22.8 | 36.2 | 22.9 | x | 26.0 |
+
+To evaluate the model's full-stack capability to architect complete, functional applications "from zero to one," we established a novel benchmark: [VIBE (Visual & Interactive Benchmark for Execution)](https://huggingface.co/datasets/MiniMaxAI/VIBE). This suite encompasses five core subsets: Web, Simulation, Android, iOS, and Backend. Distinguishing itself from traditional benchmarks, VIBE leverages an innovative Agent-as-a-Verifier (AaaV) paradigm to automatically assess the interactive logic and visual aesthetics of generated applications within a real runtime environment.
+
+MiniMax-M2.1 delivers outstanding performance on the VIBE aggregate benchmark, achieving an average score of 88.6—demonstrating robust full-stack development capabilities. It excels particularly in the VIBE-Web (91.5) and VIBE-Android (89.7) subsets.
+
+| Benchmark | MiniMax-M2.1 | MiniMax-M2 | Claude Sonnet 4.5 | Claude Opus 4.5 | Gemini 3 Pro |
+| ----- | ----- | ----- | ----- | ----- | ----- |
+| VIBE (Average) | 88.6 | 67.5 | 85.2 | 90.7 | 82.4 |
+| VIBE-Web | 91.5 | 80.4 | 87.3 | 89.1 | 89.5 |
+| VIBE-Simulation | 87.1 | 77.0 | 79.1 | 84.0 | 89.2 |
+| VIBE-Android | 89.7 | 69.2 | 87.5 | 92.2 | 78.7 |
+| VIBE-iOS | 88.0 | 39.5 | 81.2 | 90.0 | 75.8 |
+| VIBE-Backend | 86.7 | 67.8 | 90.8 | 98.0 | 78.7 |
+
+MiniMax-M2.1 also demonstrates steady improvements over M2 in both long-horizon tool use and comprehensive intelligence metrics.
+
+| Benchmark | MiniMax-M2.1 | MiniMax-M2 | Claude Sonnet 4.5 | Claude Opus 4.5 | Gemini 3 Pro | GPT-5.2 (thinking) | DeepSeek V3.2 |
+| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- |
+| Toolathlon | 43.5 | 16.7 | 38.9 | 43.5 | 36.4 | 41.7 | 35.2 |
+| BrowseComp | 47.4 | 44.0 | 19.6 | 37.0 | 37.8 | 65.8 | 51.4 |
+| BrowseComp (context management) | 62.0 | 56.9 | 26.1 | 57.8 | 59.2 | 70.0 | 67.6 |
+| AIME25 | 83.0 | 78.0 | 88.0 | 91.0 | 96.0 | 98.0 | 92.0 |
+| MMLU-Pro | 88.0 | 82.0 | 88.0 | 90.0 | 90.0 | 87.0 | 86.0 |
+| GPQA-D | 83.0 | 78.0 | 83.0 | 87.0 | 91.0 | 90.0 | 84.0 |
+| HLE w/o tools | 22.2 | 12.5 | 17.3 | 28.4 | 37.2 | 31.4 | 22.2 |
+| LCB | 81.0 | 83.0 | 71.0 | 87.0 | 92.0 | 89.0 | 86.0 |
+| SciCode | 41.0 | 36.0 | 45.0 | 50.0 | 56.0 | 52.0 | 39.0 |
+| IFBench | 70.0 | 72.0 | 57.0 | 58.0 | 70.0 | 75.0 | 61.0 |
+| AA-LCR | 62.0 | 61.0 | 66.0 | 74.0 | 71.0 | 73.0 | 65.0 |
+| 𝜏²-Bench Telecom | 87.0 | 87.0 | 78.0 | 90.0 | 87.0 | 85.0 | 91.0 |
+
+> **Evaluation Methodology Notes**:
+> - **SWE-bench Verified**: Tested on internal infrastructure using [Claude Code](https://github.com/anthropics/claude-code), [Droid](https://factory.ai/), or [mini-swe-agent](https://github.com/SWE-agent/mini-SWE-agent) as scaffolding. By default, we utilized Claude Code metrics. When using Claude Code, the default system prompt was overridden. Results represent the average of 4 runs.
+> - **Multi-SWE-bench & SWE-bench Multilingual & SWT-bench & SWE-Perf**: Tested on internal infrastructure using Claude Code as scaffolding, with the default system prompt overridden. Results represent the average of 4 runs.
+> - **Terminal-bench 2.0**: Tested using Claude Code on our internal evaluation framework. We verified the full dataset and fixed environmental issues. Timeout limits were removed, while all other configurations remained consistent with official settings. Results represent the average of 4 runs.
+> - **SWE-Review**: Built upon the SWE framework, this internal benchmark for code defect review covers diverse languages and scenarios, evaluating both defect recall and hallucination rates. A review is deemed "correct" only if the model accurately identifies the target defect and ensures all other reported findings are valid and free of hallucinations. All evaluations are executed using Claude Code, with final results reflecting the average of four independent runs per test case. We plan to open-source this benchmark soon.
+> - **OctoCodingbench**: An internal benchmark focused on long-horizon instruction following for Code Agents in complex development scenarios. It conducts end-to-end behavioral supervision within a dynamic environment spanning diverse tech stacks and scaffolding frameworks. The core objective is to evaluate the model's ability to integrate and execute "composite instruction constraints"—encompassing System Prompts (SP), User Queries, Memory, Tool Schemas, and specifications such as `Agents.md`, `Claude.md`, and `Skill.md`. Adopting a strict "single-violation-failure" scoring mechanism, the final result is the average pass rate across 4 runs, quantifying the model's robustness in translating static constraints into precise behaviors. We plan to open-source this benchmark soon.
+> - **VIBE**: An internal benchmark that utilizes Claude Code as scaffolding to automatically verify a program's interactive logic and visual effects. Scores are calculated through a unified pipeline comprising requirement sets, containerized deployment, and dynamic interaction environments. Final results represent the average of 3 runs. We have open-sourced this benchmark at [VIBE](https://huggingface.co/datasets/MiniMaxAI/VIBE).
+> - **Toolathlon**: The evaluation protocol remains consistent with the original paper.
+> - **BrowseComp**: All scores were obtained using the same agent framework as [WebExplorer](https://arxiv.org/pdf/2509.06501) (Liu et al. 2025), with only minor fine-tuning of tool descriptions. We utilized the same 103-sample GAIA text-only validation subset as WebExplorer.
+> - **BrowseComp (context management)**: When token usage exceeds 30% of the maximum context window, we retain the first AI response, the last five AI responses, and the tool outputs, discarding the remaining content.
+> - **AIME25 ~ 𝜏²-Bench Telecom**: Derived from internal testing based on the evaluation datasets and methodology referenced in the [Artificial Analysis Intelligence Index](https://artificialanalysis.ai/).
+
+## Local Deployment Guide
+
+Download the model from HuggingFace repository: https://huggingface.co/MiniMaxAI/MiniMax-M2.1
+
+We recommend using the following inference frameworks to serve the model:
+
+### SGLang
+
+We recommend using [SGLang](https://docs.sglang.ai/) to serve MiniMax-M2.1. Please refer to our [SGLang Deployment Guide](./docs/sglang_deploy_guide.md).
+
+### vLLM
+
+We recommend using [vLLM](https://github.com/vllm-project/vllm) to serve MiniMax-M2.1. Please refer to our [vLLM Deployment Guide](./docs/vllm_deploy_guide.md).
+
+### Transformers
+
+We recommend using [Transformers](https://github.com/huggingface/transformers) to serve MiniMax-M2.1. Please refer to our [Transformers Deployment Guide](./docs/transformers_deploy_guide.md).
+
+### Other Inference Engines
+
+- [KTransformers](https://github.com/kvcache-ai/ktransformers)
+
+### Inference Parameters
+
+We recommend using the following parameters for best performance: `temperature=1.0`, `top_p = 0.95`, `top_k = 40`. Default system prompt:
+
```
-```python
-#SDK模型下载
-from modelscope import snapshot_download
-model_dir = snapshot_download('MiniMax/MiniMax-M2.1')
-```
-Git下载
-```
-#Git模型下载
-git clone https://www.modelscope.cn/MiniMax/MiniMax-M2.1.git
+You are a helpful assistant. Your name is MiniMax-M2.1 and is built by MiniMax.
```
-如果您是本模型的贡献者,我们邀请您根据模型贡献文档 ,及时完善模型卡片内容。
\ No newline at end of file
+## Tool Calling Guide
+
+Please refer to our [Tool Calling Guide](./docs/tool_calling_guide.md).
+
+## Contact Us
+
+Contact us at [model@minimax.io](mailto:model@minimax.io).
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000..22274b9
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,165 @@
+{# ---------- special token variables ---------- #}
+{%- set toolcall_begin_token = '' -%}
+{%- set toolcall_end_token = ' ' -%}
+{#- Tool Rendering Functions ============================================== -#}
+{%- macro render_tool_namespace(namespace_name, tool_list) -%}
+{%- for tool in tool_list -%}
+{{ tool.function | tojson(ensure_ascii=False) }}
+{% endfor -%}
+{%- endmacro -%}
+{%- macro visible_text(content) -%}
+ {%- if content is string -%}
+ {{ content }}
+ {%- elif content is iterable and content is not mapping -%}
+ {%- for item in content -%}
+ {%- if item is mapping and item.type == 'text' -%}
+ {{- item.text }}
+ {%- elif item is string -%}
+ {{- item }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- elif content is none -%}
+ {{- '' }}
+ {%- else -%}
+ {{- content }}
+ {%- endif -%}
+{%- endmacro -%}
+{#- System Message Construction ============================================ -#}
+{%- macro build_system_message(system_message) -%}
+ {%- if system_message and system_message.content -%}
+ {{- visible_text(system_message.content) }}
+ {%- else -%}
+ {%- if model_identity is not defined -%}
+ {%- set model_identity = "You are a helpful assistant. Your name is MiniMax-M2.1 and is built by MiniMax." -%}
+ {%- endif -%}
+ {{- model_identity }}
+ {%- endif -%}
+
+ {#- Handle current_date -#}
+ {%- if system_message and system_message.current_date -%}
+ {{- '\n' ~ 'Current date: ' + system_message.current_date }}
+ {%- endif -%}
+ {#- Handle current_location -#}
+ {%- if system_message and system_message.current_location -%}
+ {{- '\n' ~ 'Current location: ' + system_message.current_location }}
+ {%- endif -%}
+{%- endmacro -%}
+{#- Main Template Logic ================================================= -#}
+{#- Extract system message (only first message if it's system) -#}
+{%- set system_message = none -%}
+{%- set conversation_messages = messages -%}
+{%- if messages and messages[0].role == "system" -%}
+ {%- set system_message = messages[0] -%}
+ {%- set conversation_messages = messages[1:] -%}
+{%- endif -%}
+{#- Get the last user message turn, for interleaved thinking -#}
+{%- set ns = namespace(last_user_index=-1) %}
+{% for m in conversation_messages %}
+ {%- if m.role == 'user' %}
+ {% set ns.last_user_index = loop.index0 -%}
+ {%- endif %}
+{%- endfor %}
+{#- Render system message -#}
+{{- ']~!b[' ~ ']~b]system' ~ '\n' }}
+{{- build_system_message(system_message) }}
+{#- Render tools if available -#}
+{%- if tools -%}
+ {{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
+ {{- '\n' ~ '' ~ '\n' }}
+ {{- render_tool_namespace("functions", tools) }}
+ {{- ' ' ~ '\n\n' }}
+{{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
+{{- '\n' ~ toolcall_begin_token }}
+
+param-value-1
+param-value-2
+...
+
+{{- '\n' ~ toolcall_end_token }}
+{%- endif -%}
+{{- '[e~[\n' }}
+
+{#- Render messages -#}
+{%- set last_tool_call = namespace(name=none) -%}
+{%- for message in conversation_messages -%}
+ {%- if message.role == 'assistant' -%}
+ {#- Only render reasoning_content if no user message follows -#}
+ {{- ']~b]ai' ~ '\n' }}
+
+ {%- set reasoning_content = '' %}
+ {%- set content = visible_text(message.content) %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '' in content %}
+ {%- set reasoning_content = content.split('')[0].strip('\n').split('')[-1].strip('\n') %}
+ {%- set content = content.split(' ')[-1].strip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if reasoning_content and loop.index0 > ns.last_user_index -%}
+ {{- '' ~ '\n' ~ reasoning_content ~ '\n' ~ ' ' ~ '\n\n' }}
+ {%- endif -%}
+ {%- if content -%}
+ {{- content }}
+ {%- endif -%}
+ {%- if message.tool_calls -%}
+ {{- '\n' ~ toolcall_begin_token ~ '\n' }}
+
+ {%- for tool_call in message.tool_calls -%}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '' }}
+ {% set _args = tool_call.arguments %}
+ {%- for k, v in _args.items() %}
+ {{- '' }}
+ {{- v | tojson(ensure_ascii=False) if v is not string else v }}
+ {{- ' ' }}
+ {% endfor %}
+ {{- ' ' ~ '\n' }}
+ {%- endfor -%}
+
+ {{- toolcall_end_token}}
+ {%- if message.tool_calls[-1].function -%}
+ {%- set last_tool_call.name = message.tool_calls[-1].function.name -%}
+ {%- else -%}
+ {%- set last_tool_call.name = message.tool_calls[-1].name -%}
+ {%- endif -%}
+ {%- else -%}
+ {%- set last_tool_call.name = none -%}
+ {%- endif -%}
+ {{- '[e~[' ~ '\n' }}
+
+ {%- elif message.role == 'tool' -%}
+ {%- if last_tool_call.name is none -%}
+ {{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
+ {%- endif -%}
+ {%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
+ {{- ']~b]tool' }}
+ {%- endif -%}
+ {%- if message.content is string -%}
+ {{- '\n' }}
+ {{- message.content }}
+ {{- ' ' }}
+ {%- else -%}
+ {%- for tr in message.content -%}
+ {{- '\n' }}
+ {{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
+ {{- '\n ' }}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
+ {{- '[e~[\n' -}}
+ {%- endif -%}
+
+ {%- elif message.role == 'user' -%}
+ {{- ']~b]user' ~ '\n' }}
+ {{- visible_text(message.content) }}
+ {{- '[e~[' ~ '\n' }}
+ {%- endif -%}
+{%- endfor -%}
+
+{#- Generation prompt -#}
+{%- if add_generation_prompt -%}
+{{- ']~b]ai' ~ '\n' ~ '' ~ '\n' }}
+{%- endif -%}
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..f300c2e
--- /dev/null
+++ b/config.json
@@ -0,0 +1,113 @@
+{
+ "architectures": [
+ "MiniMaxM2ForCausalLM"
+ ],
+ "attn_type_list": [
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1
+ ],
+ "auto_map": {
+ "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
+ "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
+ },
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 3072,
+ "intermediate_size": 1536,
+ "max_position_embeddings": 196608,
+ "model_type": "minimax_m2",
+ "mtp_transformer_layers": 1,
+ "num_attention_heads": 48,
+ "num_experts_per_tok": 8,
+ "num_hidden_layers": 62,
+ "num_key_value_heads": 8,
+ "num_local_experts": 256,
+ "num_mtp_modules": 3,
+ "qk_norm_type": "per_layer",
+ "quantization_config": {
+ "activation_scheme": "dynamic",
+ "fmt": "float8_e4m3fn",
+ "quant_method": "fp8",
+ "weight_block_size": [
+ 128,
+ 128
+ ],
+ "modules_to_not_convert": [
+ "gate",
+ "e_score_correction_bias",
+ "lm_head"
+ ]
+ },
+ "rms_norm_eps": 1e-06,
+ "rope_theta": 5000000,
+ "rotary_dim": 64,
+ "scoring_func": "sigmoid",
+ "shared_intermediate_size": 0,
+ "tie_word_embeddings": false,
+ "transformers_version": "4.46.1",
+ "use_cache": true,
+ "use_mtp": true,
+ "use_qk_norm": true,
+ "use_routing_bias": true,
+ "vocab_size": 200064
+}
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..bbeeda1
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
\ No newline at end of file
diff --git a/configuration_minimax_m2.py b/configuration_minimax_m2.py
new file mode 100644
index 0000000..7fcd986
--- /dev/null
+++ b/configuration_minimax_m2.py
@@ -0,0 +1,200 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_minimax_m2.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2025 the HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class MiniMaxM2Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MiniMaxM2Model`]. It is used to instantiate an
+    MiniMaxM2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of
+    [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2).
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the MiniMaxM2 model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`MiniMaxM2Model`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 14336):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 8):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
+        head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
+            The attention head dimension.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+            The maximum sequence length that this model might ever be used with. MiniMaxM2's sliding window attention
+            allows sequence of up to 4096*32 tokens.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            The id of the padding token.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the "end-of-sequence" token.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_theta (`float`, *optional*, defaults to 1000000.0):
+            The base period of the RoPE embeddings.
+        sliding_window (`int`, *optional*):
+            Sliding window attention window size. If not specified, will default to `4096`.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        num_experts_per_tok (`int`, *optional*, defaults to 2):
+            The number of experts to route per-token, can be also interpreted as the `top-k` routing
+            parameter
+        num_local_experts (`int`, *optional*, defaults to 8):
+            Number of experts per Sparse MLP layer.
+        output_router_logits (`bool`, *optional*, defaults to `False`):
+            Whether or not the router logits should be returned by the model. Enabling this will also
+            allow the model to output the auxiliary loss.
+        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+            The aux loss factor for the total loss.
+        router_jitter_noise (`float`, *optional*, defaults to 0.0):
+            Amount of noise to add to the router.
+        use_qk_norm (`bool`, *optional*, defaults to `False`):
+            Whether query/key normalization is enabled. Consumed from `**kwargs` so a checkpoint's
+            `config.json` can supply it.
+        rotary_dim (`int`, *optional*, defaults to `head_dim`):
+            Number of head dimensions rotary position embeddings are applied to. Consumed from `**kwargs`.
+        partial_rotary_factor (`float`, *optional*, defaults to 1):
+            Fraction of the head dimension rotated by RoPE. When `head_dim` is provided this is derived as
+            `rotary_dim / head_dim`, overriding any explicitly passed value.
+
+    ```python
+    >>> from transformers import MiniMaxM2Model, MiniMaxM2Config
+
+    >>> # Initializing a MiniMaxM2 style configuration
+    >>> configuration = MiniMaxM2Config()
+
+    >>> # Initializing a model from the MiniMaxM2 style configuration
+    >>> model = MiniMaxM2Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "minimax_m2"
+    # Cache entries are runtime state, not configuration; ignore them at inference comparison time.
+    keys_to_ignore_at_inference = ["past_key_values"]
+    # Tensor-parallel sharding plan for the base model's projection weights.
+    base_model_tp_plan = {
+        "layers.*.self_attn.q_proj": "colwise",
+        "layers.*.self_attn.k_proj": "colwise",
+        "layers.*.self_attn.v_proj": "colwise",
+        "layers.*.self_attn.o_proj": "rowwise",
+        "layers.*.block_sparse_moe.gate": "colwise_rep",  # we need to replicate here to correctly route experts
+        "layers.*.block_sparse_moe.experts.*.w1": "colwise",
+        "layers.*.block_sparse_moe.experts.*.w2": "rowwise",
+        "layers.*.block_sparse_moe.experts.*.w3": "colwise",
+    }
+    # Pipeline-parallel plan: module name -> (input tensor names, output tensor names).
+    base_model_pp_plan = {
+        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+        "norm": (["hidden_states"], ["hidden_states"]),
+    }
+
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=14336,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=8,
+        head_dim=None,
+        hidden_act="silu",
+        max_position_embeddings=4096 * 32,
+        initializer_range=0.02,
+        rms_norm_eps=1e-5,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        rope_theta=1e6,
+        sliding_window=None,
+        attention_dropout=0.0,
+        num_experts_per_tok=2,
+        num_local_experts=8,
+        output_router_logits=False,
+        router_aux_loss_coef=0.001,
+        router_jitter_noise=0.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.sliding_window = sliding_window
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.attention_dropout = attention_dropout
+        self.head_dim = head_dim
+
+        # Mixture-of-experts routing configuration.
+        self.num_experts_per_tok = num_experts_per_tok
+        self.num_local_experts = num_local_experts
+        self.output_router_logits = output_router_logits
+        self.router_aux_loss_coef = router_aux_loss_coef
+        self.router_jitter_noise = router_jitter_noise
+
+        # These options are not part of the positional signature; checkpoint configs
+        # (e.g. the repo's config.json) supply them through **kwargs.
+        self.use_qk_norm = kwargs.pop("use_qk_norm", False)
+        self.rotary_dim = kwargs.pop("rotary_dim", self.head_dim)
+        self.partial_rotary_factor = kwargs.pop("partial_rotary_factor", 1)
+        # NOTE(review): when head_dim is given, any explicitly passed
+        # partial_rotary_factor is silently overridden by rotary_dim / head_dim.
+        if self.head_dim is not None:
+            self.partial_rotary_factor = self.rotary_dim / self.head_dim
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+
+__all__ = ["MiniMaxM2Config"]
diff --git a/docs/sglang_deploy_guide.md b/docs/sglang_deploy_guide.md
new file mode 100644
index 0000000..b446ec5
--- /dev/null
+++ b/docs/sglang_deploy_guide.md
@@ -0,0 +1,110 @@
+# MiniMax M2.1 Model SGLang Deployment Guide
+
+[English Version](./sglang_deploy_guide.md) | [Chinese Version](./sglang_deploy_guide_cn.md)
+
+We recommend using [SGLang](https://github.com/sgl-project/sglang) to deploy the [MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1) model. SGLang is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, powerful batch request processing capabilities, and deeply optimized underlying performance. We recommend reviewing SGLang's official documentation to check hardware compatibility before deployment.
+
+## Applicable Models
+
+This document applies to the following models. You only need to change the model name during deployment.
+
+- [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
+- [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
+
+The deployment process is illustrated below using MiniMax-M2.1 as an example.
+
+## System Requirements
+
+- OS: Linux
+
+- Python: 3.9 - 3.12
+
+- GPU:
+
+ - compute capability 7.0 or higher
+
+ - Memory requirements: 220 GB for weights, 240 GB per 1M context tokens
+
+The following are recommended configurations; actual requirements should be adjusted based on your use case:
+
+- 4x 96GB GPUs: Supported context length of up to 400K tokens.
+
+- 8x 144GB GPUs: Supported context length of up to 3M tokens.
+
+## Deployment with Python
+
+It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
+
+We recommend installing SGLang in a fresh Python environment:
+
+```bash
+uv venv
+source .venv/bin/activate
+git clone https://github.com/sgl-project/sglang
+cd sglang
+uv pip install -e "python" --prerelease=allow
+```
+
+Run the following command to start the SGLang server. SGLang will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
+
+4-GPU deployment command:
+
+```bash
+python -m sglang.launch_server \
+ --model-path MiniMaxAI/MiniMax-M2.1 \
+ --tp-size 4 \
+ --tool-call-parser minimax-m2 \
+ --reasoning-parser minimax-append-think \
+ --host 0.0.0.0 \
+ --trust-remote-code \
+ --port 8000 \
+ --mem-fraction-static 0.85
+```
+
+8-GPU deployment command:
+
+```bash
+python -m sglang.launch_server \
+ --model-path MiniMaxAI/MiniMax-M2.1 \
+ --tp-size 8 \
+ --ep-size 8 \
+ --tool-call-parser minimax-m2 \
+ --trust-remote-code \
+ --host 0.0.0.0 \
+ --reasoning-parser minimax-append-think \
+ --port 8000 \
+ --mem-fraction-static 0.85
+```
+
+## Testing Deployment
+
+After startup, you can test the SGLang OpenAI-compatible API with the following command:
+
+```bash
+curl http://localhost:8000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "MiniMaxAI/MiniMax-M2.1",
+ "messages": [
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
+ ]
+ }'
+```
+
+## Common Issues
+
+### MiniMax-M2 model is not currently supported
+
+Please upgrade to the latest stable version, >= v0.5.4.post1.
+
+## Getting Support
+
+If you encounter any issues while deploying the MiniMax model:
+
+- Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
+
+- Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
+
+We continuously optimize the deployment experience for our models. Feedback is welcome!
+
diff --git a/docs/sglang_deploy_guide_cn.md b/docs/sglang_deploy_guide_cn.md
new file mode 100644
index 0000000..60d81c6
--- /dev/null
+++ b/docs/sglang_deploy_guide_cn.md
@@ -0,0 +1,119 @@
+# MiniMax M2.1 模型 SGLang 部署指南
+
+[英文版](./sglang_deploy_guide.md) | [中文版](./sglang_deploy_guide_cn.md)
+
+我们推荐使用 [SGLang](https://github.com/sgl-project/sglang) 来部署 [MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1) 模型。SGLang 是一个高性能的推理引擎,其具有卓越的服务吞吐、高效智能的内存管理机制、强大的批量请求处理能力、深度优化的底层性能等特性。我们建议在部署之前查看 SGLang 的官方文档以检查硬件兼容性。
+
+## 本文档适用模型
+
+本文档适用以下模型,只需在部署时修改模型名称即可。
+
+- [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
+- [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
+
+以下以 MiniMax-M2.1 为例说明部署流程。
+
+## 环境要求
+
+- OS:Linux
+
+- Python:3.9 - 3.12
+
+- GPU:
+
+ - compute capability 7.0 or higher
+
+ - 显存需求:权重需要 220 GB,每 1M 上下文 token 需要 240 GB
+
+以下为推荐配置,实际需求请根据业务场景调整:
+
+- 96G x4 GPU:支持 40 万 token 的总上下文。
+
+- 144G x8 GPU:支持长达 300 万 token 的总上下文。
+
+## 使用 Python 部署
+
+建议使用虚拟环境(如 **venv**、**conda**、**uv**)以避免依赖冲突。
+
+建议在全新的 Python 环境中安装 SGLang:
+
+```bash
+uv venv
+source .venv/bin/activate
+git clone https://github.com/sgl-project/sglang
+cd sglang
+uv pip install -e "python" --prerelease=allow
+```
+
+运行如下命令启动 SGLang 服务器,SGLang 会自动从 Huggingface 下载并缓存 MiniMax-M2.1 模型。
+
+4 卡部署命令:
+
+```bash
+python -m sglang.launch_server \
+ --model-path MiniMaxAI/MiniMax-M2.1 \
+ --tp-size 4 \
+ --tool-call-parser minimax-m2 \
+ --reasoning-parser minimax-append-think \
+ --host 0.0.0.0 \
+ --trust-remote-code \
+ --port 8000 \
+ --mem-fraction-static 0.85
+```
+
+8 卡部署命令:
+
+```bash
+python -m sglang.launch_server \
+ --model-path MiniMaxAI/MiniMax-M2.1 \
+ --tp-size 8 \
+ --ep-size 8 \
+ --tool-call-parser minimax-m2 \
+ --trust-remote-code \
+ --host 0.0.0.0 \
+ --reasoning-parser minimax-append-think \
+ --port 8000 \
+ --mem-fraction-static 0.85
+```
+
+## 测试部署
+
+启动后,可以通过如下命令测试 SGLang OpenAI 兼容接口:
+
+```bash
+curl http://localhost:8000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "MiniMaxAI/MiniMax-M2.1",
+ "messages": [
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
+ ]
+ }'
+```
+
+## 常见问题
+
+### Huggingface 网络问题
+
+如果遇到网络问题,可以设置代理后再进行拉取。
+
+```bash
+export HF_ENDPOINT=https://hf-mirror.com
+```
+
+### MiniMax-M2 model is not currently supported
+
+请升级到最新的稳定版本, >= v0.5.4.post1.
+
+## 获取支持
+
+如果在部署 MiniMax 模型过程中遇到任何问题:
+
+- 通过邮箱 [model@minimax.io](mailto:model@minimax.io) 等官方渠道联系我们的技术支持团队
+
+- 在我们的 [GitHub](https://github.com/MiniMax-AI) 仓库提交 Issue
+
+- 通过我们的 [官方企业微信交流群](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg) 反馈
+
+我们会持续优化模型的部署体验,欢迎反馈!
diff --git a/docs/tool_calling_guide.md b/docs/tool_calling_guide.md
new file mode 100644
index 0000000..63404e3
--- /dev/null
+++ b/docs/tool_calling_guide.md
@@ -0,0 +1,487 @@
+# MiniMax-M2.1 Tool Calling Guide
+
+[English Version](./tool_calling_guide.md) | [Chinese Version](./tool_calling_guide_cn.md)
+
+MiniMax-M2.1 supports the same toolcall syntax as MiniMax-M2.
+
+## Introduction
+
+The MiniMax-M2.1 model supports tool calling capabilities, enabling the model to identify when external tools need to be called and output tool call parameters in a structured format. This document provides detailed instructions on how to use the tool calling features of MiniMax-M2.1.
+
+## Basic Example
+
+The following Python script implements a weather query tool call example based on the OpenAI SDK:
+
+```python
+from openai import OpenAI
+import json
+
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
+
+def get_weather(location: str, unit: str):
+ return f"Getting the weather for {location} in {unit}..."
+
+tool_functions = {"get_weather": get_weather}
+
+tools = [{
+ "type": "function",
+ "function": {
+ "name": "get_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"},
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
+ },
+ "required": ["location", "unit"]
+ }
+ }
+}]
+
+response = client.chat.completions.create(
+ model=client.models.list().data[0].id,
+ messages=[{"role": "user", "content": "What's the weather like in San Francisco? use celsius."}],
+ tools=tools,
+ tool_choice="auto"
+)
+
+print(response)
+
+tool_call = response.choices[0].message.tool_calls[0].function
+print(f"Function called: {tool_call.name}")
+print(f"Arguments: {tool_call.arguments}")
+print(f"Result: {get_weather(**json.loads(tool_call.arguments))}")
+```
+
+**Output Example:**
+```
+Function called: get_weather
+Arguments: {"location": "San Francisco, CA", "unit": "celsius"}
+Result: Getting the weather for San Francisco, CA in celsius...
+```
+
+## Manually Parsing Model Output
+
+**We strongly recommend using vLLM or SGLang for parsing tool calls.** If you cannot use the built-in parser of inference engines (e.g., vLLM and SGLang) that support MiniMax-M2.1, or need to use other inference frameworks (such as transformers, TGI, etc.), you can manually parse the model's raw output using the following method. This approach requires you to parse the XML tag format of the model output yourself.
+
+### Example Using Transformers
+
+Here is a complete example using the transformers library:
+
+```python
+from transformers import AutoTokenizer
+
+def get_default_tools():
+ return [
+ {
+ "name": "get_current_weather",
+ "description": "Get the latest weather for a location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "A certain city, such as Beijing, Shanghai"
+ }
+ },
+            },
+ "required": ["location"],
+ "type": "object"
+ }
+ ]
+
+# Load model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+prompt = "What's the weather like in Shanghai today?"
+messages = [
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": prompt},
+]
+
+# Enable function calling tools
+tools = get_default_tools()
+
+# Apply chat template and include tool definitions
+text = tokenizer.apply_chat_template(
+ messages,
+ tokenize=False,
+ add_generation_prompt=True,
+ tools=tools
+)
+
+# Send request (using any inference service)
+import requests
+payload = {
+ "model": "MiniMaxAI/MiniMax-M2.1",
+ "prompt": text,
+ "max_tokens": 4096
+}
+response = requests.post(
+ "http://localhost:8000/v1/completions",
+ headers={"Content-Type": "application/json"},
+ json=payload,
+ stream=False,
+)
+
+# Model output needs manual parsing
+raw_output = response.json()["choices"][0]["text"]
+print("Raw output:", raw_output)
+
+# Use the parsing function below to process the output
+tool_calls = parse_tool_calls(raw_output, tools)
+```
+
+## 🛠️ Tool Call Definition
+
+### Tool Structure
+
+Tool calls need to define the `tools` field in the request body. Each tool consists of the following parts:
+
+```json
+{
+ "tools": [
+ {
+ "name": "search_web",
+ "description": "Search function.",
+ "parameters": {
+ "properties": {
+ "query_list": {
+ "description": "Keywords for search, list should contain 1 element.",
+ "items": { "type": "string" },
+ "type": "array"
+ },
+ "query_tag": {
+ "description": "Category of query",
+ "items": { "type": "string" },
+ "type": "array"
+ }
+ },
+ "required": [ "query_list", "query_tag" ],
+ "type": "object"
+ }
+ }
+ ]
+}
+```
+
+**Field Descriptions:**
+- `name`: Function name
+- `description`: Function description
+- `parameters`: Function parameter definition
+ - `properties`: Parameter property definition, where key is the parameter name and value contains detailed parameter description
+ - `required`: List of required parameters
+ - `type`: Parameter type (usually "object")
+
+### Internal Processing Format
+
+When processing within the MiniMax-M2.1 model, tool definitions are converted to a special format and concatenated to the input text. Here is a complete example:
+
+```
+]~!b[]~b]system
+You are a helpful assistant.
+
+# Tools
+You may call one or more tools to assist with the user query.
+Here are the tools available in JSONSchema format:
+
+
+{"name": "search_web", "description": "Search function.", "parameters": {"type": "object", "properties": {"query_list": {"type": "array", "items": {"type": "string"}, "description": "Keywords for search, list should contain 1 element."}, "query_tag": {"type": "array", "items": {"type": "string"}, "description": "Category of query"}}, "required": ["query_list", "query_tag"]}}
+
+
+When making tool calls, use XML format to invoke tools and pass parameters:
+
+<minimax:tool_call>
+<invoke name="tool-name-1">
+<parameter name="param-key-1">param-value-1</parameter>
+<parameter name="param-key-2">param-value-2</parameter>
+...
+</invoke>
+</minimax:tool_call>
+[e~[
+]~b]user
+When were the latest announcements from OpenAI and Gemini?[e~[
+]~b]ai
+
+```
+
+**Format Description:**
+
+- `]~!b[]~b]system`: System message start marker
+- `[e~[`: Message end marker
+- `]~b]user`: User message start marker
+- `]~b]ai`: Assistant message start marker
+- `]~b]tool`: Tool result message start marker
+- `<tools>...</tools>`: Tool definition area, each tool is wrapped with a `<tool>` tag, content is JSON Schema
+- `<minimax:tool_call>...</minimax:tool_call>`: Tool call area
+- `<think>...</think>`: Thinking process marker during generation
+
+### Model Output Format
+
+MiniMax-M2.1 uses structured XML tag format:
+
+```xml
+<minimax:tool_call>
+<invoke name="search_web">
+<parameter name="query_tag">["technology", "events"]</parameter>
+<parameter name="query_list">["\"OpenAI\" \"latest\" \"release\""]</parameter>
+</invoke>
+<invoke name="search_web">
+<parameter name="query_tag">["technology", "events"]</parameter>
+<parameter name="query_list">["\"Gemini\" \"latest\" \"release\""]</parameter>
+</invoke>
+</minimax:tool_call>
+
+Each tool call uses the `<invoke name="...">` tag, and parameters use the `<parameter name="...">` tag wrapper.
+
+## Manually Parsing Tool Call Results
+
+### Parsing Tool Calls
+
+MiniMax-M2.1 uses structured XML tags, which require a different parsing approach. The core function is as follows:
+
+```python
+import re
+import json
+from typing import Any, Optional, List, Dict
+
+
+def extract_name(name_str: str) -> str:
+ """Extract name from quoted string"""
+ name_str = name_str.strip()
+ if name_str.startswith('"') and name_str.endswith('"'):
+ return name_str[1:-1]
+ elif name_str.startswith("'") and name_str.endswith("'"):
+ return name_str[1:-1]
+ return name_str
+
+
+def convert_param_value(value: str, param_type: str) -> Any:
+ """Convert parameter value based on parameter type"""
+ if value.lower() == "null":
+ return None
+
+ param_type = param_type.lower()
+
+ if param_type in ["string", "str", "text"]:
+ return value
+ elif param_type in ["integer", "int"]:
+ try:
+ return int(value)
+ except (ValueError, TypeError):
+ return value
+ elif param_type in ["number", "float"]:
+ try:
+ val = float(value)
+ return val if val != int(val) else int(val)
+ except (ValueError, TypeError):
+ return value
+ elif param_type in ["boolean", "bool"]:
+ return value.lower() in ["true", "1"]
+ elif param_type in ["object", "array"]:
+ try:
+ return json.loads(value)
+ except json.JSONDecodeError:
+ return value
+ else:
+ # Try JSON parsing, return string if failed
+ try:
+ return json.loads(value)
+ except json.JSONDecodeError:
+ return value
+
+
+def parse_tool_calls(model_output: str, tools: Optional[List[Dict]] = None) -> List[Dict]:
+ """
+ Extract all tool calls from model output
+
+ Args:
+ model_output: Complete output text from the model
+ tools: Tool definition list for getting parameter type information, format can be:
+ - [{"name": "...", "parameters": {...}}]
+ - [{"type": "function", "function": {"name": "...", "parameters": {...}}}]
+
+ Returns:
+ Parsed tool call list, each element contains name and arguments fields
+
+ Example:
+ >>> tools = [{
+ ... "name": "get_weather",
+ ... "parameters": {
+ ... "type": "object",
+ ... "properties": {
+ ... "location": {"type": "string"},
+ ... "unit": {"type": "string"}
+ ... }
+ ... }
+ ... }]
+        >>> output = '''<minimax:tool_call>
+        ... <invoke name="get_weather">
+        ... <parameter name="location">San Francisco</parameter>
+        ... <parameter name="unit">celsius</parameter>
+        ... </invoke>
+        ... </minimax:tool_call>'''
+ >>> result = parse_tool_calls(output, tools)
+ >>> print(result)
+ [{'name': 'get_weather', 'arguments': {'location': 'San Francisco', 'unit': 'celsius'}}]
+ """
+ # Quick check if tool call marker is present
+    if "<minimax:tool_call>" not in model_output:
+ return []
+
+ tool_calls = []
+
+ try:
+        # Match all <minimax:tool_call> blocks
+        tool_call_regex = re.compile(r"<minimax:tool_call>(.*?)</minimax:tool_call>", re.DOTALL)
+        invoke_regex = re.compile(r"<invoke name=(.*?)</invoke>", re.DOTALL)
+        parameter_regex = re.compile(r"<parameter name=(.*?)</parameter>", re.DOTALL)
+
+ # Iterate through all tool_call blocks
+ for tool_call_match in tool_call_regex.findall(model_output):
+ # Iterate through all invokes in this block
+ for invoke_match in invoke_regex.findall(tool_call_match):
+ # Extract function name
+ name_match = re.search(r'^([^>]+)', invoke_match)
+ if not name_match:
+ continue
+
+ function_name = extract_name(name_match.group(1))
+
+ # Get parameter configuration
+ param_config = {}
+ if tools:
+ for tool in tools:
+ tool_name = tool.get("name") or tool.get("function", {}).get("name")
+ if tool_name == function_name:
+ params = tool.get("parameters") or tool.get("function", {}).get("parameters")
+ if isinstance(params, dict) and "properties" in params:
+ param_config = params["properties"]
+ break
+
+ # Extract parameters
+ param_dict = {}
+ for match in parameter_regex.findall(invoke_match):
+ param_match = re.search(r'^([^>]+)>(.*)', match, re.DOTALL)
+ if param_match:
+ param_name = extract_name(param_match.group(1))
+ param_value = param_match.group(2).strip()
+
+ # Remove leading and trailing newlines
+ if param_value.startswith('\n'):
+ param_value = param_value[1:]
+ if param_value.endswith('\n'):
+ param_value = param_value[:-1]
+
+ # Get parameter type and convert
+ param_type = "string"
+ if param_name in param_config:
+ if isinstance(param_config[param_name], dict) and "type" in param_config[param_name]:
+ param_type = param_config[param_name]["type"]
+
+ param_dict[param_name] = convert_param_value(param_value, param_type)
+
+ tool_calls.append({
+ "name": function_name,
+ "arguments": param_dict
+ })
+
+ except Exception as e:
+ print(f"Failed to parse tool calls: {e}")
+ return []
+
+ return tool_calls
+```
+
+**Usage Example:**
+
+```python
+# Define tools
+tools = [
+ {
+ "name": "get_weather",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {"type": "string"},
+ "unit": {"type": "string"}
+ },
+ "required": ["location", "unit"]
+ }
+ }
+]
+
+# Model output
+model_output = """Let me help you query the weather.
+<minimax:tool_call>
+<invoke name="get_weather">
+<parameter name="location">San Francisco</parameter>
+<parameter name="unit">celsius</parameter>
+</invoke>
+</minimax:tool_call>"""
+
+# Parse tool calls
+tool_calls = parse_tool_calls(model_output, tools)
+
+# Output results
+for call in tool_calls:
+ print(f"Function called: {call['name']}")
+ print(f"Arguments: {call['arguments']}")
+ # Output: Function called: get_weather
+ # Arguments: {'location': 'San Francisco', 'unit': 'celsius'}
+```
+
+### Executing Tool Calls
+
+After parsing is complete, you can execute the corresponding tool and construct the return result:
+
+```python
+def execute_function_call(function_name: str, arguments: dict):
+ """Execute function call and return result"""
+ if function_name == "get_weather":
+ location = arguments.get("location", "Unknown location")
+ unit = arguments.get("unit", "celsius")
+ # Build function execution result
+ return {
+ "role": "tool",
+ "content": [
+ {
+ "name": function_name,
+ "type": "text",
+ "text": json.dumps({
+ "location": location,
+ "temperature": "25",
+ "unit": unit,
+ "weather": "Sunny"
+ }, ensure_ascii=False)
+ }
+ ]
+ }
+ elif function_name == "search_web":
+ query_list = arguments.get("query_list", [])
+ query_tag = arguments.get("query_tag", [])
+ # Simulate search results
+ return {
+ "role": "tool",
+ "content": [
+ {
+ "name": function_name,
+ "type": "text",
+ "text": f"Search keywords: {query_list}, Category: {query_tag}\nSearch results: Relevant information found"
+ }
+ ]
+ }
+
+ return None
+```
+
+### Returning Tool Execution Results to the Model
+
+After successfully parsing tool calls, you should add the tool execution results to the conversation history so that the model can access and utilize this information in subsequent interactions. Refer to [chat_template.jinja](https://huggingface.co/MiniMaxAI/MiniMax-M2.1/blob/main/chat_template.jinja) for concatenation format.
+
+## References
+
+- [MiniMax-M2.1 Model Repository](https://github.com/MiniMax-AI/MiniMax-M2.1)
+- [vLLM Project Homepage](https://github.com/vllm-project/vllm)
+- [SGLang Project Homepage](https://github.com/sgl-project/sglang)
+- [OpenAI Python SDK](https://github.com/openai/openai-python)
\ No newline at end of file
diff --git a/docs/tool_calling_guide_cn.md b/docs/tool_calling_guide_cn.md
new file mode 100644
index 0000000..98c2cfd
--- /dev/null
+++ b/docs/tool_calling_guide_cn.md
@@ -0,0 +1,499 @@
+# MiniMax-M2.1 工具调用指南
+
+[英文版](./tool_calling_guide.md) | [中文版](./tool_calling_guide_cn.md)
+
+MiniMax-M2.1 支持与 MiniMax-M2 相同的工具调用语法。
+
+## 简介
+
+MiniMax-M2.1 模型支持工具调用功能,使模型能够识别何时需要调用外部工具,并以结构化格式输出工具调用参数。本文档提供了有关如何使用 MiniMax-M2.1 工具调用功能的详细说明。
+
+## 基础示例
+
+以下 Python 脚本基于 OpenAI SDK 实现了一个天气查询工具调用示例:
+
+```python
+from openai import OpenAI
+import json
+
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
+
+def get_weather(location: str, unit: str):
+ return f"Getting the weather for {location} in {unit}..."
+
+tool_functions = {"get_weather": get_weather}
+
+tools = [{
+ "type": "function",
+ "function": {
+ "name": "get_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"},
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
+ },
+ "required": ["location", "unit"]
+ }
+ }
+}]
+
+response = client.chat.completions.create(
+ model=client.models.list().data[0].id,
+ messages=[{"role": "user", "content": "What's the weather like in San Francisco? use celsius."}],
+ tools=tools,
+ tool_choice="auto"
+)
+
+print(response)
+
+tool_call = response.choices[0].message.tool_calls[0].function
+print(f"Function called: {tool_call.name}")
+print(f"Arguments: {tool_call.arguments}")
+print(f"Result: {get_weather(**json.loads(tool_call.arguments))}")
+```
+
+**输出示例:**
+```
+Function called: get_weather
+Arguments: {"location": "San Francisco, CA", "unit": "celsius"}
+Result: Getting the weather for San Francisco, CA in celsius...
+```
+
+## 手动解析模型输出
+
+**我们强烈建议使用 vLLM 或 SGLang 来解析工具调用。** 如果您无法使用支持 MiniMax-M2.1 的推理引擎(如 vLLM 和 SGLang)的内置解析器,或需要使用其他推理框架(如 transformers、TGI 等),您可以使用以下方法手动解析模型的原始输出。这种方法需要您自己解析模型输出的 XML 标签格式。
+
+### 使用 Transformers 的示例
+
+这是一个使用 transformers 库的完整示例:
+
+```python
+from transformers import AutoTokenizer
+
+def get_default_tools():
+ return [
+ {
+ "name": "get_current_weather",
+ "description": "Get the latest weather for a location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "A certain city, such as Beijing, Shanghai"
+                    }
+                },
+                "required": ["location"]
+            }
+        }
+ ]
+
+# Load model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+prompt = "What's the weather like in Shanghai today?"
+messages = [
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": prompt},
+]
+
+# Enable function calling tools
+tools = get_default_tools()
+
+# Apply chat template and include tool definitions
+text = tokenizer.apply_chat_template(
+ messages,
+ tokenize=False,
+ add_generation_prompt=True,
+ tools=tools
+)
+
+# Send request (using any inference service)
+import requests
+payload = {
+ "model": "MiniMaxAI/MiniMax-M2.1",
+ "prompt": text,
+ "max_tokens": 4096
+}
+response = requests.post(
+ "http://localhost:8000/v1/completions",
+ headers={"Content-Type": "application/json"},
+ json=payload,
+ stream=False,
+)
+
+# Model output needs manual parsing
+raw_output = response.json()["choices"][0]["text"]
+print("Raw output:", raw_output)
+
+# Use the parsing function below to process the output
+tool_calls = parse_tool_calls(raw_output, tools)
+```
+
+## 🛠️ 工具调用定义
+
+### 工具结构
+
+工具调用需要在请求体中定义 `tools` 字段。每个工具由以下部分组成:
+
+```json
+{
+ "tools": [
+ {
+ "name": "search_web",
+ "description": "Search function.",
+ "parameters": {
+ "properties": {
+ "query_list": {
+ "description": "Keywords for search, list should contain 1 element.",
+ "items": { "type": "string" },
+ "type": "array"
+ },
+ "query_tag": {
+ "description": "Category of query",
+ "items": { "type": "string" },
+ "type": "array"
+ }
+ },
+ "required": [ "query_list", "query_tag" ],
+ "type": "object"
+ }
+ }
+ ]
+}
+```
+
+**字段说明:**
+- `name`:函数名称
+- `description`:函数描述
+- `parameters`:函数参数定义
+ - `properties`:参数属性定义,其中键是参数名称,值包含详细的参数描述
+ - `required`:必需参数列表
+ - `type`:参数类型(通常为 "object")
+
+### 内部处理格式
+
+在 MiniMax-M2.1 模型内部处理时,工具定义会被转换为特殊格式并连接到输入文本中。以下是一个完整示例:
+
+```
+]~!b[]~b]system
+You are a helpful assistant.
+
+# Tools
+You may call one or more tools to assist with the user query.
+Here are the tools available in JSONSchema format:
+
+<tools>
+<tool>{"name": "search_web", "description": "Search function.", "parameters": {"type": "object", "properties": {"query_list": {"type": "array", "items": {"type": "string"}, "description": "Keywords for search, list should contain 1 element."}, "query_tag": {"type": "array", "items": {"type": "string"}, "description": "Category of query"}}, "required": ["query_list", "query_tag"]}}</tool>
+</tools>
+
+When making tool calls, use XML format to invoke tools and pass parameters:
+
+<minimax:tool_call>
+<invoke name="tool-name-1">
+<parameter name="param-key-1">param-value-1</parameter>
+<parameter name="param-key-2">param-value-2</parameter>
+...
+</invoke>
+</minimax:tool_call>
+[e~[
+]~b]user
+When were the latest announcements from OpenAI and Gemini?[e~[
+]~b]ai
+
+```
+
+**格式说明:**
+
+- `]~!b[]~b]system`:系统消息开始标记
+- `[e~[`:消息结束标记
+- `]~b]user`:用户消息开始标记
+- `]~b]ai`:助手消息开始标记
+- `]~b]tool`:工具结果消息开始标记
+- `<tools>...</tools>`:工具定义区域,每个工具都用 `<tool>` 标签包装,内容为 JSON Schema
+- `<minimax:tool_call>...</minimax:tool_call>`:工具调用区域
+- `<think>...</think>`:生成过程中的思考过程标记
+
+### 模型输出格式
+
+MiniMax-M2.1 使用结构化的 XML 标签格式:
+
+```xml
+<minimax:tool_call>
+<invoke name="search_web">
+<parameter name="query_tag">["technology", "events"]</parameter>
+<parameter name="query_list">["\"OpenAI\" \"latest\" \"release\""]</parameter>
+</invoke>
+<invoke name="search_web">
+<parameter name="query_tag">["technology", "events"]</parameter>
+<parameter name="query_list">["\"Gemini\" \"latest\" \"release\""]</parameter>
+</invoke>
+</minimax:tool_call>
+```
+
+每个工具调用使用 `<invoke>` 标签,参数使用 `<parameter>` 标签包装。
+
+## 手动解析工具调用结果
+
+### 解析工具调用
+
+MiniMax-M2.1 使用结构化的 XML 标签,这需要一种不同的解析方法。核心函数如下:
+
+```python
+import re
+import json
+from typing import Any, Optional, List, Dict
+
+
+def extract_name(name_str: str) -> str:
+ """Extract name from quoted string"""
+ name_str = name_str.strip()
+ if name_str.startswith('"') and name_str.endswith('"'):
+ return name_str[1:-1]
+ elif name_str.startswith("'") and name_str.endswith("'"):
+ return name_str[1:-1]
+ return name_str
+
+
+def convert_param_value(value: str, param_type: str) -> Any:
+ """Convert parameter value based on parameter type"""
+ if value.lower() == "null":
+ return None
+
+ param_type = param_type.lower()
+
+ if param_type in ["string", "str", "text"]:
+ return value
+ elif param_type in ["integer", "int"]:
+ try:
+ return int(value)
+ except (ValueError, TypeError):
+ return value
+ elif param_type in ["number", "float"]:
+ try:
+ val = float(value)
+ return val if val != int(val) else int(val)
+ except (ValueError, TypeError):
+ return value
+ elif param_type in ["boolean", "bool"]:
+ return value.lower() in ["true", "1"]
+ elif param_type in ["object", "array"]:
+ try:
+ return json.loads(value)
+ except json.JSONDecodeError:
+ return value
+ else:
+ # Try JSON parsing, return string if failed
+ try:
+ return json.loads(value)
+ except json.JSONDecodeError:
+ return value
+
+
+def parse_tool_calls(model_output: str, tools: Optional[List[Dict]] = None) -> List[Dict]:
+ """
+ Extract all tool calls from model output
+
+ Args:
+ model_output: Complete output text from the model
+ tools: Tool definition list for getting parameter type information, format can be:
+ - [{"name": "...", "parameters": {...}}]
+ - [{"type": "function", "function": {"name": "...", "parameters": {...}}}]
+
+ Returns:
+ Parsed tool call list, each element contains name and arguments fields
+
+ Example:
+ >>> tools = [{
+ ... "name": "get_weather",
+ ... "parameters": {
+ ... "type": "object",
+ ... "properties": {
+ ... "location": {"type": "string"},
+ ... "unit": {"type": "string"}
+ ... }
+ ... }
+ ... }]
+        >>> output = '''<minimax:tool_call>
+        ... <invoke name="get_weather">
+        ... <parameter name="location">San Francisco</parameter>
+        ... <parameter name="unit">celsius</parameter>
+        ... </invoke>
+        ... </minimax:tool_call>'''
+ >>> result = parse_tool_calls(output, tools)
+ >>> print(result)
+ [{'name': 'get_weather', 'arguments': {'location': 'San Francisco', 'unit': 'celsius'}}]
+ """
+ # Quick check if tool call marker is present
+    if "<minimax:tool_call>" not in model_output:
+ return []
+
+ tool_calls = []
+
+ try:
+        # Match all <minimax:tool_call> blocks
+        tool_call_regex = re.compile(r"<minimax:tool_call>(.*?)</minimax:tool_call>", re.DOTALL)
+        invoke_regex = re.compile(r"<invoke name=(.*?)</invoke>", re.DOTALL)
+        parameter_regex = re.compile(r"<parameter name=(.*?)</parameter>", re.DOTALL)
+
+ # Iterate through all tool_call blocks
+ for tool_call_match in tool_call_regex.findall(model_output):
+ # Iterate through all invokes in this block
+ for invoke_match in invoke_regex.findall(tool_call_match):
+ # Extract function name
+ name_match = re.search(r'^([^>]+)', invoke_match)
+ if not name_match:
+ continue
+
+ function_name = extract_name(name_match.group(1))
+
+ # Get parameter configuration
+ param_config = {}
+ if tools:
+ for tool in tools:
+ tool_name = tool.get("name") or tool.get("function", {}).get("name")
+ if tool_name == function_name:
+ params = tool.get("parameters") or tool.get("function", {}).get("parameters")
+ if isinstance(params, dict) and "properties" in params:
+ param_config = params["properties"]
+ break
+
+ # Extract parameters
+ param_dict = {}
+ for match in parameter_regex.findall(invoke_match):
+ param_match = re.search(r'^([^>]+)>(.*)', match, re.DOTALL)
+ if param_match:
+ param_name = extract_name(param_match.group(1))
+ param_value = param_match.group(2).strip()
+
+ # Remove leading and trailing newlines
+ if param_value.startswith('\n'):
+ param_value = param_value[1:]
+ if param_value.endswith('\n'):
+ param_value = param_value[:-1]
+
+ # Get parameter type and convert
+ param_type = "string"
+ if param_name in param_config:
+ if isinstance(param_config[param_name], dict) and "type" in param_config[param_name]:
+ param_type = param_config[param_name]["type"]
+
+ param_dict[param_name] = convert_param_value(param_value, param_type)
+
+ tool_calls.append({
+ "name": function_name,
+ "arguments": param_dict
+ })
+
+ except Exception as e:
+ print(f"Failed to parse tool calls: {e}")
+ return []
+
+ return tool_calls
+```
+
+**使用示例:**
+
+```python
+# Define tools
+tools = [
+ {
+ "name": "get_weather",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {"type": "string"},
+ "unit": {"type": "string"}
+ },
+ "required": ["location", "unit"]
+ }
+ }
+]
+
+# Model output
+model_output = """Let me help you query the weather.
+<minimax:tool_call>
+<invoke name="get_weather">
+<parameter name="location">San Francisco</parameter>
+<parameter name="unit">celsius</parameter>
+</invoke>
+</minimax:tool_call>"""
+
+# Parse tool calls
+tool_calls = parse_tool_calls(model_output, tools)
+
+# Output results
+for call in tool_calls:
+ print(f"Function called: {call['name']}")
+ print(f"Arguments: {call['arguments']}")
+ # Output: Function called: get_weather
+ # Arguments: {'location': 'San Francisco', 'unit': 'celsius'}
+```
+
+### 执行工具调用
+
+完成解析后,您可以执行相应的工具并构造返回结果:
+
+```python
+def execute_function_call(function_name: str, arguments: dict):
+ """Execute function call and return result"""
+ if function_name == "get_weather":
+ location = arguments.get("location", "Unknown location")
+ unit = arguments.get("unit", "celsius")
+ # Build function execution result
+ return {
+ "role": "tool",
+ "content": [
+ {
+ "name": function_name,
+ "type": "text",
+ "text": json.dumps({
+ "location": location,
+ "temperature": "25",
+ "unit": unit,
+ "weather": "Sunny"
+ }, ensure_ascii=False)
+ }
+ ]
+ }
+ elif function_name == "search_web":
+ query_list = arguments.get("query_list", [])
+ query_tag = arguments.get("query_tag", [])
+ # Simulate search results
+ return {
+ "role": "tool",
+ "content": [
+ {
+ "name": function_name,
+ "type": "text",
+ "text": f"Search keywords: {query_list}, Category: {query_tag}\nSearch results: Relevant information found"
+ }
+ ]
+ }
+
+ return None
+```
+
+### 将工具执行结果返回给模型
+
+在成功解析工具调用后,您应该将工具执行结果添加到对话历史中,以便模型在后续交互中可以访问和利用这些信息。请参考 [chat_template.jinja](https://huggingface.co/MiniMaxAI/MiniMax-M2.1/blob/main/chat_template.jinja) 了解连接格式。
+
+## 参考文献
+
+- [MiniMax-M2.1 模型仓库](https://github.com/MiniMax-AI/MiniMax-M2.1)
+- [vLLM 项目主页](https://github.com/vllm-project/vllm)
+- [SGLang 项目主页](https://github.com/sgl-project/sglang)
+- [OpenAI Python SDK](https://github.com/openai/openai-python)
+
+## 获取支持
+
+如果遇到任何问题:
+
+- 通过邮箱 [model@minimax.io](mailto:model@minimax.io) 等官方渠道联系我们的技术支持团队
+
+- 在我们的仓库提交 Issue
+
+- 通过我们的 [官方企业微信交流群](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg) 反馈
+
+我们会持续优化模型的使用体验,欢迎反馈!
\ No newline at end of file
diff --git a/docs/transformers_deploy_guide.md b/docs/transformers_deploy_guide.md
new file mode 100644
index 0000000..69b36d1
--- /dev/null
+++ b/docs/transformers_deploy_guide.md
@@ -0,0 +1,91 @@
+# MiniMax M2.1 Model Transformers Deployment Guide
+
+[English Version](./transformers_deploy_guide.md) | [Chinese Version](./transformers_deploy_guide_cn.md)
+
+## Applicable Models
+
+This document applies to the following models. You only need to change the model name during deployment.
+
+- [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
+- [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
+
+The deployment process is illustrated below using MiniMax-M2.1 as an example.
+
+## System Requirements
+
+- OS: Linux
+
+- Python: 3.9 - 3.12
+
+- Transformers: 4.57.1
+
+- GPU:
+
+ - compute capability 7.0 or higher
+
+ - Memory requirements: 220 GB for weights.
+
+## Deployment with Python
+
+It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
+
+We recommend installing Transformers in a fresh Python environment:
+
+```bash
+uv pip install transformers==4.57.1 torch accelerate --torch-backend=auto
+```
+
+Run the following Python script to run the model. Transformers will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+import torch
+
+MODEL_PATH = "MiniMaxAI/MiniMax-M2.1"
+
+model = AutoModelForCausalLM.from_pretrained(
+ MODEL_PATH,
+ device_map="auto",
+ trust_remote_code=True,
+)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+
+messages = [
+ {"role": "user", "content": [{"type": "text", "text": "What is your favourite condiment?"}]},
+ {"role": "assistant", "content": [{"type": "text", "text": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}]},
+ {"role": "user", "content": [{"type": "text", "text": "Do you have mayonnaise recipes?"}]}
+]
+
+model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
+
+generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=model.generation_config)
+
+response = tokenizer.batch_decode(generated_ids)[0]
+
+print(response)
+```
+
+## Common Issues
+
+### Hugging Face Network Issues
+
+If you encounter network issues, you can set up a proxy before pulling the model.
+
+```bash
+export HF_ENDPOINT=https://hf-mirror.com
+```
+
+### MiniMax-M2 model is not currently supported
+
+Please check that trust_remote_code=True.
+
+## Getting Support
+
+If you encounter any issues while deploying the MiniMax model:
+
+- Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
+
+- Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
+
+We continuously optimize the deployment experience for our models. Feedback is welcome!
+
diff --git a/docs/transformers_deploy_guide_cn.md b/docs/transformers_deploy_guide_cn.md
new file mode 100644
index 0000000..24b33f7
--- /dev/null
+++ b/docs/transformers_deploy_guide_cn.md
@@ -0,0 +1,92 @@
+# MiniMax M2.1 模型 Transformers 部署指南
+
+[英文版](./transformers_deploy_guide.md) | [中文版](./transformers_deploy_guide_cn.md)
+
+## 本文档适用模型
+
+本文档适用以下模型,只需在部署时修改模型名称即可。
+
+- [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
+- [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
+
+以下以 MiniMax-M2.1 为例说明部署流程。
+
+## 环境要求
+
+- OS:Linux
+
+- Python:3.9 - 3.12
+
+- Transformers: 4.57.1
+
+- GPU:
+
+ - compute capability 7.0 or higher
+
+ - 显存需求:权重需要 220 GB
+
+## 使用 Python 部署
+
+建议使用虚拟环境(如 **venv**、**conda**、**uv**)以避免依赖冲突。
+
+建议在全新的 Python 环境中安装 Transformers:
+
+```bash
+uv pip install transformers==4.57.1 torch accelerate --torch-backend=auto
+```
+
+运行如下 Python 命令运行模型,Transformers 会自动从 Huggingface 下载并缓存 MiniMax-M2.1 模型。
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+import torch
+
+MODEL_PATH = "MiniMaxAI/MiniMax-M2.1"
+
+model = AutoModelForCausalLM.from_pretrained(
+ MODEL_PATH,
+ device_map="auto",
+ trust_remote_code=True,
+)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+
+messages = [
+ {"role": "user", "content": [{"type": "text", "text": "What is your favourite condiment?"}]},
+ {"role": "assistant", "content": [{"type": "text", "text": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}]},
+ {"role": "user", "content": [{"type": "text", "text": "Do you have mayonnaise recipes?"}]}
+]
+
+model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
+
+generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=model.generation_config)
+
+response = tokenizer.batch_decode(generated_ids)[0]
+
+print(response)
+```
+
+## 常见问题
+
+### Huggingface 网络问题
+
+如果遇到网络问题,可以设置代理后再进行拉取。
+
+```bash
+export HF_ENDPOINT=https://hf-mirror.com
+```
+
+### MiniMax-M2 model is not currently supported
+
+请确认开启 trust_remote_code=True。
+
+## 获取支持
+
+如果在部署 MiniMax 模型过程中遇到任何问题:
+
+- 通过邮箱 [model@minimax.io](mailto:model@minimax.io) 等官方渠道联系我们的技术支持团队
+
+- 在我们的 [GitHub](https://github.com/MiniMax-AI) 仓库提交 Issue
+
+- 通过我们的 [官方企业微信交流群](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg) 反馈
+
+我们会持续优化模型的部署体验,欢迎反馈!
diff --git a/docs/vllm_deploy_guide.md b/docs/vllm_deploy_guide.md
new file mode 100644
index 0000000..80a281e
--- /dev/null
+++ b/docs/vllm_deploy_guide.md
@@ -0,0 +1,113 @@
+# MiniMax M2.1 Model vLLM Deployment Guide
+
+[English Version](./vllm_deploy_guide.md) | [Chinese Version](./vllm_deploy_guide_cn.md)
+
+We recommend using [vLLM](https://docs.vllm.ai/en/stable/) to deploy the [MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1) model. vLLM is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, powerful batch request processing capabilities, and deeply optimized underlying performance. We recommend reviewing vLLM's official documentation to check hardware compatibility before deployment.
+
+## Applicable Models
+
+This document applies to the following models. You only need to change the model name during deployment.
+
+- [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
+
+The deployment process is illustrated below using MiniMax-M2.1 as an example.
+
+## System Requirements
+
+- OS: Linux
+
+- Python: 3.9 - 3.12
+
+- GPU:
+
+ - compute capability 7.0 or higher
+
+ - Memory requirements: 220 GB for weights, 240 GB per 1M context tokens
+
+The following are recommended configurations; actual requirements should be adjusted based on your use case:
+
+- 4x 96GB GPUs: Supported context length of up to 400K tokens.
+
+- 8x 144GB GPUs: Supported context length of up to 3M tokens.
+
+## Deployment with Python
+
+It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
+
+We recommend installing vLLM in a fresh Python environment:
+
+```bash
+uv venv
+source .venv/bin/activate
+uv pip install -U vllm --extra-index-url https://wheels.vllm.ai/nightly
+```
+
+Run the following command to start the vLLM server. vLLM will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
+
+4-GPU deployment command:
+
+```bash
+SAFETENSORS_FAST_GPU=1 vllm serve \
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
+ --tensor-parallel-size 4 \
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
+ --reasoning-parser minimax_m2_append_think
+```
+
+8-GPU deployment command:
+
+```bash
+SAFETENSORS_FAST_GPU=1 vllm serve \
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
+ --enable_expert_parallel --tensor-parallel-size 8 \
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
+ --reasoning-parser minimax_m2_append_think
+```
+
+## Testing Deployment
+
+After startup, you can test the vLLM OpenAI-compatible API with the following command:
+
+```bash
+curl http://localhost:8000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "MiniMaxAI/MiniMax-M2.1",
+ "messages": [
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
+ ]
+ }'
+```
+
+## Common Issues
+
+### MiniMax-M2 model is not currently supported
+
+This vLLM version is outdated. Please upgrade to the latest version.
+
+### torch.AcceleratorError: CUDA error: an illegal memory access was encountered
+Add `--compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"` to the startup parameters to resolve this issue. For example:
+
+```bash
+SAFETENSORS_FAST_GPU=1 vllm serve \
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
+ --enable_expert_parallel --tensor-parallel-size 8 \
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
+ --reasoning-parser minimax_m2_append_think \
+ --compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"
+```
+
+### Output is garbled
+
+If you encounter corrupted output when using vLLM to serve these models, you can upgrade to the nightly version (ensure it is a version after commit [cf3eacfe58fa9e745c2854782ada884a9f992cf7](https://github.com/vllm-project/vllm/commit/cf3eacfe58fa9e745c2854782ada884a9f992cf7))
+
+## Getting Support
+
+If you encounter any issues while deploying the MiniMax model:
+
+- Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
+
+- Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
+
+We continuously optimize the deployment experience for our models. Feedback is welcome!
diff --git a/docs/vllm_deploy_guide_cn.md b/docs/vllm_deploy_guide_cn.md
new file mode 100644
index 0000000..facd14d
--- /dev/null
+++ b/docs/vllm_deploy_guide_cn.md
@@ -0,0 +1,123 @@
+# MiniMax M2.1 模型 vLLM 部署指南
+
+[英文版](./vllm_deploy_guide.md) | [中文版](./vllm_deploy_guide_cn.md)
+
+我们推荐使用 [vLLM](https://docs.vllm.ai/en/stable/) 来部署 [MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1) 模型。vLLM 是一个高性能的推理引擎,其具有卓越的服务吞吐、高效智能的内存管理机制、强大的批量请求处理能力、深度优化的底层性能等特性。我们建议在部署之前查看 vLLM 的官方文档以检查硬件兼容性。
+
+## 本文档适用模型
+
+本文档适用以下模型,只需在部署时修改模型名称即可。
+
+- [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
+
+以下以 MiniMax-M2.1 为例说明部署流程。
+
+## 环境要求
+
+- OS:Linux
+
+- Python:3.9 - 3.12
+
+- GPU:
+
+ - compute capability 7.0 or higher
+
+ - 显存需求:权重需要 220 GB,每 1M 上下文 token 需要 240 GB
+
+以下为推荐配置,实际需求请根据业务场景调整:
+
+- 96G x4 GPU:支持 40 万 token 的总上下文。
+
+- 144G x8 GPU:支持长达 300 万 token 的总上下文。
+
+## 使用 Python 部署
+
+建议使用虚拟环境(如 **venv**、**conda**、**uv**)以避免依赖冲突。
+
+建议在全新的 Python 环境中安装 vLLM:
+
+```bash
+uv venv
+source .venv/bin/activate
+uv pip install -U vllm --extra-index-url https://wheels.vllm.ai/nightly
+```
+
+运行如下命令启动 vLLM 服务器,vLLM 会自动从 Huggingface 下载并缓存 MiniMax-M2.1 模型。
+
+4 卡部署命令:
+
+```bash
+SAFETENSORS_FAST_GPU=1 vllm serve \
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
+ --tensor-parallel-size 4 \
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
+ --reasoning-parser minimax_m2_append_think
+```
+
+8 卡部署命令:
+
+```bash
+SAFETENSORS_FAST_GPU=1 vllm serve \
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
+ --enable_expert_parallel --tensor-parallel-size 8 \
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
+ --reasoning-parser minimax_m2_append_think
+```
+
+## 测试部署
+
+启动后,可以通过如下命令测试 vLLM OpenAI 兼容接口:
+
+```bash
+curl http://localhost:8000/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "MiniMaxAI/MiniMax-M2.1",
+ "messages": [
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
+ ]
+ }'
+```
+
+## 常见问题
+
+### Huggingface 网络问题
+
+如果遇到网络问题,可以设置代理后再进行拉取。
+
+```bash
+export HF_ENDPOINT=https://hf-mirror.com
+```
+
+### MiniMax-M2 model is not currently supported
+
+该 vLLM 版本过旧,请升级到最新版本。
+
+### torch.AcceleratorError: CUDA error: an illegal memory access was encountered
+在启动参数添加 `--compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"` 可以解决。例如:
+
+```bash
+SAFETENSORS_FAST_GPU=1 vllm serve \
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
+ --enable_expert_parallel --tensor-parallel-size 8 \
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
+ --reasoning-parser minimax_m2_append_think \
+ --compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"
+```
+
+### 模型输出乱码
+
+如果您在使用 vLLM 运行这些模型时遇到输出乱码,可以升级到最新版本(请至少确保版本在提交 [cf3eacfe58fa9e745c2854782ada884a9f992cf7](https://github.com/vllm-project/vllm/commit/cf3eacfe58fa9e745c2854782ada884a9f992cf7) 之后)。
+
+## 获取支持
+
+如果在部署 MiniMax 模型过程中遇到任何问题:
+
+- 通过邮箱 [model@minimax.io](mailto:model@minimax.io) 等官方渠道联系我们的技术支持团队
+
+- 在我们的 [GitHub](https://github.com/MiniMax-AI) 仓库提交 Issue
+
+- 通过我们的 [官方企业微信交流群](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg) 反馈
+
+我们会持续优化模型的部署体验,欢迎反馈!
diff --git a/figures/bench.png b/figures/bench.png
new file mode 100644
index 0000000..f44bc20
Binary files /dev/null and b/figures/bench.png differ
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..30b418a
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "bos_token_id": 200019,
+ "do_sample": true,
+ "eos_token_id": 200020,
+ "temperature": 1.0,
+ "top_p": 0.95,
+ "top_k": 40,
+ "transformers_version": "4.46.1"
+}
diff --git a/merges.txt b/merges.txt
new file mode 100644
index 0000000..6a86280
--- /dev/null
+++ b/merges.txt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca47ea60cf5bd48832586adc264f439d0ea4254176b24a95f85eaa9750e2b5f9
+size 2414077
diff --git a/model-00000-of-00130.safetensors b/model-00000-of-00130.safetensors
new file mode 100644
index 0000000..c5b0683
--- /dev/null
+++ b/model-00000-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bd3c08aa8b8cf7fe9fdc086a9cbcb26ab6335c5eb1c2c8f95cf025de8472020
+size 3693062776
diff --git a/model-00001-of-00130.safetensors b/model-00001-of-00130.safetensors
new file mode 100644
index 0000000..11a6e2a
--- /dev/null
+++ b/model-00001-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb8dca2263e8391673f27cb0445c54c5b93a02a8300b899d2f488dd4313e7c5a
+size 1208321208
diff --git a/model-00002-of-00130.safetensors b/model-00002-of-00130.safetensors
new file mode 100644
index 0000000..268ccfc
--- /dev/null
+++ b/model-00002-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0fb733baac056876c028dcb8d9781df38f5ae9f5cbe9f28ba98149fd413af35
+size 2463868968
diff --git a/model-00003-of-00130.safetensors b/model-00003-of-00130.safetensors
new file mode 100644
index 0000000..14258ef
--- /dev/null
+++ b/model-00003-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63f50ec50a90a93a6eb7c1a82335f70b82e18ca0538a1ab74a50a8bb04ddaaaa
+size 1208321208
diff --git a/model-00004-of-00130.safetensors b/model-00004-of-00130.safetensors
new file mode 100644
index 0000000..f34840d
--- /dev/null
+++ b/model-00004-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1655ebf67c46d6e29206f7c66901165caee47dad28401cd90e71f8f44511ae2
+size 2463868968
diff --git a/model-00005-of-00130.safetensors b/model-00005-of-00130.safetensors
new file mode 100644
index 0000000..8397ca3
--- /dev/null
+++ b/model-00005-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91688a17f8082b76e7dc64074c20c79fddeb8abc5fce2007054f5d65a78d79e3
+size 1208321208
diff --git a/model-00006-of-00130.safetensors b/model-00006-of-00130.safetensors
new file mode 100644
index 0000000..0afcf74
--- /dev/null
+++ b/model-00006-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e64fc1dd6ed7eea3e7e82dcd53d030e2e5dbd39323243bdc988a74fceebd91d8
+size 2463868968
diff --git a/model-00007-of-00130.safetensors b/model-00007-of-00130.safetensors
new file mode 100644
index 0000000..fc88455
--- /dev/null
+++ b/model-00007-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9226113b5690ad894557e3d11a17b9e5869549ba619e07f5fb79a9bb8da6a146
+size 1208321208
diff --git a/model-00008-of-00130.safetensors b/model-00008-of-00130.safetensors
new file mode 100644
index 0000000..1d30627
--- /dev/null
+++ b/model-00008-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f590916e8b21f4d156c7fddf059b7574cb78cbf8929d9fecf47d144f64529f7e
+size 2463868968
diff --git a/model-00009-of-00130.safetensors b/model-00009-of-00130.safetensors
new file mode 100644
index 0000000..f5fdaf1
--- /dev/null
+++ b/model-00009-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6699cbc9c6e0194a3e65930673da36e4e169ac092917a75ad0581597b641e00
+size 1208321208
diff --git a/model-00010-of-00130.safetensors b/model-00010-of-00130.safetensors
new file mode 100644
index 0000000..4c4d3eb
--- /dev/null
+++ b/model-00010-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef6f8825fe6dabc5e6d515b561422ce29621744d1492e008c3b304cb1b837c08
+size 2463868968
diff --git a/model-00011-of-00130.safetensors b/model-00011-of-00130.safetensors
new file mode 100644
index 0000000..dcb34da
--- /dev/null
+++ b/model-00011-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:656b88a33fdaaf6a68e0d0296a2961e1a1597a03eb434d341577ce14ffb9352e
+size 1208321208
diff --git a/model-00012-of-00130.safetensors b/model-00012-of-00130.safetensors
new file mode 100644
index 0000000..dad4e43
--- /dev/null
+++ b/model-00012-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:810124526c634e6c3afc825a264258d5f35888b0d5c066e033e18721ebf09e4e
+size 2463868968
diff --git a/model-00013-of-00130.safetensors b/model-00013-of-00130.safetensors
new file mode 100644
index 0000000..96c9a88
--- /dev/null
+++ b/model-00013-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7fe343f1a89cafd96078d70c6ab099b9a97b07913d4ed7619814ba4467b7848
+size 1208321208
diff --git a/model-00014-of-00130.safetensors b/model-00014-of-00130.safetensors
new file mode 100644
index 0000000..4c766c2
--- /dev/null
+++ b/model-00014-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8923399edc6ed24a51f409661fffdc028133182871ba1d547161b80788865824
+size 2463868968
diff --git a/model-00015-of-00130.safetensors b/model-00015-of-00130.safetensors
new file mode 100644
index 0000000..dc6a2b3
--- /dev/null
+++ b/model-00015-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab22edaf792e0513275cb49a0f3c03a75e43b0c357863095f7e8a3260447374f
+size 1208321208
diff --git a/model-00016-of-00130.safetensors b/model-00016-of-00130.safetensors
new file mode 100644
index 0000000..7c3d3e4
--- /dev/null
+++ b/model-00016-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f50a02b4a1324bd6c5147601fe9b5a6368b6ecf08f39790cee0b5dc8dbb049eb
+size 2463868968
diff --git a/model-00017-of-00130.safetensors b/model-00017-of-00130.safetensors
new file mode 100644
index 0000000..f607d1a
--- /dev/null
+++ b/model-00017-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7cbd569453afaf6270d6dc35c79c8d60f65d70d68f1aec96886c479cbd23242
+size 1208321208
diff --git a/model-00018-of-00130.safetensors b/model-00018-of-00130.safetensors
new file mode 100644
index 0000000..74ab439
--- /dev/null
+++ b/model-00018-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55bf84a8ffe38d2f2495e382f8d5974c206e8da739fdac2cfefa5856a7ada125
+size 2463868968
diff --git a/model-00019-of-00130.safetensors b/model-00019-of-00130.safetensors
new file mode 100644
index 0000000..8113940
--- /dev/null
+++ b/model-00019-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3acc3843a6908699a2dc2284d3aa10454c9ea12470228602db919a979cfcd3b7
+size 1208321208
diff --git a/model-00020-of-00130.safetensors b/model-00020-of-00130.safetensors
new file mode 100644
index 0000000..c137cdd
--- /dev/null
+++ b/model-00020-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea68e1510785e5e4c2b73eb93f0af7191a74bc56a74860958edf763ce3b5b1a3
+size 2463870000
diff --git a/model-00021-of-00130.safetensors b/model-00021-of-00130.safetensors
new file mode 100644
index 0000000..72113ff
--- /dev/null
+++ b/model-00021-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31320dc5ef42bbe7ce0d2f0b3c70c6dc10e8d5dbd6547316c16730daf38b08d5
+size 1208321720
diff --git a/model-00022-of-00130.safetensors b/model-00022-of-00130.safetensors
new file mode 100644
index 0000000..d84e8c5
--- /dev/null
+++ b/model-00022-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ab0509bf5f5c7f6d5b09f73b59074ee1b253a7f085594d542997ea48fab784b
+size 2463870000
diff --git a/model-00023-of-00130.safetensors b/model-00023-of-00130.safetensors
new file mode 100644
index 0000000..3bb9b85
--- /dev/null
+++ b/model-00023-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00349db757e9345161b453f88546b3cea9e2aad7c113aa69cd0d6241598b4c3e
+size 1208321720
diff --git a/model-00024-of-00130.safetensors b/model-00024-of-00130.safetensors
new file mode 100644
index 0000000..35ba4fc
--- /dev/null
+++ b/model-00024-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d3c86760365e7ee331357fc7fdf888c6912074864a59a55f8ecc1192c74dc31
+size 2463870000
diff --git a/model-00025-of-00130.safetensors b/model-00025-of-00130.safetensors
new file mode 100644
index 0000000..b9829c1
--- /dev/null
+++ b/model-00025-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4923ec5f7c08462f2a8c0d37753a7dcca453cdb0404725fcf69efcb9c2cacb71
+size 1208321720
diff --git a/model-00026-of-00130.safetensors b/model-00026-of-00130.safetensors
new file mode 100644
index 0000000..83985d0
--- /dev/null
+++ b/model-00026-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:241a38fc278951a9a2c9d83f1b76d88f5c989ae126aa49d8469a152d35e08e63
+size 2463870000
diff --git a/model-00027-of-00130.safetensors b/model-00027-of-00130.safetensors
new file mode 100644
index 0000000..e5d3898
--- /dev/null
+++ b/model-00027-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b45e70b66d671b104b8d6093da82bdbfae119c0f93a7a379fb714cc7ff29f7f
+size 1208321720
diff --git a/model-00028-of-00130.safetensors b/model-00028-of-00130.safetensors
new file mode 100644
index 0000000..bc2bad0
--- /dev/null
+++ b/model-00028-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3103c28e49707500d99322ad9bd50f79fab1e3a0fce841916f03eb9a741b9bcb
+size 2463870000
diff --git a/model-00029-of-00130.safetensors b/model-00029-of-00130.safetensors
new file mode 100644
index 0000000..6321f9f
--- /dev/null
+++ b/model-00029-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9609517b64714c2c77f05fd460c9a64c6f138bc8973f6b6095ac0becb112303
+size 1208321720
diff --git a/model-00030-of-00130.safetensors b/model-00030-of-00130.safetensors
new file mode 100644
index 0000000..0125178
--- /dev/null
+++ b/model-00030-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d71876abd04435d37bbfc8f98b2d963de2b7d048bee937852559e64434bcc5e6
+size 2463870000
diff --git a/model-00031-of-00130.safetensors b/model-00031-of-00130.safetensors
new file mode 100644
index 0000000..b185889
--- /dev/null
+++ b/model-00031-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5763545dabf7e6df15de34ff46589fcfeec76623b48b1cd3942575f8644c7a1
+size 1208321720
diff --git a/model-00032-of-00130.safetensors b/model-00032-of-00130.safetensors
new file mode 100644
index 0000000..2e71ff7
--- /dev/null
+++ b/model-00032-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab47c794e7132829154348ad1ad82fc132cf99165da1d84b416daf997aafa4d0
+size 2463870000
diff --git a/model-00033-of-00130.safetensors b/model-00033-of-00130.safetensors
new file mode 100644
index 0000000..abc428c
--- /dev/null
+++ b/model-00033-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4847bbd2db4dfb401b164eb33e099b112e4f3e1d74f8704794b13439f6b6410
+size 1208321720
diff --git a/model-00034-of-00130.safetensors b/model-00034-of-00130.safetensors
new file mode 100644
index 0000000..f1785c6
--- /dev/null
+++ b/model-00034-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b04f732c7ef8239d2197ce8c0d5a63b39834ed59a73d6d769187dba7dd9007e2
+size 2463870000
diff --git a/model-00035-of-00130.safetensors b/model-00035-of-00130.safetensors
new file mode 100644
index 0000000..247b06b
--- /dev/null
+++ b/model-00035-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5414b2506d712d1e80b57409995ba88a278c9dddb160fa53ba45ea72a2174b83
+size 1208321720
diff --git a/model-00036-of-00130.safetensors b/model-00036-of-00130.safetensors
new file mode 100644
index 0000000..581f70e
--- /dev/null
+++ b/model-00036-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fa0a9cf5a07a11c2355a6b69a41c12a73e170a2d4d87fc15ec90167711c5e73
+size 2463870000
diff --git a/model-00037-of-00130.safetensors b/model-00037-of-00130.safetensors
new file mode 100644
index 0000000..86379f0
--- /dev/null
+++ b/model-00037-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4166a183ee73cb7bf42b2fb7c1e4351c5d5f5e32abad4db2ae353ae0cfdc68ff
+size 1208321720
diff --git a/model-00038-of-00130.safetensors b/model-00038-of-00130.safetensors
new file mode 100644
index 0000000..43848ea
--- /dev/null
+++ b/model-00038-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b00505426db2e221f9e1ff5bf6667839c79077680267605f01ce8561a3a6db0
+size 2463870000
diff --git a/model-00039-of-00130.safetensors b/model-00039-of-00130.safetensors
new file mode 100644
index 0000000..f66b155
--- /dev/null
+++ b/model-00039-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3942e579d500a1b08e25250b4a9d4b8fdaaa2002bd33a331d0557c11f1192300
+size 1208321720
diff --git a/model-00040-of-00130.safetensors b/model-00040-of-00130.safetensors
new file mode 100644
index 0000000..0fa616b
--- /dev/null
+++ b/model-00040-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f63bc4df96e5db36551bafe8a19c52671a720715259b5793daf6f627f1a3f712
+size 2463870000
diff --git a/model-00041-of-00130.safetensors b/model-00041-of-00130.safetensors
new file mode 100644
index 0000000..8d1e1a1
--- /dev/null
+++ b/model-00041-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ab6990a80d2d96d5d3cff5eba9f16bbc589a5587808d028d52c8bd967687ff9
+size 1208321720
diff --git a/model-00042-of-00130.safetensors b/model-00042-of-00130.safetensors
new file mode 100644
index 0000000..d0023c4
--- /dev/null
+++ b/model-00042-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86b0f9a5c91d49d87e16e06e9224003e6ece9fe4cceb81de20b7335539c89623
+size 2463870000
diff --git a/model-00043-of-00130.safetensors b/model-00043-of-00130.safetensors
new file mode 100644
index 0000000..1aa1ddb
--- /dev/null
+++ b/model-00043-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01c7827dc83cf39e9e942508c4f9bed820a2e58539993dacd1bfc3fb101fe54b
+size 1208321720
diff --git a/model-00044-of-00130.safetensors b/model-00044-of-00130.safetensors
new file mode 100644
index 0000000..ea4b8f4
--- /dev/null
+++ b/model-00044-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:619aabf8abcb5558a13906d3e5fb6bb4a26679f0caf2d598c3ff4fd7c1ef6ef4
+size 2463870000
diff --git a/model-00045-of-00130.safetensors b/model-00045-of-00130.safetensors
new file mode 100644
index 0000000..b6b929b
--- /dev/null
+++ b/model-00045-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:157980344f4b3a25b735b15842e874bd7a3c144b83a05e5bb691ba935139ef74
+size 1208321720
diff --git a/model-00046-of-00130.safetensors b/model-00046-of-00130.safetensors
new file mode 100644
index 0000000..58ac37a
--- /dev/null
+++ b/model-00046-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cc5f611201eedbab086fd2135b27e7fbf23a51a5796d80eab7f626599a6e82e
+size 2463870000
diff --git a/model-00047-of-00130.safetensors b/model-00047-of-00130.safetensors
new file mode 100644
index 0000000..11f4d5e
--- /dev/null
+++ b/model-00047-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d61c06c59e3a5b4c4a432fc845c8eebbde7b693b1ba1c0a3e42076bd9fe1d4b
+size 1208321720
diff --git a/model-00048-of-00130.safetensors b/model-00048-of-00130.safetensors
new file mode 100644
index 0000000..5048d2c
--- /dev/null
+++ b/model-00048-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:752c11295dfda87db85b7183702b36d621b2d741e7d23a8c0ec57c804d606e9c
+size 2463870000
diff --git a/model-00049-of-00130.safetensors b/model-00049-of-00130.safetensors
new file mode 100644
index 0000000..727dd1e
--- /dev/null
+++ b/model-00049-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3531c8be2646cfb2827d58bcfcdf3edbd147c3a787c2af02ab975c2950807b6c
+size 1208321720
diff --git a/model-00050-of-00130.safetensors b/model-00050-of-00130.safetensors
new file mode 100644
index 0000000..0174338
--- /dev/null
+++ b/model-00050-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fb53f941912ddd44f4a6eb604dcfcf15dfd4a59b394e9c58dee0e0c796d472a
+size 2463870000
diff --git a/model-00051-of-00130.safetensors b/model-00051-of-00130.safetensors
new file mode 100644
index 0000000..1615f1d
--- /dev/null
+++ b/model-00051-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:776e16d0f59ba83d22a7279f8689d106952f49ec0585a6892cbbb22ff8f978df
+size 1208321720
diff --git a/model-00052-of-00130.safetensors b/model-00052-of-00130.safetensors
new file mode 100644
index 0000000..46adcd6
--- /dev/null
+++ b/model-00052-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f35958c62cc5b9329197bec72d24b9ca25381f688ef8aa8264136e179c0f6b18
+size 2463870000
diff --git a/model-00053-of-00130.safetensors b/model-00053-of-00130.safetensors
new file mode 100644
index 0000000..2b300f9
--- /dev/null
+++ b/model-00053-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f50de42310c9bf54ff45df0e3623061399a14121585ed92950da960b80e32faf
+size 1208321720
diff --git a/model-00054-of-00130.safetensors b/model-00054-of-00130.safetensors
new file mode 100644
index 0000000..f61ea99
--- /dev/null
+++ b/model-00054-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce93f004e3e8d581f3c6adfa8230609417f42d97ae19b11ee41a272363a216a6
+size 2463870000
diff --git a/model-00055-of-00130.safetensors b/model-00055-of-00130.safetensors
new file mode 100644
index 0000000..6e2b876
--- /dev/null
+++ b/model-00055-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2625525457d52cd1ff730a2efc4321b651fa047cffbbe42392fd5ffafbde5198
+size 1208321720
diff --git a/model-00056-of-00130.safetensors b/model-00056-of-00130.safetensors
new file mode 100644
index 0000000..e8bf52e
--- /dev/null
+++ b/model-00056-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50bda61a04a9780bfaff66d41605ef841c8aa490e27968b7d6362cbfe9373873
+size 2463870000
diff --git a/model-00057-of-00130.safetensors b/model-00057-of-00130.safetensors
new file mode 100644
index 0000000..f7a67d5
--- /dev/null
+++ b/model-00057-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0329f61e0412a3295e2e88fc384e21be159fa0f97f9afeed61a93839c1defe51
+size 1208321720
diff --git a/model-00058-of-00130.safetensors b/model-00058-of-00130.safetensors
new file mode 100644
index 0000000..37a0ca7
--- /dev/null
+++ b/model-00058-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5851eb14d88fbfca17902abcffee0b23469eed3f5774963643d5fc47b965eb6a
+size 2463870000
diff --git a/model-00059-of-00130.safetensors b/model-00059-of-00130.safetensors
new file mode 100644
index 0000000..6f285d3
--- /dev/null
+++ b/model-00059-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61ba4e7993aa46c3697b3632f1489b0c6597af09e876a0f5156304b8512cb0a8
+size 1208321720
diff --git a/model-00060-of-00130.safetensors b/model-00060-of-00130.safetensors
new file mode 100644
index 0000000..2b3483b
--- /dev/null
+++ b/model-00060-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53756d742e9d168abd93385afda05c198aae0af8d365a37ea426170492f76b95
+size 2463870000
diff --git a/model-00061-of-00130.safetensors b/model-00061-of-00130.safetensors
new file mode 100644
index 0000000..28809c3
--- /dev/null
+++ b/model-00061-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65c0cc4d1221a2455ee1ff73c92bd6967e903bdb789717ef376cb89a55c13689
+size 1208321720
diff --git a/model-00062-of-00130.safetensors b/model-00062-of-00130.safetensors
new file mode 100644
index 0000000..e12c3e2
--- /dev/null
+++ b/model-00062-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f971bc270dc5bdfeb6ed697b81a82e3bb871f67008b1e6dc630b712f27a8b6c0
+size 2463870000
diff --git a/model-00063-of-00130.safetensors b/model-00063-of-00130.safetensors
new file mode 100644
index 0000000..eb5dd7d
--- /dev/null
+++ b/model-00063-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c3b9a1da7ef0d22f872514545789d5f09016f8b19e42196291cf5ac03b97371
+size 1208321720
diff --git a/model-00064-of-00130.safetensors b/model-00064-of-00130.safetensors
new file mode 100644
index 0000000..7b7f0ff
--- /dev/null
+++ b/model-00064-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6843193f9428a615ae73d08d28f0c45780ee4a816160c739ec43eb2b97b08ebb
+size 2463870000
diff --git a/model-00065-of-00130.safetensors b/model-00065-of-00130.safetensors
new file mode 100644
index 0000000..52c7eb8
--- /dev/null
+++ b/model-00065-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08250404376f793d88114569bc688eb1f2f0e3362d14a7f98bafdaf8946ba5b6
+size 1208321720
diff --git a/model-00066-of-00130.safetensors b/model-00066-of-00130.safetensors
new file mode 100644
index 0000000..5de4e25
--- /dev/null
+++ b/model-00066-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c66a8842ebb3ee0c57aeaa79219dea97644757e3a2687239b740605076a55ae7
+size 2463870000
diff --git a/model-00067-of-00130.safetensors b/model-00067-of-00130.safetensors
new file mode 100644
index 0000000..fc2efa2
--- /dev/null
+++ b/model-00067-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30de7939106eee13c3287062c1cfff64151ce40dd9df2d2ed302534264dcf7d7
+size 1208321720
diff --git a/model-00068-of-00130.safetensors b/model-00068-of-00130.safetensors
new file mode 100644
index 0000000..8770af9
--- /dev/null
+++ b/model-00068-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58e5e832abae58bc00ebec9233b85ba9238f074631ca536000b2f0defdab2dcd
+size 2463870000
diff --git a/model-00069-of-00130.safetensors b/model-00069-of-00130.safetensors
new file mode 100644
index 0000000..24904cb
--- /dev/null
+++ b/model-00069-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a486eb60bbc0aa07dc4b5d579c79cacddff7a95abf09dbc31daa21826a22d32e
+size 1208321720
diff --git a/model-00070-of-00130.safetensors b/model-00070-of-00130.safetensors
new file mode 100644
index 0000000..2b23683
--- /dev/null
+++ b/model-00070-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4c87d552ab115ed044b71a1f50b0f55673724dd33f30b47ec62c01650bd3500
+size 2463870000
diff --git a/model-00071-of-00130.safetensors b/model-00071-of-00130.safetensors
new file mode 100644
index 0000000..0f67eee
--- /dev/null
+++ b/model-00071-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32d9fd804c11fae7b038aeb6960466898833e3674887e32924d4a740a1d78e64
+size 1208321720
diff --git a/model-00072-of-00130.safetensors b/model-00072-of-00130.safetensors
new file mode 100644
index 0000000..e419b31
--- /dev/null
+++ b/model-00072-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc04e61181fe55d87cf54944dd776b656ebbd2d00a57c931e3dbc4edd3a51f44
+size 2463870000
diff --git a/model-00073-of-00130.safetensors b/model-00073-of-00130.safetensors
new file mode 100644
index 0000000..f5a5d14
--- /dev/null
+++ b/model-00073-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2364fd711749dee328d3d049b6e533595eb94bb1508e6e47668c1f0c6f5d5ac
+size 1208321720
diff --git a/model-00074-of-00130.safetensors b/model-00074-of-00130.safetensors
new file mode 100644
index 0000000..8029708
--- /dev/null
+++ b/model-00074-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4385a5145e5ea2f895fb49a078a536e405d5d8944c09c267104f06f79479561b
+size 2463870000
diff --git a/model-00075-of-00130.safetensors b/model-00075-of-00130.safetensors
new file mode 100644
index 0000000..b8e098a
--- /dev/null
+++ b/model-00075-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55d8e7f5ae80c68aa5a2cd5e4d061e620467a113f43eec1ac031e6aa887a21d3
+size 1208321720
diff --git a/model-00076-of-00130.safetensors b/model-00076-of-00130.safetensors
new file mode 100644
index 0000000..c662d24
--- /dev/null
+++ b/model-00076-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9e38b4e3d3f6c258a433bb5fae422daea75c4d72df610e0b43f6a50e99c72ae
+size 2463870000
diff --git a/model-00077-of-00130.safetensors b/model-00077-of-00130.safetensors
new file mode 100644
index 0000000..fef5c2d
--- /dev/null
+++ b/model-00077-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9563a5fc0288fe5b213fb834852907d47ad5b1979f69d04d12e0aa52e002566
+size 1208321720
diff --git a/model-00078-of-00130.safetensors b/model-00078-of-00130.safetensors
new file mode 100644
index 0000000..944c33a
--- /dev/null
+++ b/model-00078-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e39be88cdb339805880f4a4aeb0a9a3d054544f639710ef479a1e686496df78
+size 2463870000
diff --git a/model-00079-of-00130.safetensors b/model-00079-of-00130.safetensors
new file mode 100644
index 0000000..ac4fcef
--- /dev/null
+++ b/model-00079-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1f617cab4400626658d68088612ca345974a54796fd9396f3bc77b05b1c36d4
+size 1208321720
diff --git a/model-00080-of-00130.safetensors b/model-00080-of-00130.safetensors
new file mode 100644
index 0000000..efd10b5
--- /dev/null
+++ b/model-00080-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3c4e028a851f8ffd08d209f38cb1597aae2bfdf9789b886235814e3ec658b4d
+size 2463870000
diff --git a/model-00081-of-00130.safetensors b/model-00081-of-00130.safetensors
new file mode 100644
index 0000000..fde8687
--- /dev/null
+++ b/model-00081-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f48db3406b5ef2bd20384031c9caddfdcf4555e34c667205c79688516d3c6701
+size 1208321720
diff --git a/model-00082-of-00130.safetensors b/model-00082-of-00130.safetensors
new file mode 100644
index 0000000..efb4279
--- /dev/null
+++ b/model-00082-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dd1321614be8b6cc91c5151f98c11c5206d87596cccea0cfd25a92b6e8489d0
+size 2463870000
diff --git a/model-00083-of-00130.safetensors b/model-00083-of-00130.safetensors
new file mode 100644
index 0000000..e675d8d
--- /dev/null
+++ b/model-00083-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93a3c09729b0e311a7fb9b97bc5a5fe25298760bed5e06b8df03b620dc617980
+size 1208321720
diff --git a/model-00084-of-00130.safetensors b/model-00084-of-00130.safetensors
new file mode 100644
index 0000000..e474037
--- /dev/null
+++ b/model-00084-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf9c0f576454648d312776dd41722d3a70b811020f87101f343bf919374cb8ba
+size 2463870000
diff --git a/model-00085-of-00130.safetensors b/model-00085-of-00130.safetensors
new file mode 100644
index 0000000..494a8cb
--- /dev/null
+++ b/model-00085-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4e6b1e1396360c8ec588b5de70a51575f42c4ccc0bfc7681761640c6c9c1c79
+size 1208321720
diff --git a/model-00086-of-00130.safetensors b/model-00086-of-00130.safetensors
new file mode 100644
index 0000000..cbab7f0
--- /dev/null
+++ b/model-00086-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b35a961ad11e4886b9a2701217c2d987557f86bf4b866575994d6f53e6a93dfc
+size 2463870000
diff --git a/model-00087-of-00130.safetensors b/model-00087-of-00130.safetensors
new file mode 100644
index 0000000..4a4bd1b
--- /dev/null
+++ b/model-00087-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3489eab628868300d70a91290a097cc991518e672211641e7027dbcde86df006
+size 1208321720
diff --git a/model-00088-of-00130.safetensors b/model-00088-of-00130.safetensors
new file mode 100644
index 0000000..9abed8a
--- /dev/null
+++ b/model-00088-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4016e6666329fb7aaa39d676c4d711505613f0f508e11e7fd34bacb01c0a1fc5
+size 2463870000
diff --git a/model-00089-of-00130.safetensors b/model-00089-of-00130.safetensors
new file mode 100644
index 0000000..8ad5ced
--- /dev/null
+++ b/model-00089-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73c94cd6f41db36e398c5f53bf784cbe331380a5c5c6337c041a53b2a28d7335
+size 1208321720
diff --git a/model-00090-of-00130.safetensors b/model-00090-of-00130.safetensors
new file mode 100644
index 0000000..11fcb5d
--- /dev/null
+++ b/model-00090-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:660ad11ecbce3a5690c44f6aa197e99e8265eec77adb58172ae16ac9d6d555db
+size 2463870000
diff --git a/model-00091-of-00130.safetensors b/model-00091-of-00130.safetensors
new file mode 100644
index 0000000..06b5516
--- /dev/null
+++ b/model-00091-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:251879e4aed926f4141c656cca20165d97798ecd04b19b25c7e935e364d677e6
+size 1208321720
diff --git a/model-00092-of-00130.safetensors b/model-00092-of-00130.safetensors
new file mode 100644
index 0000000..226c575
--- /dev/null
+++ b/model-00092-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80ae45db87452043faccd182566b941cb139daf8b6bbe8ce4e93c774b3035a40
+size 2463870000
diff --git a/model-00093-of-00130.safetensors b/model-00093-of-00130.safetensors
new file mode 100644
index 0000000..67cfb02
--- /dev/null
+++ b/model-00093-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b2133ed6b9353c2faa6fc0636e628b503fcc06595b00938127f1d57f8ded3ca
+size 1208321720
diff --git a/model-00094-of-00130.safetensors b/model-00094-of-00130.safetensors
new file mode 100644
index 0000000..dadbe59
--- /dev/null
+++ b/model-00094-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38d578dbfae615ab955c6685b93ef6f6edb8723795a7dc5322eef20022f2e3c3
+size 2463870000
diff --git a/model-00095-of-00130.safetensors b/model-00095-of-00130.safetensors
new file mode 100644
index 0000000..45d0bfc
--- /dev/null
+++ b/model-00095-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e8aadd428101830f2f8dc35a9d9c8728297e148f47128bad111a7e05e03c3f3
+size 1208321720
diff --git a/model-00096-of-00130.safetensors b/model-00096-of-00130.safetensors
new file mode 100644
index 0000000..eb672a8
--- /dev/null
+++ b/model-00096-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6b206f258506b4ec553c3c5d8fbd6edbe7072d229445d3eda4d8ee5f7cc02df
+size 2463870000
diff --git a/model-00097-of-00130.safetensors b/model-00097-of-00130.safetensors
new file mode 100644
index 0000000..1ec8d21
--- /dev/null
+++ b/model-00097-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:723e46aa887af11c686ca083f3ac85093c8d723488febc48b9a74f3984b9662a
+size 1208321720
diff --git a/model-00098-of-00130.safetensors b/model-00098-of-00130.safetensors
new file mode 100644
index 0000000..632e08e
--- /dev/null
+++ b/model-00098-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2424d85d24c10ada76ad98730ae5c11f0e21fde4a9f743a6b16671ffcbc89aa
+size 2463870000
diff --git a/model-00099-of-00130.safetensors b/model-00099-of-00130.safetensors
new file mode 100644
index 0000000..fe48680
--- /dev/null
+++ b/model-00099-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4265c4dfb1a96c739e8f013620bddb47c24c8b0ef773561b9b82d9dfbf4fee39
+size 1208321720
diff --git a/model-00100-of-00130.safetensors b/model-00100-of-00130.safetensors
new file mode 100644
index 0000000..fafa110
--- /dev/null
+++ b/model-00100-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec9e1c654708baa52b8c3e9efc2a9ed0dc48c2e70e6bb76376c42a4d6587a454
+size 2463870000
diff --git a/model-00101-of-00130.safetensors b/model-00101-of-00130.safetensors
new file mode 100644
index 0000000..635c876
--- /dev/null
+++ b/model-00101-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c9daefb8fb272e6986ceb82046a06fff3943b55542eb38b1620c198a1426c9d
+size 1208321720
diff --git a/model-00102-of-00130.safetensors b/model-00102-of-00130.safetensors
new file mode 100644
index 0000000..2e2d1fa
--- /dev/null
+++ b/model-00102-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88bec437439fcedb12208c2255806e8255e2d0e382ce386a5a52087db02257fa
+size 2463870000
diff --git a/model-00103-of-00130.safetensors b/model-00103-of-00130.safetensors
new file mode 100644
index 0000000..71e45e2
--- /dev/null
+++ b/model-00103-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34c7a8a2857213f0815b88fc5bda4a818e4db3259aec303b0fe5b9e7bda78dad
+size 1208321720
diff --git a/model-00104-of-00130.safetensors b/model-00104-of-00130.safetensors
new file mode 100644
index 0000000..98ed2d1
--- /dev/null
+++ b/model-00104-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52db1e44d37997b8d247bd72ad3f7663b19991afbdca6428ae96548efc858038
+size 2463870000
diff --git a/model-00105-of-00130.safetensors b/model-00105-of-00130.safetensors
new file mode 100644
index 0000000..8a37906
--- /dev/null
+++ b/model-00105-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:272e89fb71dca48ffbeaf021f6e0a940a50271eaea28dd79cb8ec0179aaf84a6
+size 1208321720
diff --git a/model-00106-of-00130.safetensors b/model-00106-of-00130.safetensors
new file mode 100644
index 0000000..837e42d
--- /dev/null
+++ b/model-00106-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31cad8f509206f6adae0026b06d06fd1dfce6fb4772627f53faff6a5d46df717
+size 2463870000
diff --git a/model-00107-of-00130.safetensors b/model-00107-of-00130.safetensors
new file mode 100644
index 0000000..ca5fc27
--- /dev/null
+++ b/model-00107-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:980871e168f0e832d23fe36a17289c64ced49201d2a2c3f27fae33d7094ba898
+size 1208321720
diff --git a/model-00108-of-00130.safetensors b/model-00108-of-00130.safetensors
new file mode 100644
index 0000000..63d90a7
--- /dev/null
+++ b/model-00108-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfa9f4d88745a955aa39357d12374dcbdfe339e087a73a0f67446dfa1f02f5ac
+size 2463870000
diff --git a/model-00109-of-00130.safetensors b/model-00109-of-00130.safetensors
new file mode 100644
index 0000000..b8f7a29
--- /dev/null
+++ b/model-00109-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5049bcc814c7dcfc907a45f5f43ab0329aefcb25841e53a68e363abd2e1e41a3
+size 1208321720
diff --git a/model-00110-of-00130.safetensors b/model-00110-of-00130.safetensors
new file mode 100644
index 0000000..0abab81
--- /dev/null
+++ b/model-00110-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63ed8e64c4c4e03a8b60373bee658b1816fafda324464df020b872aaca6c12c
+size 2463870000
diff --git a/model-00111-of-00130.safetensors b/model-00111-of-00130.safetensors
new file mode 100644
index 0000000..f1196a5
--- /dev/null
+++ b/model-00111-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a32ba627c4e30b147ef9fc2fa0ee8e42d7c075581c4041f50a569d0874dea5c
+size 1208321720
diff --git a/model-00112-of-00130.safetensors b/model-00112-of-00130.safetensors
new file mode 100644
index 0000000..761ee41
--- /dev/null
+++ b/model-00112-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8804e53197cb1c6dae1bec0a6cb8c0b63c7ec0eab495f3993f494c1c1b6604f
+size 2463870000
diff --git a/model-00113-of-00130.safetensors b/model-00113-of-00130.safetensors
new file mode 100644
index 0000000..ef6367d
--- /dev/null
+++ b/model-00113-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12acef967bd4afb64b1e28c7d828ff0f547145362494673d5f5bb9b42406fffb
+size 1208321720
diff --git a/model-00114-of-00130.safetensors b/model-00114-of-00130.safetensors
new file mode 100644
index 0000000..f6d5bfd
--- /dev/null
+++ b/model-00114-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1a4480030b1465fd1ded96a2bc2db5cf55649105c768d40dc7c0578c254f68c
+size 2463870000
diff --git a/model-00115-of-00130.safetensors b/model-00115-of-00130.safetensors
new file mode 100644
index 0000000..26b16ca
--- /dev/null
+++ b/model-00115-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81f9a8c9e0e0aeffb74c7353b5c7f8617efe18af0def47dd7f6c8f124fa860cb
+size 1208321720
diff --git a/model-00116-of-00130.safetensors b/model-00116-of-00130.safetensors
new file mode 100644
index 0000000..31e39a5
--- /dev/null
+++ b/model-00116-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc5b40d71d39d2dc8da9a00f24c8270b333cd13de795e5656d61b4e09c3eca32
+size 2463870000
diff --git a/model-00117-of-00130.safetensors b/model-00117-of-00130.safetensors
new file mode 100644
index 0000000..b72aed3
--- /dev/null
+++ b/model-00117-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f48256d3b80e3df5c3226f5634e55573aaef7956c6fc0af67922b0e9878bad6
+size 1208321720
diff --git a/model-00118-of-00130.safetensors b/model-00118-of-00130.safetensors
new file mode 100644
index 0000000..d07b136
--- /dev/null
+++ b/model-00118-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2734c37f752634f8ee0197465af9b47b11e0537e55982367fdc0f559f286cb03
+size 2463870000
diff --git a/model-00119-of-00130.safetensors b/model-00119-of-00130.safetensors
new file mode 100644
index 0000000..768df33
--- /dev/null
+++ b/model-00119-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f5cec6f846ff50933ab1cbe79aaf2b4d34295b99f983b2c8f6ddb892fd01539
+size 1208321720
diff --git a/model-00120-of-00130.safetensors b/model-00120-of-00130.safetensors
new file mode 100644
index 0000000..0c97f29
--- /dev/null
+++ b/model-00120-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90bdedf7b4bdc3f0946d246577132ff88768393e237c0440463099c6d8fa733e
+size 2463870000
diff --git a/model-00121-of-00130.safetensors b/model-00121-of-00130.safetensors
new file mode 100644
index 0000000..31f6e94
--- /dev/null
+++ b/model-00121-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d9dc962a4d204e817df44020a9e7bfdbe7b2872cf272868189f0407a789dfbe
+size 1208321720
diff --git a/model-00122-of-00130.safetensors b/model-00122-of-00130.safetensors
new file mode 100644
index 0000000..553e0af
--- /dev/null
+++ b/model-00122-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56431ebd1036851dda39d860f1c7c7094f513122cf1dc4f8897187d445edfc5a
+size 2463870000
diff --git a/model-00123-of-00130.safetensors b/model-00123-of-00130.safetensors
new file mode 100644
index 0000000..33ee573
--- /dev/null
+++ b/model-00123-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be8d1cd04e76c2626a937fffb588bf73041e6ff729d2c24c534ffdef32379139
+size 1208321720
diff --git a/model-00124-of-00130.safetensors b/model-00124-of-00130.safetensors
new file mode 100644
index 0000000..2827a2c
--- /dev/null
+++ b/model-00124-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d5c3c7a03795a0d0966e4840c54dfba99857945146ca47f404bc4b808b19e5c
+size 1229199584
diff --git a/model-00125-of-00130.safetensors b/model-00125-of-00130.safetensors
new file mode 100644
index 0000000..ba3b119
--- /dev/null
+++ b/model-00125-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d95b10b6e140a9626a7058d5038528f2ff80148dc4569b881db56052046509
+size 40
diff --git a/model-00126-of-00130.safetensors b/model-00126-of-00130.safetensors
new file mode 100644
index 0000000..ba3b119
--- /dev/null
+++ b/model-00126-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d95b10b6e140a9626a7058d5038528f2ff80148dc4569b881db56052046509
+size 40
diff --git a/model-00127-of-00130.safetensors b/model-00127-of-00130.safetensors
new file mode 100644
index 0000000..ba3b119
--- /dev/null
+++ b/model-00127-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d95b10b6e140a9626a7058d5038528f2ff80148dc4569b881db56052046509
+size 40
diff --git a/model-00128-of-00130.safetensors b/model-00128-of-00130.safetensors
new file mode 100644
index 0000000..ba3b119
--- /dev/null
+++ b/model-00128-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d95b10b6e140a9626a7058d5038528f2ff80148dc4569b881db56052046509
+size 40
diff --git a/model-00129-of-00130.safetensors b/model-00129-of-00130.safetensors
new file mode 100644
index 0000000..ba3b119
--- /dev/null
+++ b/model-00129-of-00130.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d95b10b6e140a9626a7058d5038528f2ff80148dc4569b881db56052046509
+size 40
diff --git a/model.safetensors.index.json b/model.safetensors.index.json
new file mode 100644
index 0000000..8541037
--- /dev/null
+++ b/model.safetensors.index.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27f213c423ab1824390ffb4adad22cdda24809d56d38f0363c0e2090527dfeeb
+size 10213486
diff --git a/modeling_minimax_m2.py b/modeling_minimax_m2.py
new file mode 100644
index 0000000..8846d38
--- /dev/null
+++ b/modeling_minimax_m2.py
@@ -0,0 +1,706 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_minimax_m2.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2025 the HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from collections.abc import Callable
+from typing import Optional, Union, Unpack
+
+import torch
+from torch import nn
+
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.generation import GenerationMixin
+from transformers.integrations import use_kernel_forward_from_hub
+from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
+from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+from transformers.modeling_layers import (
+ GenericForQuestionAnswering,
+ GenericForSequenceClassification,
+ GenericForTokenClassification,
+ GradientCheckpointingLayer,
+)
+from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
+from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
+from transformers.utils.deprecation import deprecate_kwarg
+from transformers.utils.generic import OutputRecorder, check_model_inputs
+from .configuration_minimax_m2 import MiniMaxM2Config
+
+
+class MiniMaxM2MLP(nn.Module):
+ def __init__(self, config: MiniMaxM2Config):
+ super().__init__()
+ self.ffn_dim = config.intermediate_size
+ self.hidden_dim = config.hidden_size
+
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
+ self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_states):
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
+ current_hidden_states = self.w2(current_hidden_states)
+ return current_hidden_states
+
+
+class MiniMaxM2Experts(nn.ModuleList):
+ """
+ ModuleList of experts.
+ """
+
+ def __init__(self, config: MiniMaxM2Config):
+ super().__init__()
+ self.top_k = config.num_experts_per_tok
+ self.num_experts = config.num_local_experts
+ for _ in range(self.num_experts):
+ self.append(MiniMaxM2MLP(config))
+
+ def forward(
+ self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states: (batch_size * sequence_length, hidden_dim)
+ selected_experts: (batch_size * sequence_length, top_k)
+ routing_weights: (batch_size * sequence_length, top_k)
+ Returns:
+ (batch_size * sequence_length, hidden_dim)
+ """
+ final_hidden_states = torch.zeros_like(hidden_states)
+ expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)
+
+ expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
+ for expert_idx in expert_hit:
+ idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
+ current_hidden_states = self[expert_idx](current_state) * top_k_weights[top_x, idx, None]
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
+ return final_hidden_states
+
+
+class MiniMaxM2SparseMoeBlock(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.top_k = config.num_experts_per_tok
+ self.jitter_noise = config.router_jitter_noise
+ self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
+ self.experts = MiniMaxM2Experts(config)
+ self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))
+
+ def route_tokens_to_experts(self, router_logits):
+ routing_weights = torch.nn.functional.sigmoid(router_logits.float())
+ scores_for_choice = routing_weights + self.e_score_correction_bias
+ _, top_k_index = torch.topk(scores_for_choice, self.top_k, dim=-1, sorted=False)
+ top_k_weights = routing_weights.gather(1, top_k_index)
+ top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
+ return top_k_index, top_k_weights.to(router_logits.dtype)
+
+ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
+ if self.training and self.jitter_noise > 0:
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
+ router_logits = self.gate(hidden_states)
+ top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
+ hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
+ hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
+ return hidden_states, router_logits
+
+
+@use_kernel_forward_from_hub("RMSNorm")
+class MiniMaxM2RMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ MiniMaxM2RMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
+
+ def extra_repr(self):
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+def eager_attention_forward(
+ module: nn.Module,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_mask: Optional[torch.Tensor],
+ scaling: float,
+ dropout: float = 0.0,
+ **kwargs: Unpack[TransformersKwargs],
+):
+ key_states = repeat_kv(key, module.num_key_value_groups)
+ value_states = repeat_kv(value, module.num_key_value_groups)
+
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+ if attention_mask is not None:
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ return attn_output, attn_weights
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+
+ # Keep half or full tensor for later concatenation
+ rotary_dim = cos.shape[-1]
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
+
+ # Apply rotary embeddings on the first half or full tensor
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
+
+ # Concatenate back to full shape
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
+ return q_embed, k_embed
+
+
+class MiniMaxM2Attention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: MiniMaxM2Config, layer_idx: int):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+ self.scaling = self.head_dim**-0.5
+ self.attention_dropout = config.attention_dropout
+ self.is_causal = True
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
+
+ self.use_qk_norm = config.use_qk_norm
+ if self.use_qk_norm:
+ self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
+ self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)
+
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor],
+ past_key_values: Optional[Cache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, self.head_dim)
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ if self.use_qk_norm: # main diff from Llama
+ query_states = self.q_norm(query_states)
+ key_states = self.k_norm(key_states)
+
+ key_states = key_states.view(hidden_shape)
+ query_states = query_states.view(hidden_shape)
+ value_states = value_states.view(hidden_shape)
+
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_values is not None:
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ return attn_output, attn_weights
+
+
+class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
+ def __init__(self, config: MiniMaxM2Config, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.self_attn = MiniMaxM2Attention(config, layer_idx)
+
+ self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
+ self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> torch.FloatTensor:
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, _ = self.self_attn(
+ hidden_states=hidden_states,
+ position_embeddings=position_embeddings,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ cache_position=cache_position,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states, _ = self.block_sparse_moe(hidden_states)
+ hidden_states = residual + hidden_states
+
+ return hidden_states
+
+
+class MiniMaxM2RotaryEmbedding(nn.Module):
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
+
+ def __init__(self, config: MiniMaxM2Config, device=None):
+ super().__init__()
+ # BC: "rope_type" was originally "type"
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+ else:
+ self.rope_type = "default"
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.original_inv_freq = self.inv_freq
+
+ @torch.no_grad()
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
+ def forward(self, x, position_ids):
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+ position_ids_expanded = position_ids[:, None, :].float()
+
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos() * self.attention_scaling
+ sin = emb.sin() * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+@auto_docstring
+class MiniMaxM2PreTrainedModel(PreTrainedModel):
+ config: MiniMaxM2Config
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["MiniMaxM2DecoderLayer"]
+ _skip_keys_device_placement = ["past_key_values"]
+ _supports_flash_attn = True
+ _supports_sdpa = True
+ _supports_flex_attn = True
+ _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
+ _supports_attention_backend = True
+ _can_record_outputs = {
+ "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
+ "hidden_states": MiniMaxM2DecoderLayer,
+ "attentions": MiniMaxM2Attention,
+ }
+
+
+@auto_docstring
+class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
+ def __init__(self, config: MiniMaxM2Config):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @check_model_inputs
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> MoeModelOutputWithPast:
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache(config=self.config)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
+ causal_mask = mask_function(
+ config=self.config,
+ input_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ cache_position=cache_position,
+ past_key_values=past_key_values,
+ position_ids=position_ids,
+ )
+
+ hidden_states = inputs_embeds
+
+ # create position embeddings to be shared across the decoder layers
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+ hidden_states = decoder_layer(
+ hidden_states,
+ position_embeddings=position_embeddings,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = self.norm(hidden_states)
+
+ return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values,
+ )
+
+
+def load_balancing_loss_func(
+ gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
+ num_experts: Optional[int] = None,
+ top_k=2,
+ attention_mask: Optional[torch.Tensor] = None,
+) -> Union[torch.Tensor, int]:
+ r"""
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
+
+ See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+ experts is too unbalanced.
+
+ Args:
+ gate_logits:
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
+ shape [batch_size X sequence_length, num_experts].
+ num_experts:
+ Number of experts
+ top_k:
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
+ parameter.
+ attention_mask (`torch.Tensor`, *optional*):
+ The attention_mask used in forward function
+ shape [batch_size X sequence_length] if not None.
+
+ Returns:
+ The auxiliary loss.
+ """
+ if gate_logits is None or not isinstance(gate_logits, tuple):
+ return 0
+
+ if isinstance(gate_logits, tuple):
+ compute_device = gate_logits[0].device
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+ if attention_mask is None:
+ # Compute the percentage of tokens routed to each experts
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
+ else:
+ batch_size, sequence_length = attention_mask.shape
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
+ expert_attention_mask = (
+ attention_mask[None, :, :, None, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+ .reshape(-1, top_k, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the percentage of tokens routed to each experts
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+ expert_attention_mask, dim=0
+ )
+
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
+ router_per_expert_attention_mask = (
+ attention_mask[None, :, :, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+ .reshape(-1, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+ router_per_expert_attention_mask, dim=0
+ )
+
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+ return overall_loss * num_experts
+
+
+@auto_docstring
+class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["lm_head.weight"]
+ _tp_plan = {"lm_head": "colwise_rep"}
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = MiniMaxM2Model(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+ self.router_aux_loss_coef = config.router_aux_loss_coef
+ self.num_experts = config.num_local_experts
+ self.num_experts_per_tok = config.num_experts_per_tok
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @can_return_tuple
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ logits_to_keep: Union[int, torch.Tensor] = 0,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> MoeCausalLMOutputWithPast:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM
+
+ >>> model = MiniMaxM2ForCausalLM.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")
+ >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs: MoeModelOutputWithPast = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_router_logits=output_router_logits,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs.last_hidden_state
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
+
+ aux_loss = None
+ if output_router_logits:
+ aux_loss = load_balancing_loss_func(
+ outputs.router_logits,
+ self.num_experts,
+ self.num_experts_per_tok,
+ attention_mask,
+ )
+ if labels is not None:
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
+
+ return MoeCausalLMOutputWithPast(
+ loss=loss,
+ aux_loss=aux_loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ router_logits=outputs.router_logits,
+ )
+
+
+class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
+ pass
+
+
+class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
+ pass
+
+
+class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
+ pass
+
+
+__all__ = [
+ "MiniMaxM2ForCausalLM",
+ "MiniMaxM2ForQuestionAnswering",
+ "MiniMaxM2Model",
+ "MiniMaxM2PreTrainedModel",
+ "MiniMaxM2ForSequenceClassification",
+ "MiniMaxM2ForTokenClassification",
+]
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000..2c67e9d
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:757622126525aeeb131756849d93298070ff3f0319c455ec8c5bb0f6b1cebbe8
+size 9730160
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000..ff8e2eb
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,495 @@
+{
+ "added_tokens_decoder": {
+ "200000": {
+ "content": "]!p~[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200001": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200002": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200003": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200004": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200005": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200006": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200007": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200008": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200009": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200010": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200011": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200012": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200013": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200014": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200015": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200016": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200017": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200018": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200019": {
+ "content": "]~b]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200020": {
+ "content": "[e~[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200021": {
+ "content": "]!d~[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200022": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200023": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200024": {
+ "content": "]<]speech[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200025": {
+ "content": "]<]image[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200026": {
+ "content": "]<]video[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200027": {
+ "content": "]<]start of speech[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200028": {
+ "content": "]<]end of speech[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200029": {
+ "content": "]<]start of image[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200030": {
+ "content": "]<]end of image[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200031": {
+ "content": "]<]start of video[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200032": {
+ "content": "]<]end of video[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200033": {
+ "content": "]<]vision pad[>[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200034": {
+ "content": "]~!b[",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200035": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200036": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ "200037": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200038": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200039": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200040": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200041": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200042": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "200043": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ "200044": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ "200045": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ "200046": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ "200047": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ "200048": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ "200049": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ "200050": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": false
+ },
+ "200051": {
+ "content": " ",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": false
+ },
+ "200052": {
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": false
+ },
+ "200053": {
+ "content": " ",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "]<]speech[>[",
+ "]<]image[>[",
+ "]<]video[>[",
+ "]<]start of speech[>[",
+ "]<]end of speech[>[",
+ "]<]start of image[>[",
+ "]<]end of image[>[",
+ "]<]start of video[>[",
+ "]<]end of video[>[",
+ "]<]vision pad[>[",
+ "]~!b[",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "[e~[",
+ "]!d~[",
+ "]!p~[",
+ "]~b]",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "add_prefix_space": false,
+ "bos_token": "]~!b[",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "[e~[",
+ "model_max_length": 40960000,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "]!d~["
+}
diff --git a/vocab.json b/vocab.json
new file mode 100644
index 0000000..49a3e05
--- /dev/null
+++ b/vocab.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b44c066b5dc34c800c4e3ecbd85f3e95ce3bfdbf8a5fe30223e005175103578a
+size 4705413