Update README.md
parent 57b0d4fe0c
commit 30393e325d
@@ -50,8 +50,8 @@ Also check out our [GPTQ documentation](https://qwen.readthedocs.io/en/latest/qu
 Here provides a code snippet with `apply_chat_template` to show you how to load the tokenizer and model and how to generate contents.
 
 ```python
-from transformers import AutoModelForCausalLM, AutoTokenizer
-model_name = "Qwen/Qwen2.5-72B-Instruct-GPTQ-Int4"
+from modelscope import AutoModelForCausalLM, AutoTokenizer
+model_name = "qwen/Qwen2.5-72B-Instruct-GPTQ-Int4"
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
     torch_dtype="auto",
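The hunk only shows the opening lines of the snippet; the change itself swaps the `transformers` import and Hugging Face model ID for their ModelScope equivalents. For context, here is a minimal sketch of how such a snippet typically continues in Qwen2.5 model cards. The `device_map="auto"` argument, the example prompt, the system message, and `max_new_tokens=512` are illustrative assumptions, not part of this diff; ModelScope's `AutoTokenizer`/`AutoModelForCausalLM` mirror the transformers API, so `apply_chat_template` and `generate` are used the same way.

```python
# Sketch of the full load-and-generate flow, assuming the usual Qwen README pattern.
from modelscope import AutoModelForCausalLM, AutoTokenizer

model_name = "qwen/Qwen2.5-72B-Instruct-GPTQ-Int4"

# Load the quantized model and its tokenizer (device_map="auto" is an assumption here).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Build a chat prompt using the model's chat template; the messages below are illustrative.
prompt = "Give me a short introduction to large language models."
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt},
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# Generate a response and strip the prompt tokens from the output.
generated_ids = model.generate(**model_inputs, max_new_tokens=512)
generated_ids = [
    output_ids[len(input_ids):]
    for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```

Switching the import to `modelscope` keeps the rest of the snippet unchanged, since ModelScope downloads the checkpoint from its own hub and then hands back standard transformers objects.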