Skip to content

Commit 6cb1ec0

Browse files
committed
Replace deprecated torch_dtype parameter with dtype in transformers
Replace the deprecated `torch_dtype` parameter with `dtype` in transformers `from_pretrained` calls. Bump the minimum transformers version to 4.56.1, where the `dtype` parameter was introduced.

Signed-off-by: Jose Angel Morena <[email protected]>
1 parent ceed4df commit 6cb1ec0

File tree

87 files changed

+115
-115
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

87 files changed

+115
-115
lines changed

examples/autoround/README.md

Lines changed: 1 addition & 1 deletion

examples/autoround/llama3_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -7,7 +7,7 @@
77

88
# Select model and load it.
99
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
10-
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto")
10+
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
1111
tokenizer = AutoTokenizer.from_pretrained(model_id)
1212

1313
# Select calibration dataset.

examples/awq/README.md

Lines changed: 1 addition & 1 deletion

examples/awq/llama_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -8,7 +8,7 @@
88
# Select model and load it.
99
MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"
1010

11-
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
11+
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, dtype="auto")
1212
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
1313

1414
# Select calibration dataset.

examples/awq/qwen3-vl-30b-a3b-Instruct-example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,7 @@
1010

1111
# Load model.
1212
model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
13-
MODEL_ID, torch_dtype=torch.bfloat16, device_map=None, trust_remote_code=True
13+
MODEL_ID, dtype=torch.bfloat16, device_map=None, trust_remote_code=True
1414
)
1515
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
1616

examples/awq/qwen3_coder_moe_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -51,7 +51,7 @@ def preprocess(example):
5151

5252

5353
if __name__ == "__main__":
54-
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
54+
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, dtype="auto")
5555
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
5656

5757
###

examples/awq/qwen3_moe_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -8,7 +8,7 @@
88
# Select model and load it.
99
MODEL_ID = "Qwen/Qwen3-30B-A3B"
1010

11-
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
11+
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, dtype="auto")
1212
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
1313

1414
# Select calibration dataset.

examples/big_models_with_sequential_onloading/README.md

Lines changed: 1 addition & 1 deletion

examples/big_models_with_sequential_onloading/llama3.3_70b.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,7 @@
1010
model_id = "meta-llama/Llama-3.3-70B-Instruct"
1111
model = AutoModelForCausalLM.from_pretrained(
1212
model_id,
13-
torch_dtype="auto",
13+
dtype="auto",
1414
device_map=None,
1515
)
1616
tokenizer = AutoTokenizer.from_pretrained(model_id)

examples/compressed_inference/fp8_compressed_inference.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -21,7 +21,7 @@
2121

2222
compressed_model = AutoModelForCausalLM.from_pretrained(
2323
MODEL_STUB,
24-
torch_dtype="auto",
24+
dtype="auto",
2525
device_map="auto",
2626
)
2727

0 commit comments

Comments (0)