Skip to content
This repository was archived by the owner on Aug 30, 2024. It is now read-only.

Commit 1051182

Browse files
authored
Update convert_chatglm.py (#185)
1 parent 66bcc8b commit 1051182

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

neural_speed/convert/convert_chatglm.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -973,7 +973,7 @@ def main(args_in: Optional[List[str]] = None) -> None:
973973
# ChatGLM3 shares the same architecture and model config with ChatGLM2
974974
# but its tokenizer further supports system prompts,
975975
# so we can check system token to discriminate ChatGLM3 from ChatGLM2.
976-
if "<|system|>" in tokenizer.tokenizer.special_tokens:
976+
if hasattr(tokenizer, "tokenizer") and "<|system|>" in tokenizer.tokenizer.special_tokens:
977977
if args.format == "GGUF":
978978
chatglm3_convert_gguf(model, tokenizer, dir_model, fname_out, ftype, hparams)
979979
else:

0 commit comments

Comments (0)