Skip to content

Commit b333524

Browse files
committed
bugfix
1 parent fef8872 commit b333524

File tree

1 file changed

+7
-3
lines changed

1 file changed

+7
-3
lines changed

models/llama3/api/chat_format.py

Lines changed: 7 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -67,7 +67,11 @@ def __init__(self, tokenizer: Tokenizer):
6767
def _encode_header(self, role: str) -> List[int]:
6868
tokens = []
6969
tokens.append(self.tokenizer.special_tokens["<|start_header_id|>"])
70-
tokens.extend(self.tokenizer.encode(role, bos=False, eos=False))
70+
tokens.extend(
71+
self.tokenizer.encode(
72+
"ipython" if role == "tool" else role, bos=False, eos=False
73+
)
74+
)
7175
tokens.append(self.tokenizer.special_tokens["<|end_header_id|>"])
7276
tokens.extend(self.tokenizer.encode("\n\n", bos=False, eos=False))
7377
return tokens
@@ -118,7 +122,7 @@ def _process(c):
118122
def encode_message(
119123
self, message: RawMessage, tool_prompt_format: ToolPromptFormat
120124
) -> Tuple[List[int], List[PIL_Image.Image]]:
121-
tokens = self._encode_header(role_str(message.role))
125+
tokens = self._encode_header(message.role)
122126
images = []
123127

124128
def _process_content(c):
@@ -165,7 +169,7 @@ def encode_dialog_prompt(
165169
images.extend(imgs)
166170

167171
# Add the start of an assistant message for the model to complete.
168-
tokens.extend(self._encode_header(role_str(Role.assistant)))
172+
tokens.extend(self._encode_header("assistant"))
169173

170174
return self._model_input_from_tokens_images(tokens, images)
171175

0 commit comments

Comments (0)