Skip to content

Commit 6a2b413

Browse files
scott authored and Michael-A-Kuykendall committed
style: apply rustfmt to format_prompt implementation
Co-authored-by: Michael-A-Kuykendall <github@kuykendall.dev>
1 parent 2b90305 commit 6a2b413

1 file changed

Lines changed: 4 additions & 6 deletions

File tree

src/engine/llama.rs

Lines changed: 4 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -525,7 +525,9 @@ impl LoadedModel for LlamaLoaded {
525525
}
526526
// Use token_to_bytes + from_utf8_lossy to handle multi-byte token boundaries
527527
// (e.g. qwen3 emits partial UTF-8 sequences per token that fail strict from_utf8)
528-
let piece = self.model.token_to_bytes(token, Special::Plaintext)
528+
let piece = self
529+
.model
530+
.token_to_bytes(token, Special::Plaintext)
529531
.map(|b| String::from_utf8_lossy(&b).into_owned())
530532
.unwrap_or_default();
531533
out.push_str(&piece);
@@ -573,11 +575,7 @@ impl LoadedModel for LlamaLoaded {
573575
let chat: Vec<shimmy_llama_cpp_2::model::LlamaChatMessage> = messages
574576
.iter()
575577
.filter_map(|(role, content)| {
576-
shimmy_llama_cpp_2::model::LlamaChatMessage::new(
577-
role.clone(),
578-
content.clone(),
579-
)
580-
.ok()
578+
shimmy_llama_cpp_2::model::LlamaChatMessage::new(role.clone(), content.clone()).ok()
581579
})
582580
.collect();
583581
self.model.apply_chat_template(tmpl, &chat, true).ok()

0 commit comments

Comments (0)