Skip to content

Commit 4c614c9

Browse files
Merge pull request #9 from jackrabbithanna/images-cont
Images cont
2 parents df82ad0 + 755152e commit 4c614c9

File tree

4 files changed: +67 −13 lines changed

meson.build

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
 project('gnollama',
-        version: '0.4.0',
+        version: '0.5.0',
         meson_version: '>= 1.0.0',
         default_options: [ 'warning_level=2', 'werror=false', ],
 )

src/bubbles.py

Lines changed: 33 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,8 @@
 #
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-from gi.repository import Gtk, GObject, Pango, GLib
+import base64
+from gi.repository import Gtk, GObject, Pango, GLib, Gdk
 from .markdown_view import MarkdownView
 
 class ChatBubble(Gtk.ListBoxRow):
@@ -40,9 +41,39 @@ def __init__(self, is_user=False, **kwargs):
 class UserBubble(ChatBubble):
     __gtype_name__ = 'UserBubble'
 
-    def __init__(self, text, **kwargs):
+
+    def __init__(self, text, images=None, **kwargs):
         super().__init__(is_user=True, **kwargs)
 
+        # If we have images, show them
+        if images:
+            images_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
+            images_box.set_spacing(6)
+            images_box.set_halign(Gtk.Align.END)
+
+            for img_b64 in images:
+                try:
+                    # Decode base64
+                    start_idx = 0
+                    if "," in img_b64:
+                        start_idx = img_b64.find(",") + 1
+
+                    img_data = base64.b64decode(img_b64[start_idx:])
+                    bytes_data = GLib.Bytes.new(img_data)
+                    texture = Gdk.Texture.new_from_bytes(bytes_data)
+
+                    picture = Gtk.Picture.new_for_paintable(texture)
+                    picture.set_content_fit(Gtk.ContentFit.SCALE_DOWN)
+                    picture.set_size_request(200, 200)  # Max size
+                    picture.set_can_shrink(True)  # Allow shrinking
+
+                    # Wrap in frame or styling if needed, for now just the picture
+                    images_box.append(picture)
+                except Exception as e:
+                    print(f"Failed to load image in bubble: {e}")
+
+            self.bubble_box.append(images_box)
+
         label = Gtk.Label(label=text)
         label.set_wrap(True)
         label.set_max_width_chars(50)

src/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ def on_about_action(self, *args):
         about = Adw.AboutDialog(application_name='gnollama',
                                 application_icon='com.github.jackrabbithanna.Gnollama',
                                 developer_name='Jackrabbithanna',
-                                version='0.4.0',
+                                version='0.5.0',
                                 developers=['Jackrabbithanna'],
                                 copyright='© 2025 Jackrabbithanna')
         # Translators: Replace "translator-credits" with your name/username, and optionally an email or URL.

src/tab.py

Lines changed: 32 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,8 @@ def append_response_chunk(self, text):
     def on_response_complete(self, tab, model_name):
         msg = {
             "role": "assistant",
-            "content": self.current_response_full_text
+            "content": self.current_response_full_text,
+            "model": model_name
         }
         if self.current_thinking_full_text:
             msg["thinking_content"] = self.current_thinking_full_text
@@ -62,6 +63,15 @@ def on_response_complete(self, tab, model_name):
         thinking_val = getattr(self, 'current_thinking_val', None)
         if thinking_val is not None:
             options['thinking_val'] = thinking_val
+
+        # Save "logprobs" setting if present
+        logprobs_val = getattr(self, 'current_logprobs', None)
+        if logprobs_val is not None:
+            options['logprobs'] = logprobs_val
+
+        top_logprobs_val = getattr(self, 'current_top_logprobs', None)
+        if top_logprobs_val is not None:
+            options['top_logprobs'] = top_logprobs_val
 
         self.storage.save_chat(self.chat_id, self.history, model=model_name, options=options, system=system)
 
@@ -84,6 +94,8 @@ def process(self, tab, **kwargs):
         self.current_options = kwargs.get('options')
         self.current_system = system
         self.current_thinking_val = kwargs.get('thinking')
+        self.current_logprobs = kwargs.get('logprobs')
+        self.current_top_logprobs = kwargs.get('top_logprobs')
 
         # Build messages
         messages = []
@@ -94,7 +106,10 @@
         messages.append({"role": "user", "content": prompt})
 
         # Update our history with the user's message now
-        self.history.append({"role": "user", "content": prompt})
+        msg = {"role": "user", "content": prompt}
+        if kwargs.get('images'):
+            msg['images'] = kwargs['images']
+        self.history.append(msg)
 
         # Reset current response accumulator
         self.current_response_full_text = ""
@@ -380,9 +395,6 @@ def on_send_clicked(self, widget):
             truncated = prompt[:20] + "..." if len(prompt) > 20 else prompt
             self.tab_label.set_label(truncated)
 
-        self.add_message(prompt, sender=_("You"))
-        self.entry.set_text("")
-
         # Get selected model
         selected_item = self.model_dropdown.get_selected_item()
         model_name = "llama3"  # Fallback
@@ -404,13 +416,16 @@
         # Clear image after reading
         self.on_clear_image_clicked(None)
 
+        self.add_message(prompt, sender=_("You"), images=images)
+        self.entry.set_text("")
+
         thread = threading.Thread(target=self.process_request, args=(prompt, model_name, images))
         thread.daemon = True
         thread.start()
 
-    def add_message(self, text, sender="System"):
+    def add_message(self, text, sender="System", images=None):
         if sender == _("You"):
-            bubble = UserBubble(text)
+            bubble = UserBubble(text, images=images)
             self.chat_box.append(bubble)
         else:
             # System message or error
@@ -529,10 +544,12 @@ def load_initial_history(self, history):
             thinking_content = msg.get('thinking_content')
 
             if role == 'user':
-                self.add_message(content, sender=_("You"))
+                images = msg.get('images')
+                self.add_message(content, sender=_("You"), images=images)
             elif role == 'assistant':
                 # Reconstruct with AiBubble
-                bubble = AiBubble(model_name="Assistant")
+                model_name = msg.get('model', 'Assistant')
+                bubble = AiBubble(model_name=model_name)
                 if thinking_content:
                     bubble.append_thinking(thinking_content)
                 bubble.append_text(content)
@@ -611,6 +628,12 @@ def load_chat_settings(self, chat_data):
             if 'stats' in options:
                 self.stats_check.set_active(options['stats'])
 
+            # Logprobs
+            if 'logprobs' in options:
+                self.logprobs_check.set_active(options['logprobs'])
+            if 'top_logprobs' in options and options['top_logprobs'] is not None:
+                self.top_logprobs_entry.set_text(str(options['top_logprobs']))
+
     def process_request(self, prompt, model_name, images=None):
         host = self.host_entry.get_text()
 

0 commit comments

Comments (0)