@@ -1,8 +1,14 @@
-import pytest
 import json
 from unittest.mock import patch
+
+import pytest
+
 import litellm
-from litellm.litellm_core_utils.prompt_templates.factory import ollama_pt, BAD_MESSAGE_ERROR_STR
+from litellm.litellm_core_utils.prompt_templates.factory import (
+    BAD_MESSAGE_ERROR_STR,
+    ollama_pt,
+)
+
 
 def test_ollama_pt_simple_messages():
     """Test basic functionality with simple text messages"""
@@ -11,14 +17,15 @@ def test_ollama_pt_simple_messages():
         {"role": "assistant", "content": "How can I help you?"},
         {"role": "user", "content": "Hello"},
     ]
-
+
     result = ollama_pt(model="llama2", messages=messages)
-
+
     expected_prompt = "### System:\nYou are a helpful assistant\n\n### Assistant:\nHow can I help you?\n\n### User:\nHello\n\n"
     assert isinstance(result, dict)
     assert result["prompt"] == expected_prompt
     assert result["images"] == []
 
+
 def test_ollama_pt_consecutive_user_messages():
     """Test handling consecutive user messages"""
     messages = [
@@ -28,14 +35,15 @@ def test_ollama_pt_consecutive_user_messages():
         {"role": "assistant", "content": "I'm good, thanks!"},
         {"role": "user", "content": "I am well too."},
     ]
-
+
     result = ollama_pt(model="llama2", messages=messages)
-
+
     # Consecutive user messages should be merged
     expected_prompt = "### User:\nHello\n\n### Assistant:\nHow can I help you?\n\n### User:\nHow are you?\n\n### Assistant:\nI'm good, thanks!\n\n### User:\nI am well too.\n\n"
     assert isinstance(result, dict)
     assert result["prompt"] == expected_prompt
 
+
 # def test_ollama_pt_consecutive_system_messages():
 #     """Test handling consecutive system messages"""
 #     messages = [
@@ -44,9 +52,9 @@ def test_ollama_pt_consecutive_user_messages():
 #         {"role": "system", "content": "Be concise and polite"},
 #         {"role": "assistant", "content": "How can I help you?"}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     # Consecutive system messages should be merged
 #     expected_prompt = "### User:\nHello\n\n### System:\nYou are a helpful assistantBe concise and polite\n\n### Assistant:\nHow can I help you?\n\n"
 #     assert result == expected_prompt
@@ -59,9 +67,9 @@ def test_ollama_pt_consecutive_user_messages():
 #         {"role": "assistant", "content": "How can I help you?"},
 #         {"role": "user", "content": "Tell me a joke"}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     # Consecutive assistant messages should be merged
 #     expected_prompt = "### User:\nHello\n\n### Assistant:\nHi there!How can I help you?\n\n### User:\nTell me a joke\n\n"
 #     assert result["prompt"] == expected_prompt
@@ -75,9 +83,9 @@ def test_ollama_pt_consecutive_user_messages():
 #         ]},
 #         {"role": "assistant", "content": "That's a cat."}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     expected_prompt = "### User:\nWhat's in this image?\n\n### Assistant:\nThat's a cat.\n\n"
 #     assert result["prompt"] == expected_prompt
 #     assert result["images"] == ["http://example.com/image.jpg"]
@@ -91,9 +99,9 @@ def test_ollama_pt_consecutive_user_messages():
 #         ]},
 #         {"role": "assistant", "content": "That's a cat."}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     expected_prompt = "### User:\nWhat's in this image?\n\n### Assistant:\nThat's a cat.\n\n"
 #     assert result["prompt"] == expected_prompt
 #     assert result["images"] == ["http://example.com/image.jpg"]
@@ -116,9 +124,9 @@ def test_ollama_pt_consecutive_user_messages():
 #         },
 #         {"role": "tool", "content": "Sunny, 72°F"}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     # Check if tool call is included in the prompt
 #     assert "### User:\nWhat's the weather in San Francisco?" in result["prompt"]
 #     assert "### Assistant:\nI'll check the weather for you.Tool Calls:" in result["prompt"]
@@ -131,18 +139,18 @@ def test_ollama_pt_consecutive_user_messages():
 #     messages = [
 #         {"role": "invalid_role", "content": "This is an invalid role"}
 #     ]
-
+
 #     with pytest.raises(litellm.BadRequestError) as excinfo:
 #         ollama_pt(model="llama2", messages=messages)
-
+
 #     assert BAD_MESSAGE_ERROR_STR in str(excinfo.value)
 
 # def test_ollama_pt_empty_messages():
 #     """Test with empty messages list"""
 #     messages = []
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     assert result["prompt"] == ""
 #     assert result["images"] == []
 
@@ -155,9 +163,9 @@ def test_ollama_pt_consecutive_user_messages():
 #         {"role": "assistant", "content": "To get to the other side!"},
 #         {"role": "tool", "content": "Joke rating: 5/10"}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     assert "### User:\nTell me a joke" in result["prompt"]
 #     assert "### Assistant:\nWhy did the chicken cross the road?" in result["prompt"]
 #     assert "### User:\nWhy?" in result["prompt"]
@@ -171,9 +179,9 @@ def test_ollama_pt_consecutive_user_messages():
 #         {"role": "function", "content": "The result is 4"},
 #         {"role": "assistant", "content": "The answer is 4."}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     assert "### User:\nWhat's 2+2?The result is 4\n\n" in result["prompt"]
 #     assert "### Assistant:\nThe answer is 4.\n\n" in result["prompt"]
 
@@ -187,9 +195,9 @@ def test_ollama_pt_consecutive_user_messages():
 #         ]},
 #         {"role": "assistant", "content": "Both images show cats, but different breeds."}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     expected_prompt = "### User:\nCompare these images:\n\n### Assistant:\nBoth images show cats, but different breeds.\n\n"
 #     assert result["prompt"] == expected_prompt
 #     assert result["images"] == ["http://example.com/image1.jpg", "http://example.com/image2.jpg"]
@@ -206,12 +214,12 @@ def test_ollama_pt_consecutive_user_messages():
 #         {"role": "system", "content": "Be helpful"},
 #         {"role": "assistant", "content": "I see a cat in the image."}
 #     ]
-
+
 #     result = ollama_pt(model="llama2", messages=messages)
-
+
 #     assert "### User:\nHello\n\n" in result["prompt"]
 #     assert "### Assistant:\nHi there!\n\n" in result["prompt"]
 #     assert "### User:\nLook at this:\n\n" in result["prompt"]
 #     assert "### System:\nBe helpful\n\n" in result["prompt"]
 #     assert "### Assistant:\nI see a cat in the image.\n\n" in result["prompt"]
-#     assert result["images"] == ["http://example.com/image.jpg"]
+#     assert result["images"] == ["http://example.com/image.jpg"]
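
For context on what these tests exercise: ollama_pt flattens an OpenAI-style messages list into a single "### Role:\n..." prompt string and collects any image URLs into a separate list. Below is a minimal sketch of the call shape implied by the assertions above; the model name is arbitrary and the expected output is copied from the first test, so this is an illustration rather than additional test code.

    from litellm.litellm_core_utils.prompt_templates.factory import ollama_pt

    # ollama_pt returns a dict holding the flattened prompt plus any images
    # extracted from multimodal content blocks.
    result = ollama_pt(
        model="llama2",  # arbitrary model name for this sketch
        messages=[
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "assistant", "content": "How can I help you?"},
            {"role": "user", "content": "Hello"},
        ],
    )
    assert result["prompt"] == (
        "### System:\nYou are a helpful assistant\n\n"
        "### Assistant:\nHow can I help you?\n\n"
        "### User:\nHello\n\n"
    )
    assert result["images"] == []

The two active tests can be run with a standard pytest invocation such as pytest -k ollama_pt against this file.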