forked from waybarrios/vllm-mlx
-
Notifications
You must be signed in to change notification settings - Fork 64
Expand file tree
/
Copy pathtest_prompt_lookup.py
More file actions
343 lines (279 loc) · 13.5 KB
/
test_prompt_lookup.py
File metadata and controls
343 lines (279 loc) · 13.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
# SPDX-License-Identifier: Apache-2.0
"""Tests for PromptLookupDecoder."""
from vllm_mlx.speculative.prompt_lookup import PromptLookupDecoder
class TestPromptLookupDecoderInit:
    """Construction of PromptLookupDecoder with default and custom settings."""

    def test_default_init(self):
        """A fresh decoder exposes the documented defaults and zeroed counters."""
        d = PromptLookupDecoder()

        assert d.num_draft_tokens == 4
        assert d.ngram_size == 3
        assert d.min_matches == 2
        assert d._token_history == []
        # Every statistics counter starts at zero.
        for counter in (
            "total_drafts",
            "successful_drafts",
            "total_draft_tokens",
            "accepted_tokens",
        ):
            assert getattr(d, counter) == 0

    def test_custom_init(self):
        """Keyword arguments override each tunable independently."""
        d = PromptLookupDecoder(num_draft_tokens=8, ngram_size=5, min_matches=3)

        assert d.num_draft_tokens == 8
        assert d.ngram_size == 5
        assert d.min_matches == 3
class TestPromptLookupDecoderReset:
    """Behaviour of reset(): state is wiped, configuration survives."""

    def test_reset_clears_history(self):
        """reset() empties both the token history and the n-gram index."""
        d = PromptLookupDecoder()
        d.add_prompt_tokens([1, 2, 3, 4, 5])
        d.add_generated_token(6)

        d.reset()

        assert d._token_history == []
        assert not d._ngram_index

    def test_reset_preserves_config(self):
        """Constructor parameters are untouched by reset()."""
        d = PromptLookupDecoder(num_draft_tokens=8, ngram_size=5)
        d.add_prompt_tokens([1, 2, 3])

        d.reset()

        assert (d.num_draft_tokens, d.ngram_size) == (8, 5)
class TestPromptLookupDecoderAddTokens:
    """Feeding tokens via add_prompt_tokens / add_generated_token."""

    def test_add_prompt_tokens_empty(self):
        """An empty prompt leaves the history empty."""
        d = PromptLookupDecoder()
        d.add_prompt_tokens([])
        assert d._token_history == []

    def test_add_prompt_tokens_single(self):
        """A one-token prompt is recorded verbatim."""
        d = PromptLookupDecoder()
        d.add_prompt_tokens([1])
        assert d._token_history == [1]

    def test_add_prompt_tokens_multiple(self):
        """A multi-token prompt is stored in order."""
        d = PromptLookupDecoder(ngram_size=3)
        d.add_prompt_tokens([1, 2, 3, 4, 5])
        assert d._token_history == [1, 2, 3, 4, 5]

    def test_add_generated_token(self):
        """Generated tokens are appended after the prompt tokens."""
        d = PromptLookupDecoder()
        d.add_prompt_tokens([1, 2])
        d.add_generated_token(3)
        assert d._token_history == [1, 2, 3]

    def test_ngram_index_populated(self):
        """Each position is indexed under the n-gram that precedes it."""
        d = PromptLookupDecoder(ngram_size=3)
        # With history [1, 2, 3, 4], token 4 (position 3) is preceded by the
        # 3-gram (1, 2, 3), so that n-gram must be present in the index.
        d.add_prompt_tokens([1, 2, 3, 4])
        assert (1, 2, 3) in d._ngram_index
class TestPromptLookupDecoderGetDraftTokens:
    """Tests for get_draft_tokens method.

    Key: the query is ``history[-ngram_size:]``. The index maps an n-gram to
    the positions where that n-gram PRECEDED the token; the proposed
    continuation starts at the matched position.
    """

    def test_empty_history_returns_empty(self):
        """No history means no query and therefore no draft."""
        decoder = PromptLookupDecoder()
        assert decoder.get_draft_tokens() == []

    def test_history_shorter_than_ngram_size(self):
        """A query cannot be formed from fewer than ngram_size tokens."""
        decoder = PromptLookupDecoder(ngram_size=3)
        decoder.add_prompt_tokens([1, 2])
        assert decoder.get_draft_tokens() == []

    def test_no_matching_ngram(self):
        """A query that never occurred earlier yields no draft."""
        decoder = PromptLookupDecoder(ngram_size=3, min_matches=1)
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 6, 7, 8])
        # query = (6, 7, 8) — never seen before, no match
        assert decoder.get_draft_tokens() == []

    def test_ngram_match_found(self):
        """When last 3 tokens match an earlier n-gram, return continuation."""
        decoder = PromptLookupDecoder(num_draft_tokens=4, ngram_size=3, min_matches=1)
        # History: [1, 2, 3, 4, 5, 1, 2, 3]
        # query = history[-3:] = (1, 2, 3)
        # (1,2,3) appears at start=0 and start=5 (current); current is skipped.
        # Continuation from 0+3=3: [4, 5, 1, 2]
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 1, 2, 3])
        drafts = decoder.get_draft_tokens()
        assert drafts == [4, 5, 1, 2]

    def test_min_matches_threshold_not_met(self):
        """A continuation meeting min_matches IS returned.

        NOTE(review): despite the method name, the continuation here is
        4 tokens, which satisfies min_matches=3, so a draft IS produced.
        The name is kept unchanged for test-suite stability.
        """
        decoder = PromptLookupDecoder(num_draft_tokens=4, ngram_size=3, min_matches=3)
        # History: [1, 2, 3, 4, 5, 1, 2, 3] — the match yields a 4-token
        # continuation, which clears the min_matches=3 threshold.
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 1, 2, 3])
        drafts = decoder.get_draft_tokens()
        assert len(drafts) == 4  # >= min_matches=3

    def test_min_matches_threshold_blocks(self):
        """A continuation of exactly min_matches tokens still passes.

        NOTE(review): despite the method name, with history
        [1, 2, 3, 4, 1, 2, 3] the continuation has 3 tokens, equal to
        min_matches=3, so the threshold is met and a draft is returned.
        The name is kept unchanged for test-suite stability.
        """
        decoder = PromptLookupDecoder(num_draft_tokens=4, ngram_size=3, min_matches=3)
        decoder.add_prompt_tokens([1, 2, 3, 4, 1, 2, 3])
        drafts = decoder.get_draft_tokens()
        assert len(drafts) >= 3

    def test_num_draft_tokens_limits_output(self):
        """num_draft_tokens caps the length of the returned draft."""
        decoder = PromptLookupDecoder(num_draft_tokens=2, ngram_size=3, min_matches=1)
        # History: [1, 2, 3, 4, 5, 6, 1, 2, 3]
        # query = (1,2,3), matches at start=0 (start=6 is current, skipped)
        # continuation from 0+3=3: [4, 5, 6, 1, 2, 3], truncated to 2
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 6, 1, 2, 3])
        drafts = decoder.get_draft_tokens()
        assert len(drafts) == 2
        assert drafts == [4, 5]

    def test_repeating_pattern(self):
        """A repeating pattern produces long continuations."""
        decoder = PromptLookupDecoder(num_draft_tokens=4, ngram_size=3, min_matches=1)
        # History repeats [1..6] twice plus a partial third repeat, so the
        # query (1, 2, 3) matches several earlier positions.
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3])
        drafts = decoder.get_draft_tokens()
        assert len(drafts) > 0

    def test_best_continuation_selected(self):
        """Among multiple matches, the longest continuation wins."""
        decoder = PromptLookupDecoder(num_draft_tokens=4, ngram_size=2, min_matches=1)
        # History: [1, 2, 1, 2, 3, 4, 5, 1, 2]; query = (1, 2).
        # The 2-gram (1, 2) precedes at least one non-current position, so a
        # non-empty draft must be produced regardless of which match wins.
        decoder.add_prompt_tokens([1, 2, 1, 2, 3, 4, 5, 1, 2])
        drafts = decoder.get_draft_tokens()
        assert len(drafts) > 0

    def test_current_position_excluded(self):
        """The current position in the n-gram index is skipped."""
        decoder = PromptLookupDecoder(num_draft_tokens=2, ngram_size=3, min_matches=1)
        # History: [1, 2, 3] — the only occurrence of (1, 2, 3) is the
        # current tail, which must be excluded, so no draft results.
        decoder.add_prompt_tokens([1, 2, 3])
        drafts = decoder.get_draft_tokens()
        assert drafts == []
class TestPromptLookupDecoderRecordAccepted:
    """Bookkeeping performed by record_accepted()."""

    def test_record_accepted_zero(self):
        """Zero accepted tokens does not count as a successful draft."""
        d = PromptLookupDecoder()
        d.record_accepted(0)
        assert d.successful_drafts == 0
        assert d.accepted_tokens == 0

    def test_record_accepted_positive(self):
        """A positive count registers one successful draft."""
        d = PromptLookupDecoder()
        d.record_accepted(3)
        assert d.successful_drafts == 1
        assert d.accepted_tokens == 3

    def test_record_accepted_multiple_calls(self):
        """Counts accumulate across successive calls."""
        d = PromptLookupDecoder()
        for count in (2, 3, 1):
            d.record_accepted(count)
        assert d.successful_drafts == 3
        assert d.accepted_tokens == 6
class TestPromptLookupDecoderGetStats:
    """Reporting via get_stats()."""

    def test_initial_stats(self):
        """A fresh decoder reports all-zero statistics."""
        stats = PromptLookupDecoder().get_stats()
        expected = {
            "total_drafts": 0,
            "successful_drafts": 0,
            "total_draft_tokens": 0,
            "accepted_tokens": 0,
            "acceptance_rate": 0.0,
            "history_size": 0,
        }
        for key, value in expected.items():
            assert stats[key] == value

    def test_stats_after_draft(self):
        """Producing one draft updates draft counters and history size."""
        decoder = PromptLookupDecoder(ngram_size=3, min_matches=1)
        # Repeat (1, 2, 3) so the lookup query matches an earlier position.
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 1, 2, 3])
        drafts = decoder.get_draft_tokens()
        assert len(drafts) > 0

        stats = decoder.get_stats()
        assert stats["total_drafts"] == 1
        assert stats["total_draft_tokens"] == len(drafts)
        assert stats["history_size"] == 8

    def test_stats_acceptance_rate(self):
        """Accepting every drafted token gives an acceptance rate of 1.0."""
        decoder = PromptLookupDecoder(ngram_size=3, min_matches=1)
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 1, 2, 3])
        drafts = decoder.get_draft_tokens()
        decoder.record_accepted(len(drafts))
        assert decoder.get_stats()["acceptance_rate"] == 1.0

    def test_stats_acceptance_rate_partial(self):
        """Accepting part of a draft yields a fractional acceptance rate."""
        decoder = PromptLookupDecoder(ngram_size=3, min_matches=1)
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 6, 7, 1, 2, 3])
        drafts = decoder.get_draft_tokens()
        assert len(drafts) > 0

        # Accept roughly half the draft, but always at least one token.
        accepted = max(1, len(drafts) // 2)
        decoder.record_accepted(accepted)

        rate = decoder.get_stats()["acceptance_rate"]
        assert abs(rate - accepted / len(drafts)) < 0.01
class TestPromptLookupDecoderEdgeCases:
    """Boundary conditions for the decoder."""

    def test_ngram_size_one(self):
        """With ngram_size=1, a single repeated token is enough to draft."""
        decoder = PromptLookupDecoder(ngram_size=1, num_draft_tokens=3, min_matches=1)
        # History [5, 5, 5, 5]: the 1-gram (5,) precedes positions 1 through 3.
        # The current position is skipped; earlier matches remain, and every
        # continuation is a run of 5s.
        decoder.add_prompt_tokens([5, 5, 5, 5])
        drafts = decoder.get_draft_tokens()
        assert len(drafts) >= 1
        assert set(drafts) == {5}

    def test_large_ngram_size(self):
        """A query longer than any repeat in the history finds no match."""
        decoder = PromptLookupDecoder(ngram_size=10, min_matches=1)
        decoder.add_prompt_tokens(list(range(15)))
        # query = last 10 tokens [5..14], occurring only once (at the end).
        assert decoder.get_draft_tokens() == []

    def test_no_repetition_in_prompt(self):
        """A strictly increasing prompt yields no draft."""
        decoder = PromptLookupDecoder(ngram_size=3, min_matches=1)
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 6, 7, 8])
        # query = (6, 7, 8) has no earlier occurrence.
        assert decoder.get_draft_tokens() == []

    def test_exactly_ngram_size_history(self):
        """History exactly ngram_size long has only the current occurrence."""
        decoder = PromptLookupDecoder(ngram_size=3, min_matches=1)
        decoder.add_prompt_tokens([1, 2, 3])
        # Only 3 tokens: (1, 2, 3) occurs solely at the current tail.
        assert decoder.get_draft_tokens() == []

    def test_reset_between_generations(self):
        """Typical usage: reset() isolates consecutive generation runs."""
        decoder = PromptLookupDecoder(ngram_size=3, min_matches=1)

        # First run: repeated (1, 2, 3) produces a draft.
        decoder.add_prompt_tokens([1, 2, 3, 4, 5, 1, 2, 3])
        drafts1 = decoder.get_draft_tokens()
        assert len(drafts1) > 0

        decoder.reset()

        # Second run with entirely different tokens also drafts, and the
        # history now contains only the second run's tokens.
        decoder.add_prompt_tokens([10, 20, 30, 40, 50, 10, 20, 30])
        drafts2 = decoder.get_draft_tokens()
        assert len(drafts2) > 0
        assert 1 not in decoder._token_history
        assert 10 in decoder._token_history

    def test_generated_tokens_extend_ngram_index(self):
        """Tokens added during generation create new n-grams for lookup."""
        decoder = PromptLookupDecoder(ngram_size=3, num_draft_tokens=2, min_matches=1)
        decoder.add_prompt_tokens([1, 2, 3, 4])
        # Query (2, 3, 4) exists only at the current position: no draft yet.
        assert decoder.get_draft_tokens() == []

        # Generate tokens that recreate the prompt's opening pattern.
        for token in (1, 2, 3):
            decoder.add_generated_token(token)

        # History is now [1, 2, 3, 4, 1, 2, 3]; query (1, 2, 3) matches the
        # start of the prompt, so the continuation begins with token 4.
        drafts = decoder.get_draft_tokens()
        assert len(drafts) > 0
        assert drafts[0] == 4