19
19
import random
20
20
import time
21
21
import warnings
22
+ from bisect import bisect
23
+ from collections import defaultdict
22
24
from os import path
23
25
from threading import Lock
24
26
from uuid import uuid4
32
34
from ..message import Message
33
35
34
36
MAINTENANCE_SCALE = 1000000
35
- MAINTENANCE_COMMAND_BLACKLIST = {"ack" , "nack" }
37
+ MAINTENANCE_COMMAND_BLACKLIST = {"ack" , "nack" , "qsize" }
36
38
37
39
#: How many commands out of a million should trigger queue
38
40
#: maintenance.
50
52
#: the first time it's run, but it may be overwritten using this var.
51
53
DEFAULT_LUA_MAX_STACK = getenv_int ("dramatiq_lua_max_stack" )
52
54
55
+ #: The default priority steps. Each step will create a new queue
56
+ DEFAULT_PRIORITY_STEPS = [0 , 3 , 6 , 9 ]
57
+
58
+
59
def _get_all_priority_queue_names(queue_name, priority_steps):
    """Yield the priority queue name for each configured priority step.

    Delay queues never get priority variants, so nothing is yielded when
    *queue_name* is itself a delay queue.

    Parameters:
      queue_name(str): The queue name
      priority_steps(list[int]): The configured priority steps

    Returns: The queue names for the given queue name and priority steps
    """
    is_delay_queue = dq_name(queue_name) == queue_name
    if not is_delay_queue:
        for step in priority_steps:
            yield pri_name(queue_name, step)
71
+
72
+
73
def pri_name(queue_name, priority):
    """Return the queue name for a given queue name and a priority.

    If the given queue name already belongs to a priority queue for that
    priority, it is returned unchanged.
    """
    suffix = ".PR{}".format(priority)
    if queue_name.endswith(suffix):
        return queue_name
    return queue_name + suffix
81
+
53
82
54
83
class RedisBroker (Broker ):
55
84
"""A broker than can be used with Redis.
@@ -81,6 +110,11 @@ class RedisBroker(Broker):
81
110
dead-lettered messages are kept in Redis for.
82
111
requeue_deadline(int): Deprecated. Does nothing.
83
112
requeue_interval(int): Deprecated. Does nothing.
113
+ max_priority(int): Configure queues with max priority to support message’s broker_priority option.
114
+ The queuing is done by having multiple queues for each named queue.
115
+ The queues are then consumed in order of priority. The max value of max_priority is 10.
116
+ priority_steps(list[int]): The priority range that is collapsed into the queues (4 by default).
117
+ The number of steps can be configured by providing a list of numbers in sorted order.
84
118
client(redis.StrictRedis): A redis client to use.
85
119
**parameters: Connection parameters are passed directly
86
120
to :class:`redis.Redis`.
@@ -97,6 +131,8 @@ def __init__(
97
131
requeue_deadline = None ,
98
132
requeue_interval = None ,
99
133
client = None ,
134
+ max_priority = None ,
135
+ priority_steps = None ,
100
136
** parameters
101
137
):
102
138
super ().__init__ (middleware = middleware )
@@ -114,6 +150,14 @@ def __init__(
114
150
self .heartbeat_timeout = heartbeat_timeout
115
151
self .dead_message_ttl = dead_message_ttl
116
152
self .queues = set ()
153
+ if max_priority :
154
+ if max_priority > 10 :
155
+ raise ValueError ("max priority is supported up to 10" )
156
+ if not priority_steps :
157
+ self .priority_steps = DEFAULT_PRIORITY_STEPS [:bisect (DEFAULT_PRIORITY_STEPS , max_priority ) - 1 ]
158
+ self .priority_steps = priority_steps or []
159
+ else :
160
+ self .priority_steps = []
117
161
# TODO: Replace usages of StrictRedis (redis-py 2.x) with Redis in Dramatiq 2.0.
118
162
self .client = client or redis .StrictRedis (** parameters )
119
163
self .scripts = {name : self .client .register_script (script ) for name , script in _scripts .items ()}
@@ -163,6 +207,9 @@ def enqueue(self, message, *, delay=None):
163
207
ValueError: If ``delay`` is longer than 7 days.
164
208
"""
165
209
queue_name = message .queue_name
210
+ if "broker_priority" in message .options and delay is None :
211
+ priority = message .options ["broker_priority" ]
212
+ queue_name = self .priority_queue_name (queue_name , priority )
166
213
167
214
# Each enqueued message must have a unique id in Redis so
168
215
# using the Message's id isn't safe because messages may be
@@ -202,7 +249,7 @@ def flush(self, queue_name):
202
249
Parameters:
203
250
queue_name(str): The queue to flush.
204
251
"""
205
- for name in (queue_name , dq_name (queue_name )):
252
+ for name in (queue_name , dq_name (queue_name ), * _get_all_priority_queue_names ( queue_name , self . priority_steps ) ):
206
253
self .do_purge (name )
207
254
208
255
def flush_all (self ):
@@ -231,13 +278,36 @@ def join(self, queue_name, *, interval=100, timeout=None):
231
278
if deadline and time .monotonic () >= deadline :
232
279
raise QueueJoinTimeout (queue_name )
233
280
234
- size = self .do_qsize (queue_name )
281
+ size = self .get_queue_size (queue_name )
235
282
236
283
if size == 0 :
237
284
return
238
285
239
286
time .sleep (interval / 1000 )
240
287
288
def get_queue_size(self, queue_name):
    """
    Get the number of messages in a queue. This method is only meant to be used in unit and integration tests.

    Parameters:
      queue_name(str): The queue whose message counts to get.

    Returns: The number of messages in the queue, including the delay queue
    """
    size = 0
    if self.priority_steps:
        # NB: use a distinct loop variable here.  Rebinding ``queue_name``
        # would make the ``do_qsize(queue_name)`` call below count the last
        # priority queue a second time instead of the base queue.
        for priority_queue in _get_all_priority_queue_names(queue_name, self.priority_steps):
            size += self.do_qsize(priority_queue)
    size += self.do_qsize(queue_name)
    return size
303
+
304
def priority_queue_name(self, queue, priority):
    """Return the queue that a message with *priority* maps onto.

    ``None`` priorities and delay queues pass through unchanged; any other
    priority is collapsed onto the nearest configured priority step.
    """
    if priority is None:
        return queue
    if dq_name(queue) == queue:
        # Delay queues do not have priority variants.
        return queue
    step = self.priority_steps[bisect(self.priority_steps, priority) - 1]
    return pri_name(queue, step)
310
+
241
311
def _should_do_maintenance (self , command ):
242
312
return int (
243
313
command not in MAINTENANCE_COMMAND_BLACKLIST and
@@ -310,7 +380,8 @@ def ack(self, message):
310
380
# The current queue might be different from message.queue_name
311
381
# if the message has been delayed so we want to ack on the
312
382
# current queue.
313
- self .broker .do_ack (self .queue_name , message .options ["redis_message_id" ])
383
+ queue_name = self .broker .priority_queue_name (self .queue_name , message .options .get ("broker_priority" ))
384
+ self .broker .do_ack (queue_name , message .options ["redis_message_id" ])
314
385
except redis .ConnectionError as e :
315
386
raise ConnectionClosed (e ) from None
316
387
finally :
@@ -319,19 +390,26 @@ def ack(self, message):
319
390
def nack(self, message):
    """Negatively acknowledge *message* on the queue it was consumed from.

    The source queue may be a priority queue rather than ``self.queue_name``,
    so it is re-derived from the message's ``broker_priority`` option.
    """
    try:
        priority = message.options.get("broker_priority")
        source_queue = self.broker.priority_queue_name(self.queue_name, priority)
        self.broker.do_nack(source_queue, message.options["redis_message_id"])
    except redis.ConnectionError as e:
        raise ConnectionClosed(e) from None
    finally:
        # Always forget the message id, even if the nack itself failed.
        self.queued_message_ids.discard(message.message_id)
327
399
328
400
def requeue(self, messages):
    """Requeue unacked messages, grouping them by their source queue.

    Messages consumed from a priority queue must be requeued onto that same
    priority queue rather than the base queue, so the target queue is
    re-derived from each message's ``broker_priority`` option.

    Parameters:
      messages(iterable[Message]): The messages to requeue.
    """
    message_ids_by_queue = defaultdict(list)
    for message in messages:
        priority = message.options.get("broker_priority")
        if priority is None:
            queue_name = self.queue_name
        else:
            queue_name = self.broker.priority_queue_name(self.queue_name, priority)
        message_ids_by_queue[queue_name].append(message.options["redis_message_id"])

    for queue_name, message_ids in message_ids_by_queue.items():
        # Log the actual target queue rather than always the base queue
        # name, so priority requeues are attributed correctly.
        self.logger.debug("Re-enqueueing %r on queue %r.", message_ids, queue_name)
        self.broker.do_requeue(queue_name, *message_ids)
335
413
336
414
def __next__ (self ):
337
415
try :
@@ -360,11 +438,16 @@ def __next__(self):
360
438
# prefetch up to that number of messages.
361
439
messages = []
362
440
if self .outstanding_message_count < self .prefetch :
363
- self .message_cache = messages = self .broker .do_fetch (
364
- self .queue_name ,
365
- self .prefetch - self .outstanding_message_count ,
366
- )
367
-
441
+ for queue_name in self .queue_names ():
442
+ # Ideally, we would want to sort the messages by their priority,
443
+ # but that will require decoding them now
444
+ self .message_cache = messages = self .broker .do_fetch (
445
+ queue_name ,
446
+ self .prefetch - self .outstanding_message_count ,
447
+ )
448
+
449
+ if messages :
450
+ break
368
451
# Because we didn't get any messages, we should
369
452
# progressively long poll up to the idle timeout.
370
453
if not messages :
@@ -374,6 +457,10 @@ def __next__(self):
374
457
except redis .ConnectionError as e :
375
458
raise ConnectionClosed (e ) from None
376
459
460
def queue_names(self):
    """Yield every queue this consumer reads, priority queues first."""
    for name in _get_all_priority_queue_names(self.queue_name, self.broker.priority_steps):
        yield name
    yield self.queue_name
463
+
377
464
378
465
_scripts = {}
379
466
_scripts_path = path .join (path .abspath (path .dirname (__file__ )), "redis" )
0 commit comments