RedisBackend class reference
In most cases, you don't need to interact with backends directly; they are used internally by Queue. For typical usage, see the Queue reference and the Getting Started guides to learn how to configure and use the different backends.
If you need to implement a custom backend or want to understand how existing backends work, here's the reference information for the backend classes.
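For orientation, here is a minimal sketch of the typical path. The `Queue(backend)` wiring and import paths are assumptions here; the Queue reference is authoritative:

```python
import asyncio

from sheppy import Queue, RedisBackend  # import paths assumed


async def main() -> None:
    backend = RedisBackend(url="redis://127.0.0.1:6379")
    queue = Queue(backend)  # hypothetical wiring; see the Queue reference
    ...


asyncio.run(main())
```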
sheppy.RedisBackend
```python
RedisBackend(
    url: str = "redis://127.0.0.1:6379",
    consumer_group: str = "workers",
    ttl: int | None = 30 * 24 * 60 * 60,
    **kwargs: Any,
)
```

Bases: Backend

Source code in src/sheppy/backend/redis.py

```python
def __init__(
    self,
    url: str = "redis://127.0.0.1:6379",
    consumer_group: str = "workers",
    ttl: int | None = 30 * 24 * 60 * 60,  # 30 days
    **kwargs: Any
):
    self.url = url
    self.consumer_group = consumer_group
    self.consumer_name = generate_unique_worker_id("consumer")
    self.ttl = ttl
    self.redis_kwargs = kwargs

    self._client: redis.Redis | None = None
    self._pool: redis.ConnectionPool | None = None
    self._pending_messages: dict[str, tuple[str, str]] = {}  # task_id -> (queue_name, message_id)
    self._initialized_groups: set[str] = set()
    self._results_stream_ttl = 60
```
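A construction sketch: extra keyword arguments are forwarded to `redis.ConnectionPool.from_url()` in `connect()` below, so any pool option redis-py accepts can be passed through:

```python
backend = RedisBackend(
    url="redis://127.0.0.1:6379/1",
    consumer_group="workers",
    ttl=None,             # keep task metadata without expiry
    max_connections=20,   # forwarded to the connection pool
    socket_timeout=5.0,   # also forwarded
)
```

Note that the backend reads stream fields as bytes (e.g. `fields[b"data"]`), so leave `decode_responses` at its default of `False`.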
consumer_group

```python
consumer_group = consumer_group
```

consumer_name

```python
consumer_name = generate_unique_worker_id('consumer')
```
connect
Source code in src/sheppy/backend/redis.py

```python
async def connect(self) -> None:
    try:
        self._pool = redis.ConnectionPool.from_url(
            self.url,
            #decode_responses=self.decode_responses,
            #max_connections=self.max_connections,
            #protocol=3,  # enable RESP version 3
            **self.redis_kwargs
        )
        self._client = redis.Redis.from_pool(self._pool)
        await self._client.ping()  # type: ignore[misc]
    except Exception as e:
        self._client = None
        self._pool = None
        raise BackendError(f"Failed to connect to Redis: {e}") from e
```
disconnect
Source code in src/sheppy/backend/redis.py

```python
async def disconnect(self) -> None:
    if self._client:
        await self._client.aclose()

    self._client = None
    self._pool = None
    self._pending_messages.clear()
    self._initialized_groups.clear()
```
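A minimal connect/disconnect lifecycle sketch, assuming a reachable Redis at the default URL:

```python
import asyncio

from sheppy import RedisBackend  # import path assumed


async def main() -> None:
    backend = RedisBackend()
    await backend.connect()  # raises BackendError if Redis is unreachable
    try:
        print(await backend.size("default"))
    finally:
        await backend.disconnect()


asyncio.run(main())
```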
append
```python
append(
    queue_name: str,
    tasks: list[dict[str, Any]],
    unique: bool = True,
) -> list[bool]
```

Add new tasks to be processed.

Source code in src/sheppy/backend/redis.py

```python
async def append(self, queue_name: str, tasks: list[dict[str, Any]], unique: bool = True) -> list[bool]:
    """Add new tasks to be processed."""
    tasks_metadata_key = self._tasks_metadata_key(queue_name)
    pending_tasks_key = self._pending_tasks_key(queue_name)

    await self._ensure_consumer_group(pending_tasks_key)

    if unique:
        success = await self._create_tasks(queue_name, tasks)
        to_queue = [t for i, t in enumerate(tasks) if success[i]]
    else:
        success = [True] * len(tasks)
        to_queue = tasks

    try:
        async with self.client.pipeline(transaction=False) as pipe:
            pipe.hsetnx(self._queues_registry_key(), queue_name, "{}")
            for t in to_queue:
                _task_data = json.dumps(t)
                if not unique:
                    pipe.set(f"{tasks_metadata_key}:{t['id']}", _task_data)
                # add to pending stream
                pipe.xadd(pending_tasks_key, {"data": _task_data})
            await pipe.execute()
    except Exception as e:
        raise BackendError(f"Failed to enqueue task: {e}") from e

    return success
```
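A sketch of calling `append()` directly. The task dict shape is hypothetical; in practice Queue produces the payloads, and only the `id` key is visible in the code above:

```python
# Inside an async function, with `backend` already connected.
tasks = [
    {"id": "a1", "spec": {"func": "app.tasks.send_email"}},  # shape assumed
    {"id": "a2", "spec": {"func": "app.tasks.send_email"}},
]

accepted = await backend.append("default", tasks, unique=True)

# With unique=True, a False entry means a task with that id already existed.
for task, ok in zip(tasks, accepted):
    if not ok:
        print(f"duplicate skipped: {task['id']}")
```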
pop
```python
pop(
    queue_name: str,
    limit: int = 1,
    timeout: float | None = None,
) -> list[dict[str, Any]]
```

Get next tasks to process. Used primarily by workers.

Source code in src/sheppy/backend/redis.py

```python
async def pop(self, queue_name: str, limit: int = 1, timeout: float | None = None) -> list[dict[str, Any]]:
    """Get next tasks to process. Used primarily by workers."""
    pending_tasks_key = self._pending_tasks_key(queue_name)

    await self._ensure_consumer_group(pending_tasks_key)

    try:
        result = await self.client.xreadgroup(
            groupname=self.consumer_group,
            consumername=self.consumer_name,
            streams={pending_tasks_key: ">"},  # ">" means only new messages (not delivered to other consumers)
            count=limit,
            block=None if timeout is None or timeout == 0 else int(timeout * 1000)
        )

        if not result:
            return []

        messages = result[0][1]  # [['stream-name', [(message_id, dict_data)]]]
        if not messages:
            return []

        tasks = []
        for message_id, fields in messages:
            task_data = json.loads(fields[b"data"])
            # store message_id for acknowledge()
            self._pending_messages[task_data["id"]] = (queue_name, message_id.decode())
            tasks.append(task_data)

        return tasks
    except Exception as e:
        raise BackendError(f"Failed to dequeue task: {e}") from e
```
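A worker-style consumption sketch (task execution itself is out of scope for the backend; the handler here is a placeholder):

```python
# Inside an async function, with `backend` already connected.
while True:
    # timeout=5.0 blocks up to 5 s in XREADGROUP; timeout=None (or 0) returns immediately
    batch = await backend.pop("default", limit=10, timeout=5.0)
    for task_data in batch:
        ...  # run the task here (in practice the Worker does this)
        await backend.acknowledge("default", [task_data["id"]])  # XACK + XDEL the stream entry
```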
get_pending
```python
get_pending(
    queue_name: str, count: int = 1
) -> list[dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_pending(self, queue_name: str, count: int = 1) -> list[dict[str, Any]]:
    pending_tasks_key = self._pending_tasks_key(queue_name)

    await self._ensure_consumer_group(pending_tasks_key)

    messages = await self.client.xrange(pending_tasks_key, count=count)
    return [json.loads(fields[b"data"]) for _message_id, fields in messages]
```
size
```python
size(queue_name: str) -> int
```

Source code in src/sheppy/backend/redis.py

```python
async def size(self, queue_name: str) -> int:
    pending_tasks_key = self._pending_tasks_key(queue_name)

    await self._ensure_consumer_group(pending_tasks_key)

    return int(await self.client.xlen(pending_tasks_key))
```
clear
```python
clear(queue_name: str) -> int
```

Source code in src/sheppy/backend/redis.py

```python
async def clear(self, queue_name: str) -> int:
    tasks_metadata_key = self._tasks_metadata_key(queue_name)
    pending_tasks_key = self._pending_tasks_key(queue_name)
    scheduled_key = self._scheduled_tasks_key(queue_name)

    await self._ensure_consumer_group(pending_tasks_key)

    count = 0
    async for key in self.client.scan_iter(match=f"{tasks_metadata_key}:*", count=10000):
        await self.client.delete(key)
        count += 1

    await self.client.xtrim(pending_tasks_key, maxlen=0)
    await self.client.delete(scheduled_key)
    await self.client.hdel(self._queues_registry_key(), queue_name)  # type: ignore[misc]
    await self.client.delete(self._rate_limit_key(queue_name))

    sw_keys = [key async for key in self.client.scan_iter(match=self._sliding_window_key(queue_name, '*'), count=10000)]
    if sw_keys:
        await self.client.delete(*sw_keys)

    slot_keys = [key async for key in self.client.scan_iter(match=self._rate_limit_slot_key(queue_name, '*'), count=10000)]
    if slot_keys:
        await self.client.delete(*slot_keys)

    return count
```
get_tasks
```python
get_tasks(
    queue_name: str, task_ids: list[str]
) -> dict[str, dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_tasks(self, queue_name: str, task_ids: list[str]) -> dict[str, dict[str, Any]]:
    tasks_metadata_key = self._tasks_metadata_key(queue_name)

    if not task_ids:
        return {}

    task_json = await self.client.mget([f"{tasks_metadata_key}:{t}" for t in task_ids])
    tasks = [json.loads(d) for d in task_json if d]

    return {t['id']: t for t in tasks}
```
schedule
```python
schedule(
    queue_name: str,
    task_data: dict[str, Any],
    at: datetime,
    unique: bool = True,
) -> bool
```

Source code in src/sheppy/backend/redis.py

```python
async def schedule(self, queue_name: str, task_data: dict[str, Any], at: datetime, unique: bool = True) -> bool:
    tasks_metadata_key = self._tasks_metadata_key(queue_name)
    scheduled_key = self._scheduled_tasks_key(queue_name)

    if unique:
        success = await self._create_tasks(queue_name, [task_data])
        if not success[0]:
            return False

    try:
        if not unique:
            await self.client.set(f"{tasks_metadata_key}:{task_data['id']}", json.dumps(task_data))

        score = at.timestamp()
        async with self.client.pipeline(transaction=False) as pipe:
            pipe.zadd(scheduled_key, {task_data['id']: score})
            pipe.hsetnx(self._queues_registry_key(), queue_name, "{}")
            await pipe.execute()

        return True
    except Exception as e:
        raise BackendError(f"Failed to schedule task: {e}") from e
```
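A scheduling sketch (the task payload is hypothetical, as above):

```python
from datetime import datetime, timedelta, timezone

# Inside an async function, with `backend` already connected.
run_at = datetime.now(timezone.utc) + timedelta(minutes=5)

# Returns False when unique=True and a task with this id is already known.
ok = await backend.schedule("default", {"id": "s1"}, at=run_at)
```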
pop_scheduled
```python
pop_scheduled(
    queue_name: str, now: datetime | None = None
) -> list[dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def pop_scheduled(self, queue_name: str, now: datetime | None = None) -> list[dict[str, Any]]:
    scheduled_key = self._scheduled_tasks_key(queue_name)
    tasks_metadata_key = self._tasks_metadata_key(queue_name)

    score = now.timestamp() if now else time()
    task_id_entries = await self.client.zrangebyscore(scheduled_key, 0, score)

    claimed_ids = []
    for entry in task_id_entries:
        removed = await self.client.zrem(scheduled_key, entry)
        if removed <= 0:
            # some other worker already got this task at the same time, skip
            continue
        task_id = entry.decode() if isinstance(entry, bytes) else entry
        claimed_ids.append(task_id)

    if not claimed_ids:
        return []

    task_jsons = await self.client.mget([f"{tasks_metadata_key}:{tid}" for tid in claimed_ids])
    return [json.loads(tj) for tj in task_jsons if tj]
```
store_result
```python
store_result(
    queue_name: str, task_data: dict[str, Any]
) -> bool
```

Source code in src/sheppy/backend/redis.py

```python
async def store_result(self, queue_name: str, task_data: dict[str, Any]) -> bool:
    tasks_metadata_key = self._tasks_metadata_key(queue_name)
    finished_tasks_key = self._finished_tasks_key(queue_name)
    pending_tasks_key = self._pending_tasks_key(queue_name)

    await self._ensure_consumer_group(finished_tasks_key)

    message_id = None
    if task_data["id"] in self._pending_messages:
        stored_queue, message_id = self._pending_messages[task_data["id"]]
        if queue_name != stored_queue:  # this should never happen
            raise BackendError("queue name mismatch")

    try:
        # trim older messages to keep the stream small
        min_id = f"{int((time() - self._results_stream_ttl) * 1000)}-0"

        async with self.client.pipeline(transaction=True) as pipe:
            # update task metadata with the results
            pipe.set(f"{tasks_metadata_key}:{task_data['id']}", json.dumps(task_data), ex=self.ttl)

            # add to finished stream for get_result notifications
            if task_data["finished_at"] is not None:  # only send notification on finished task (for retriable tasks we continue to wait)
                pipe.xadd(finished_tasks_key, {"task_id": task_data["id"]}, minid=min_id)
                pipe.incr(self._completed_counter_key(queue_name))

            # ack and delete the task from the stream (cleanup)
            if message_id:
                pipe.xack(pending_tasks_key, self.consumer_group, message_id)
                pipe.xdel(pending_tasks_key, message_id)

            await pipe.execute()

        self._pending_messages.pop(task_data["id"], None)
        return True
    except Exception as e:
        raise BackendError(f"Failed to store task result: {e}") from e
```
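A sketch of recording a finished task. The field names are inferred from the code above; the exact task schema belongs to Queue:

```python
from datetime import datetime, timezone

# Inside an async function; `task_data` came from a previous pop().
task_data["result"] = 42  # field name assumed
# A non-null finished_at triggers the finished-stream notification that
# get_results() waits on; leaving it None only persists the metadata.
task_data["finished_at"] = datetime.now(timezone.utc).isoformat()

await backend.store_result("default", task_data)
```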
get_stats
```python
get_stats(queue_name: str) -> dict[str, int]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_stats(self, queue_name: str) -> dict[str, int]:
    scheduled_tasks_key = self._scheduled_tasks_key(queue_name)
    pending_tasks_key = self._pending_tasks_key(queue_name)

    pending = await self.client.xlen(pending_tasks_key)
    completed = await self.client.get(self._completed_counter_key(queue_name))

    return {
        "pending": pending,
        "completed": int(completed) if completed else 0,
        "scheduled": await self.client.zcard(scheduled_tasks_key),
    }
```
get_all_tasks
```python
get_all_tasks(queue_name: str) -> list[dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_all_tasks(self, queue_name: str) -> list[dict[str, Any]]:
    tasks_metadata_key = self._tasks_metadata_key(queue_name)

    keys = []
    async for key in self.client.scan_iter(match=f"{tasks_metadata_key}:*", count=10000):
        keys.append(key)

    if not keys:
        return []

    all_tasks_data = await self.client.mget(keys)
    return [json.loads(task_json) for task_json in all_tasks_data if task_json]
```
get_results
```python
get_results(
    queue_name: str,
    task_ids: list[str],
    timeout: float | None = None,
) -> dict[str, dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_results(self, queue_name: str, task_ids: list[str], timeout: float | None = None) -> dict[str, dict[str, Any]]:
    tasks_metadata_key = self._tasks_metadata_key(queue_name)
    finished_tasks_key = self._finished_tasks_key(queue_name)

    if not task_ids:
        return {}

    results = {}
    remaining_ids = task_ids[:]

    last_id = "0-0"
    if timeout is not None and timeout >= 0:
        with contextlib.suppress(redis.ResponseError):
            last_id = (await self.client.xinfo_stream(finished_tasks_key))["last-generated-id"]

    tasks = await self.client.mget([f"{tasks_metadata_key}:{t}" for t in task_ids])
    for task_json in tasks:
        if not task_json:
            continue
        t = json.loads(task_json)
        if t.get("finished_at"):
            results[t["id"]] = t
            remaining_ids.remove(t["id"])

    if not remaining_ids:
        return results

    if timeout is None or timeout < 0:
        return results

    # endless wait if timeout == 0
    deadline = None if timeout == 0 else asyncio.get_running_loop().time() + timeout

    while True:
        if deadline:
            remaining = deadline - asyncio.get_running_loop().time()
            if remaining <= 0:
                raise TimeoutError(f"Did not complete within {timeout} seconds")
        else:
            remaining = 0

        messages = await self.client.xread(
            {finished_tasks_key: last_id},
            block=int(remaining * 1000),
            count=1000
        )

        if not messages:
            continue

        for _, stream_messages in messages:
            for msg_id, data in stream_messages:
                last_id = msg_id
                task_id = data.get(b"task_id").decode()
                if task_id in remaining_ids:
                    task_json = await self.client.get(f"{tasks_metadata_key}:{task_id}")
                    if not task_json:
                        continue
                    t = json.loads(task_json)
                    if t.get("finished_at"):  # should be always true because we only get notifications for finished tasks
                        results[t["id"]] = t
                        remaining_ids.remove(t["id"])

        if not remaining_ids:
            return results
```
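A waiting sketch. The timeout semantics follow the code above: `None` or a negative value returns immediately with whatever has already finished, `0` waits forever, and a positive value raises `TimeoutError` on expiry:

```python
# Inside an async function, with `backend` already connected.
try:
    results = await backend.get_results("default", ["a1", "a2"], timeout=30.0)
    for task_id, data in results.items():
        print(task_id, data.get("result"))
except TimeoutError:
    print("tasks did not finish within 30 seconds")
```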
acknowledge
```python
acknowledge(queue_name: str, task_ids: list[str]) -> None
```

Source code in src/sheppy/backend/redis.py

```python
async def acknowledge(self, queue_name: str, task_ids: list[str]) -> None:
    pending_tasks_key = self._pending_tasks_key(queue_name)

    async with self.client.pipeline(transaction=False) as pipe:
        for task_id in task_ids:
            entry = self._pending_messages.pop(task_id, None)
            if entry is None:
                continue
            _, message_id = entry
            pipe.xack(pending_tasks_key, self.consumer_group, message_id)
            pipe.xdel(pending_tasks_key, message_id)
        await pipe.execute()
```
acquire_rate_limit
```python
acquire_rate_limit(
    queue_name: str,
    key: str,
    max_rate: int,
    rate_period: float,
    task_id: str,
    strategy: str = "sliding_window",
) -> float | None
```

Source code in src/sheppy/backend/redis.py

```python
async def acquire_rate_limit(self, queue_name: str, key: str, max_rate: int, rate_period: float, task_id: str, strategy: str = "sliding_window") -> float | None:
    if strategy == "fixed_window":
        return await self._acquire_fixed_window(queue_name, key, max_rate, rate_period)
    return await self._acquire_sliding_window(queue_name, key, max_rate, rate_period, task_id)
```
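A usage sketch. Judging from the signature, `None` appears to mean the slot was acquired, while a float is a suggested retry delay in seconds; treat that interpretation as an assumption, since the `_acquire_*` helpers are not shown here:

```python
# Inside an async function, with `backend` already connected.
delay = await backend.acquire_rate_limit(
    "default",
    key="external-api",
    max_rate=100,
    rate_period=60.0,  # at most 100 acquisitions per 60 s window
    task_id="a1",
    strategy="sliding_window",  # or "fixed_window"
)

if delay is not None:
    print(f"rate limited; retry in {delay:.2f}s")
```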
list_queues
```python
list_queues() -> dict[str, int]
```

Source code in src/sheppy/backend/redis.py

```python
async def list_queues(self) -> dict[str, int]:
    queue_names = await self.client.hkeys(self._queues_registry_key())  # type: ignore[misc]

    queues = {}
    for raw_name in sorted(queue_names):
        queue_name = raw_name.decode() if isinstance(raw_name, bytes) else raw_name
        try:
            pending_count = await self.client.xlen(self._pending_tasks_key(queue_name))
            queues[queue_name] = int(pending_count)
        except redis.ResponseError:
            queues[queue_name] = 0

    return queues
```
get_scheduled
```python
get_scheduled(queue_name: str) -> list[dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_scheduled(self, queue_name: str) -> list[dict[str, Any]]:
    scheduled_key = self._scheduled_tasks_key(queue_name)
    tasks_metadata_key = self._tasks_metadata_key(queue_name)

    task_ids = await self.client.zrange(scheduled_key, 0, -1)
    if not task_ids:
        return []

    keys = [
        f"{tasks_metadata_key}:{(tid.decode() if isinstance(tid, bytes) else tid)}"
        for tid in task_ids
    ]
    task_jsons = await self.client.mget(keys)
    return [json.loads(tj) for tj in task_jsons if tj]
```
add_cron
```python
add_cron(
    queue_name: str,
    deterministic_id: str,
    task_cron: dict[str, Any],
) -> bool
```

Source code in src/sheppy/backend/redis.py

```python
async def add_cron(self, queue_name: str, deterministic_id: str, task_cron: dict[str, Any]) -> bool:
    cron_key = self._cron_tasks_key(queue_name)
    return bool(await self.client.hsetnx(cron_key, deterministic_id, json.dumps(task_cron)))  # type: ignore[misc]
```
delete_cron
```python
delete_cron(queue_name: str, deterministic_id: str) -> bool
```

Source code in src/sheppy/backend/redis.py

```python
async def delete_cron(self, queue_name: str, deterministic_id: str) -> bool:
    cron_key = self._cron_tasks_key(queue_name)
    return bool(await self.client.hdel(cron_key, deterministic_id))  # type: ignore[misc]
```
get_crons
```python
get_crons(queue_name: str) -> list[dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_crons(self, queue_name: str) -> list[dict[str, Any]]:
    cron_key = self._cron_tasks_key(queue_name)
    cron_data = await self.client.hvals(cron_key)  # type: ignore[misc]
    return [json.loads(d) for d in cron_data]
```
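A cron-registration sketch. The `task_cron` payload shape is hypothetical; only the `deterministic_id` semantics come from the code above:

```python
# Inside an async function, with `backend` already connected.
cron = {"spec": {"func": "app.tasks.hourly_report"}, "cron": "0 * * * *"}  # shape assumed

# HSETNX semantics: False means this deterministic_id is already registered.
created = await backend.add_cron("default", "hourly-report", cron)

print(created, await backend.get_crons("default"))
await backend.delete_cron("default", "hourly-report")
```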
store_workflow
```python
store_workflow(
    queue_name: str, workflow_data: dict[str, Any]
) -> bool
```

Source code in src/sheppy/backend/redis.py

```python
async def store_workflow(self, queue_name: str, workflow_data: dict[str, Any]) -> bool:
    workflows_key = self._workflows_key(queue_name)
    workflow_id = workflow_data['id']
    pending_key = self._workflow_pending_key(queue_name, workflow_id)
    pending_index_key = self._workflow_pending_index_key(queue_name)
    pending_ids = workflow_data.get('pending_task_ids', [])

    try:
        async with self.client.pipeline(transaction=True) as pipe:
            pipe.hset(workflows_key, workflow_id, json.dumps(workflow_data))
            if self.ttl:
                pipe.hexpire(workflows_key, self.ttl, workflow_id)

            if workflow_data.get('completed') or workflow_data.get('error'):
                pipe.delete(pending_key)
                pipe.srem(pending_index_key, workflow_id)
            elif pending_ids:
                pipe.sadd(pending_key, *pending_ids)
                pipe.sadd(pending_index_key, workflow_id)
                if self.ttl:
                    pipe.expire(pending_key, self.ttl)

            await pipe.execute()
        return True
    except Exception as e:
        raise BackendError(f"Failed to store workflow: {e}") from e
```
get_workflows
```python
get_workflows(
    queue_name: str, workflow_ids: list[str]
) -> dict[str, dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_workflows(self, queue_name: str, workflow_ids: list[str]) -> dict[str, dict[str, Any]]:
    workflows_key = self._workflows_key(queue_name)

    if not workflow_ids:
        return {}

    try:
        data = await self.client.hmget(workflows_key, workflow_ids)  # type: ignore[misc]
        result = {}
        for wf_json in data:
            if wf_json:
                wf = json.loads(wf_json)
                result[wf["id"]] = wf
        return result
    except Exception as e:
        raise BackendError(f"Failed to get workflows: {e}") from e
```
get_all_workflows
```python
get_all_workflows(queue_name: str) -> list[dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_all_workflows(self, queue_name: str) -> list[dict[str, Any]]:
    workflows_key = self._workflows_key(queue_name)

    try:
        all_data = await self.client.hvals(workflows_key)  # type: ignore[misc]
        return [json.loads(wf_json) for wf_json in all_data if wf_json]
    except Exception as e:
        raise BackendError(f"Failed to get all workflows: {e}") from e
```
get_pending_workflows
```python
get_pending_workflows(
    queue_name: str,
) -> list[dict[str, Any]]
```

Source code in src/sheppy/backend/redis.py

```python
async def get_pending_workflows(self, queue_name: str) -> list[dict[str, Any]]:
    workflows_key = self._workflows_key(queue_name)
    pending_index_key = self._workflow_pending_index_key(queue_name)

    try:
        workflow_ids = await self.client.smembers(pending_index_key)  # type: ignore[misc]
        if not workflow_ids:
            return []

        ids = [wid.decode() if isinstance(wid, bytes) else wid for wid in workflow_ids]
        data = await self.client.hmget(workflows_key, ids)  # type: ignore[misc]
        return [json.loads(wf_json) for wf_json in data if wf_json]
    except Exception as e:
        raise BackendError(f"Failed to get pending workflows: {e}") from e
```
delete_workflow
```python
delete_workflow(queue_name: str, workflow_id: str) -> bool
```

Source code in src/sheppy/backend/redis.py

```python
async def delete_workflow(self, queue_name: str, workflow_id: str) -> bool:
    workflows_key = self._workflows_key(queue_name)
    pending_key = self._workflow_pending_key(queue_name, workflow_id)
    pending_index_key = self._workflow_pending_index_key(queue_name)

    try:
        async with self.client.pipeline(transaction=True) as pipe:
            pipe.hdel(workflows_key, workflow_id)
            pipe.delete(pending_key)
            pipe.srem(pending_index_key, workflow_id)
            results = await pipe.execute()
        return int(results[0]) > 0
    except Exception as e:
        raise BackendError(f"Failed to delete workflow: {e}") from e
```
mark_workflow_task_complete
```python
mark_workflow_task_complete(
    queue_name: str, workflow_id: str, task_id: str
) -> int
```

Source code in src/sheppy/backend/redis.py

```python
async def mark_workflow_task_complete(self, queue_name: str, workflow_id: str, task_id: str) -> int:
    pending_key = self._workflow_pending_key(queue_name, workflow_id)

    try:
        async with self.client.pipeline() as pipe:
            pipe.srem(pending_key, task_id)
            pipe.scard(pending_key)
            results = await pipe.execute()

        removed_count = results[0]  # 1 if removed, 0 if not found
        remaining_count = results[1]

        if removed_count == 0:
            return -1  # task not in pending set

        return int(remaining_count)
    except Exception as e:
        raise BackendError(f"Failed to mark workflow task complete: {e}") from e
```
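Interpreting the return value (semantics taken directly from the code above):

```python
# Inside an async function, with `backend` already connected.
remaining = await backend.mark_workflow_task_complete("default", "wf1", "a1")

if remaining == -1:
    print("task was not in the workflow's pending set")
elif remaining == 0:
    print("all workflow tasks finished")  # the caller may now finalize the workflow
else:
    print(f"{remaining} task(s) still pending")
```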