MemoryBackend class reference

In most cases you don't need to interact with backends directly; they are used internally by Queue. For typical usage, see the Queue reference and the Getting Started guides to learn how to configure and use the different backends.

If you need to implement a custom backend, or want to understand how the existing backends work, this page provides the reference for the backend classes.
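
For illustration, here is a minimal sketch of driving the backend directly, assuming only what the source below shows: tasks are plain dicts and the queue operations key on their "id" field. The dict here is a simplified stand-in; real task payloads are produced by Queue.

import asyncio

from sheppy import MemoryBackend

async def main() -> None:
    backend = MemoryBackend(instant_processing=False)
    await backend.connect()

    # "id" is the only key the raw queue operations below rely on;
    # real payloads carry much more and are built by Queue.
    ok = await backend.append("default", [{"id": "task-1"}])
    print(ok)                            # expected: [True]

    print(await backend.pop("default"))  # expected: [{'id': 'task-1'}]

    await backend.disconnect()

asyncio.run(main())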

sheppy.MemoryBackend

MemoryBackend(
    *,
    instant_processing: bool = True,
    dependency_overrides: dict[
        Callable[..., Any], Callable[..., Any]
    ]
    | None = None,
)

Bases: Backend

Source code in src/sheppy/backend/memory.py
def __init__(self,
             *,
             instant_processing: bool = True,
             dependency_overrides: dict[Callable[..., Any], Callable[..., Any]] | None = None,
             ) -> None:

    self._task_metadata: dict[str, dict[str, dict[str, Any]]] = defaultdict(dict)  # {QUEUE_NAME: {TASK_ID: task_data}}
    self._pending: dict[str, list[str]] = defaultdict(list)
    self._scheduled: dict[str, list[ScheduledTask]] = defaultdict(list)
    self._crons: dict[str, dict[str, dict[str, Any]]] = defaultdict(dict)
    self._workflows: dict[str, dict[str, dict[str, Any]]] = defaultdict(dict)  # {QUEUE_NAME: {WORKFLOW_ID: workflow_data}}

    self._rate_limits: dict[str, list[float]] = defaultdict(list)
    self._locks: dict[str, asyncio.Lock] = defaultdict(asyncio.Lock)  # guards concurrent access within the event loop (asyncio.Lock is not thread-safe)
    self._connected = False

    self._instant_processing = instant_processing
    self._task_processor = TaskProcessor(dependency_overrides=dependency_overrides)
    self._worker_id = "MemoryBackend"
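
The two keyword arguments control instant processing and dependency injection. A minimal construction sketch; get_settings and test_settings are hypothetical callables used only to illustrate the dependency_overrides mapping:

def get_settings() -> dict:
    return {"env": "prod"}

def test_settings() -> dict:
    return {"env": "test"}

backend = MemoryBackend(
    instant_processing=True,  # process tasks as soon as they are appended
    dependency_overrides={get_settings: test_settings},  # swap dependencies, e.g. in tests
)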

is_connected

is_connected: bool

connect

connect() -> None
Source code in src/sheppy/backend/memory.py
async def connect(self) -> None:
    self._connected = True

disconnect

disconnect() -> None
Source code in src/sheppy/backend/memory.py
async def disconnect(self) -> None:
    self._connected = False

append

append(
    queue_name: str,
    tasks: list[dict[str, Any]],
    unique: bool = True,
) -> list[bool]
Source code in src/sheppy/backend/memory.py
async def append(self, queue_name: str, tasks: list[dict[str, Any]], unique: bool = True) -> list[bool]:
    self._check_connected()

    if unique:
        success = await self._create_tasks(queue_name, tasks)
        to_queue = [t for i, t in enumerate(tasks) if success[i]]
    else:
        success = [True] * len(tasks)
        to_queue = tasks

    async with self._locks[queue_name]:
        for task in to_queue:
            if not unique:
                self._task_metadata[queue_name][task["id"]] = task

            self._pending[queue_name].append(task["id"])

    if self._instant_processing:
        for i, task_data in enumerate(tasks):
            if success[i]:
                await self._process_task(queue_name, task_data)

    return success
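
Usage sketch (inside an async context, with a connected backend). The exact dedup behavior lives in _create_tasks, which isn't shown here, but the unique flag implies that re-appending an already known task id is rejected:

print(await backend.append("default", [{"id": "a"}]))                # expected: [True]
print(await backend.append("default", [{"id": "a"}]))                # expected: [False] (id already known)
print(await backend.append("default", [{"id": "a"}], unique=False))  # [True] -- no uniqueness check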

pop

pop(
    queue_name: str,
    limit: int = 1,
    timeout: float | None = None,
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def pop(self, queue_name: str, limit: int = 1, timeout: float | None = None) -> list[dict[str, Any]]:
    self._check_connected()

    start_time = asyncio.get_event_loop().time()

    while True:
        async with self._locks[queue_name]:
            if self._pending[queue_name]:
                tasks = []
                q = self._pending[queue_name]

                for _ in range(min(limit, len(q))):
                    task_id = q.pop(0)
                    task_data = self._task_metadata[queue_name].get(task_id)
                    if task_data:
                        tasks.append(task_data)

                return tasks

        if timeout is None or timeout <= 0:
            return []

        elapsed = asyncio.get_event_loop().time() - start_time
        if elapsed >= timeout:
            return []

        await asyncio.sleep(min(0.05, timeout - elapsed))
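
pop is non-blocking by default: with timeout=None (or a non-positive timeout) it returns immediately, yielding an empty list when nothing is pending. With a positive timeout it polls roughly every 50 ms until tasks arrive or the deadline passes. For example:

tasks = await backend.pop("default")                        # returns at once, possibly []
tasks = await backend.pop("default", limit=5, timeout=2.0)  # wait up to 2 s for at most 5 tasks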

get_pending

get_pending(
    queue_name: str, count: int = 1
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_pending(self, queue_name: str, count: int = 1) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        task_ids = list(self._pending[queue_name])[:count]

        tasks = []
        for t in task_ids:
            if task_data := self._task_metadata[queue_name].get(t):
                tasks.append(task_data)

        return tasks

size

size(queue_name: str) -> int
Source code in src/sheppy/backend/memory.py
async def size(self, queue_name: str) -> int:
    self._check_connected()

    async with self._locks[queue_name]:
        return len(self._pending[queue_name])

clear

clear(queue_name: str) -> int
Source code in src/sheppy/backend/memory.py
async def clear(self, queue_name: str) -> int:
    self._check_connected()

    async with self._locks[queue_name]:
        queue_size = len(self._task_metadata[queue_name])
        queue_cron_size = len(self._crons[queue_name])

        self._task_metadata[queue_name].clear()
        self._pending[queue_name].clear()
        self._scheduled[queue_name].clear()
        self._crons[queue_name].clear()

        rl_keys = [k for k in self._rate_limits if k.startswith(f"{queue_name}:")]
        for k in rl_keys:
            del self._rate_limits[k]

        return queue_size + queue_cron_size

get_tasks

get_tasks(
    queue_name: str, task_ids: list[str]
) -> dict[str, dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_tasks(self, queue_name: str, task_ids: list[str]) -> dict[str, dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        results = {}
        for task_id in task_ids:
            result = self._task_metadata[queue_name].get(task_id)
            if result:
                results[task_id] = result

        return results

schedule

schedule(
    queue_name: str,
    task_data: dict[str, Any],
    at: datetime,
    unique: bool = True,
) -> bool
Source code in src/sheppy/backend/memory.py
async def schedule(self, queue_name: str, task_data: dict[str, Any], at: datetime, unique: bool = True) -> bool:
    self._check_connected()

    if unique:
        success = await self._create_tasks(queue_name, [task_data])
        if not success[0]:
            return False

    async with self._locks[queue_name]:
        if not unique:
            self._task_metadata[queue_name][task_data["id"]] = task_data

    if self._instant_processing:
        await self.append(queue_name, [task_data], unique=False)
    else:
        async with self._locks[queue_name]:
            scheduled_task = ScheduledTask(at, task_data["id"])
            heapq.heappush(self._scheduled[queue_name], scheduled_task)

    return True
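
Note that with instant_processing=True the task is appended (and processed) immediately, regardless of at; only with instant_processing=False does it land on the scheduled heap, to be released by pop_scheduled. A sketch, inside an async context:

from datetime import datetime, timedelta, timezone

backend = MemoryBackend(instant_processing=False)
await backend.connect()

at = datetime.now(timezone.utc) + timedelta(seconds=30)
await backend.schedule("default", {"id": "later"}, at=at)

print(await backend.pop_scheduled("default"))          # expected: [] (not due yet)
print(await backend.pop_scheduled("default", now=at))  # expected: [{'id': 'later'}]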

pop_scheduled

pop_scheduled(
    queue_name: str, now: datetime | None = None
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def pop_scheduled(self, queue_name: str, now: datetime | None = None) -> list[dict[str, Any]]:
    self._check_connected()

    if now is None:
        now = datetime.now(timezone.utc)

    async with self._locks[queue_name]:
        tasks = []
        scheduled_tasks = self._scheduled[queue_name]

        while scheduled_tasks and scheduled_tasks[0].scheduled_time <= now:
            scheduled_task = heapq.heappop(scheduled_tasks)
            task_data = self._task_metadata[queue_name].get(scheduled_task.task_id)
            if task_data:
                tasks.append(task_data)

        return tasks

store_result

store_result(
    queue_name: str, task_data: dict[str, Any]
) -> bool
Source code in src/sheppy/backend/memory.py
async def store_result(self, queue_name: str, task_data: dict[str, Any]) -> bool:
    self._check_connected()

    async with self._locks[queue_name]:
        self._task_metadata[queue_name][task_data['id']] = task_data

        return True

get_results

get_results(
    queue_name: str,
    task_ids: list[str],
    timeout: float | None = None,
) -> dict[str, dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_results(self, queue_name: str, task_ids: list[str], timeout: float | None = None) -> dict[str, dict[str, Any]]:
    self._check_connected()

    start_time = asyncio.get_event_loop().time()

    if not task_ids:
        return {}

    results = {}
    remaining_ids = task_ids[:]

    while True:
        async with self._locks[queue_name]:
            for task_id in remaining_ids[:]:
                task_data = self._task_metadata[queue_name].get(task_id, {})

                if task_data.get("finished_at"):
                    results[task_id] = task_data
                    remaining_ids.remove(task_id)

        if not remaining_ids:
            return results

        if timeout is None or timeout < 0:
            return results

        # timeout == 0 means wait indefinitely
        if timeout == 0:
            await asyncio.sleep(0.05)
            continue

        elapsed = asyncio.get_event_loop().time() - start_time
        if elapsed >= timeout:
            raise TimeoutError(f"Did not complete within {timeout} seconds")

        await asyncio.sleep(min(0.05, timeout - elapsed))
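
The timeout semantics follow directly from the loop above: timeout=None (or negative) returns immediately with whatever has finished, timeout=0 waits indefinitely, and a positive timeout polls until the deadline and then raises TimeoutError. For example:

done = await backend.get_results("default", ["task-1"])               # snapshot, no waiting
done = await backend.get_results("default", ["task-1"], timeout=5.0)  # TimeoutError if unfinished after 5 s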

get_stats

get_stats(queue_name: str) -> dict[str, int]
Source code in src/sheppy/backend/memory.py
async def get_stats(self, queue_name: str) -> dict[str, int]:
    self._check_connected()

    async with self._locks[queue_name]:
        return {
            "pending": len(self._pending[queue_name]),
            "completed": len([t for t in self._task_metadata[queue_name].values() if t["finished_at"]]),
            "scheduled": len(self._scheduled[queue_name]),
        }

get_all_tasks

get_all_tasks(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_all_tasks(self, queue_name: str) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        tasks = self._task_metadata[queue_name]
        return list(tasks.values())

list_queues

list_queues() -> dict[str, int]
Source code in src/sheppy/backend/memory.py
async def list_queues(self) -> dict[str, int]:
    self._check_connected()

    queues = {}
    for queue_name in self._task_metadata:
        async with self._locks[queue_name]:
            queues[queue_name] = len(self._pending[queue_name])

    return queues

get_scheduled

get_scheduled(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_scheduled(self, queue_name: str) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        tasks = []
        for scheduled_task in self._scheduled[queue_name]:
            task_data = self._task_metadata[queue_name].get(scheduled_task.task_id)
            if task_data:
                tasks.append(task_data)

        return tasks

add_cron

add_cron(
    queue_name: str,
    deterministic_id: str,
    task_cron: dict[str, Any],
) -> bool
Source code in src/sheppy/backend/memory.py
async def add_cron(self, queue_name: str, deterministic_id: str, task_cron: dict[str, Any]) -> bool:
    self._check_connected()

    async with self._locks[queue_name]:
        if deterministic_id not in self._crons[queue_name]:
            self._crons[queue_name][deterministic_id] = task_cron
            return True
        return False
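
The deterministic_id makes registration idempotent: the first call inserts, repeats are rejected. The payload shape below is illustrative only; the real cron dict is produced by Queue:

cron = {"schedule": "*/5 * * * *"}
print(await backend.add_cron("default", "report-cron", cron))  # expected: True
print(await backend.add_cron("default", "report-cron", cron))  # expected: False (id already present)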

delete_cron

delete_cron(queue_name: str, deterministic_id: str) -> bool
Source code in src/sheppy/backend/memory.py
async def delete_cron(self, queue_name: str, deterministic_id: str) -> bool:
    self._check_connected()

    async with self._locks[queue_name]:
        if deterministic_id in self._crons[queue_name]:
            del self._crons[queue_name][deterministic_id]
            return True
        return False

get_crons

get_crons(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_crons(self, queue_name: str) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        return list(self._crons[queue_name].values())

store_workflow

store_workflow(
    queue_name: str, workflow_data: dict[str, Any]
) -> bool
Source code in src/sheppy/backend/memory.py
async def store_workflow(self, queue_name: str, workflow_data: dict[str, Any]) -> bool:
    self._check_connected()

    async with self._locks[queue_name]:
        self._workflows[queue_name][workflow_data["id"]] = workflow_data
        return True

get_workflows

get_workflows(
    queue_name: str, workflow_ids: list[str]
) -> dict[str, dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_workflows(self, queue_name: str, workflow_ids: list[str]) -> dict[str, dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        results = {}
        for wf_id in workflow_ids:
            result = self._workflows[queue_name].get(wf_id)
            if result:
                results[wf_id] = result
        return results

get_all_workflows

get_all_workflows(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_all_workflows(self, queue_name: str) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        return list(self._workflows[queue_name].values())

get_pending_workflows

get_pending_workflows(
    queue_name: str,
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_pending_workflows(self, queue_name: str) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        return [
            wf for wf in self._workflows[queue_name].values()
            if not wf.get("completed") and not wf.get("error")
        ]

delete_workflow

delete_workflow(queue_name: str, workflow_id: str) -> bool
Source code in src/sheppy/backend/memory.py
async def delete_workflow(self, queue_name: str, workflow_id: str) -> bool:
    self._check_connected()

    async with self._locks[queue_name]:
        if workflow_id in self._workflows[queue_name]:
            del self._workflows[queue_name][workflow_id]
            return True
        return False

mark_workflow_task_complete

mark_workflow_task_complete(
    queue_name: str, workflow_id: str, task_id: str
) -> int
Source code in src/sheppy/backend/memory.py
async def mark_workflow_task_complete(self, queue_name: str, workflow_id: str, task_id: str) -> int:
    self._check_connected()

    async with self._locks[queue_name]:
        workflow = self._workflows[queue_name].get(workflow_id)
        if not workflow:
            return -1

        pending_ids = workflow.get("pending_task_ids", [])
        if task_id not in pending_ids:
            return -1

        pending_ids = [tid for tid in pending_ids if tid != task_id]
        workflow["pending_task_ids"] = pending_ids

        return len(pending_ids)
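
The return value is -1 when the workflow or task id is unknown, otherwise the number of task ids still pending. A sketch with a minimal illustrative workflow dict:

wf = {"id": "wf-1", "pending_task_ids": ["t1", "t2"]}
await backend.store_workflow("default", wf)

print(await backend.mark_workflow_task_complete("default", "wf-1", "t1"))  # expected: 1 (t2 still pending)
print(await backend.mark_workflow_task_complete("default", "wf-1", "t1"))  # expected: -1 (t1 no longer pending)
print(await backend.mark_workflow_task_complete("default", "wf-1", "t2"))  # expected: 0 (workflow drained)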

acquire_rate_limit

acquire_rate_limit(
    queue_name: str,
    key: str,
    max_rate: int,
    rate_period: float,
    task_id: str,
    strategy: str = "sliding_window",
) -> float | None
Source code in src/sheppy/backend/memory.py
async def acquire_rate_limit(self, queue_name: str, key: str, max_rate: int, rate_period: float, task_id: str, strategy: str = "sliding_window") -> float | None:
    self._check_connected()

    if strategy == "fixed_window":
        return await self._acquire_fixed_window(queue_name, key, max_rate, rate_period)

    return await self._acquire_sliding_window(queue_name, key, max_rate, rate_period)
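
The method itself only dispatches on strategy; the window helpers aren't shown here, so the meaning of the float | None return (presumably None when the slot is granted and a retry delay otherwise) should be confirmed against those helpers. Call sketch:

wait = await backend.acquire_rate_limit(
    "default",
    key="api-calls",
    max_rate=10,        # at most 10 acquisitions...
    rate_period=60.0,   # ...per 60-second window
    task_id="task-1",
    strategy="fixed_window",  # default is "sliding_window"
)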