MemoryBackend class reference

In most cases you don't need to interact with backends directly; they are used internally by Queue. For typical usage, see the Queue reference and the Getting Started guides to learn how to configure and use the different backends.

If you need to implement a custom backend or want to understand how existing backends work, here's the reference information for the backend classes.
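
The sketch below shows the backend lifecycle used directly (connect, is_connected, disconnect). It is for illustration only; in normal applications Queue manages the backend for you.

```python
import asyncio

from sheppy import MemoryBackend  # import path inferred from this page's heading


async def main() -> None:
    backend = MemoryBackend()

    await backend.connect()
    assert backend.is_connected  # True once connect() has been awaited

    # ... call the backend methods documented below ...

    await backend.disconnect()
    assert not backend.is_connected


asyncio.run(main())
```

In the snippets for the individual methods below, assume `backend` is a connected MemoryBackend and the calls run inside an async function. The flat task dictionaries used in the examples are simplified placeholders; in real usage Queue builds the full task payload.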

sheppy.MemoryBackend

MemoryBackend()

Bases: Backend

Source code in src/sheppy/backend/memory.py
def __init__(self) -> None:
    self._task_metadata: dict[str, dict[str, dict[str, Any]]] = defaultdict(dict)  # {QUEUE_NAME: {TASK_ID: task_data}}
    self._pending: dict[str, list[str]] = defaultdict(list)
    self._scheduled: dict[str, list[ScheduledTask]] = defaultdict(list)
    self._crons: dict[str, dict[str, dict[str, Any]]] = defaultdict(dict)

    self._locks: dict[str, asyncio.Lock] = defaultdict(asyncio.Lock)  # for thread-safety
    self._connected = False

is_connected

is_connected: bool

connect

connect() -> None
Source code in src/sheppy/backend/memory.py
async def connect(self) -> None:
    self._connected = True

disconnect

disconnect() -> None
Source code in src/sheppy/backend/memory.py
async def disconnect(self) -> None:
    self._connected = False

append

append(
    queue_name: str,
    tasks: list[dict[str, Any]],
    unique: bool = True,
) -> list[bool]
Source code in src/sheppy/backend/memory.py
async def append(self, queue_name: str, tasks: list[dict[str, Any]], unique: bool = True) -> list[bool]:
    self._check_connected()

    if unique:
        success = await self._create_tasks(queue_name, tasks)
        to_queue = [t for i, t in enumerate(tasks) if success[i]]
    else:
        success = [True] * len(tasks)
        to_queue = tasks

    async with self._locks[queue_name]:
        for task in to_queue:
            if not unique:
                self._task_metadata[queue_name][task["id"]] = task

            self._pending[queue_name].append(task["id"])

        return success
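
A minimal sketch of enqueuing simplified task dicts with append(). With unique=True, IDs the backend has already stored are rejected and get False in the returned list; with unique=False the payloads are (re)stored and always queued.

```python
tasks = [{"id": "task-1"}, {"id": "task-2"}]  # placeholder payloads; only "id" is required here

accepted = await backend.append("default", tasks, unique=True)
print(accepted)   # [True, True] on first insert

accepted = await backend.append("default", tasks, unique=True)
print(accepted)   # [False, False]: the IDs are already known to the backend
```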

pop

pop(
    queue_name: str,
    limit: int = 1,
    timeout: float | None = None,
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def pop(self, queue_name: str, limit: int = 1, timeout: float | None = None) -> list[dict[str, Any]]:
    self._check_connected()

    start_time = asyncio.get_event_loop().time()

    while True:
        async with self._locks[queue_name]:
            if self._pending[queue_name]:
                tasks = []
                q = self._pending[queue_name]

                for _ in range(min(limit, len(q))):
                    task_id = q.pop(0)
                    task_data = self._task_metadata[queue_name].get(task_id)
                    if task_data:
                        tasks.append(task_data)

                return tasks

        if timeout is None or timeout <= 0:
            return []

        elapsed = asyncio.get_event_loop().time() - start_time
        if elapsed >= timeout:
            return []

        await asyncio.sleep(min(0.05, timeout - elapsed))
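
pop() removes task IDs from the pending list and returns their stored payloads. With timeout=None (or a non-positive value) it returns immediately, possibly with an empty list; with a positive timeout it polls in 50 ms steps until tasks arrive or the timeout elapses.

```python
# Non-blocking: returns [] immediately if nothing is pending
batch = await backend.pop("default", limit=10)

# Polls for up to 5 seconds before giving up
batch = await backend.pop("default", limit=10, timeout=5.0)
for task_data in batch:
    print(task_data["id"])
```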

get_pending

get_pending(
    queue_name: str, count: int = 1
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_pending(self, queue_name: str, count: int = 1) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        task_ids = list(self._pending[queue_name])[:count]

        tasks = []
        for t in task_ids:
            if task_data := self._task_metadata[queue_name].get(t):
                tasks.append(task_data)

        return tasks
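
Unlike pop(), get_pending() only peeks at the head of the queue; the tasks stay pending.

```python
preview = await backend.get_pending("default", count=5)
print([t["id"] for t in preview])   # nothing is removed from the queue
```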

size

size(queue_name: str) -> int
Source code in src/sheppy/backend/memory.py
async def size(self, queue_name: str) -> int:
    self._check_connected()

    async with self._locks[queue_name]:
        return len(self._pending[queue_name])

clear

clear(queue_name: str) -> int
Source code in src/sheppy/backend/memory.py
async def clear(self, queue_name: str) -> int:
    self._check_connected()

    async with self._locks[queue_name]:
        queue_size = len(self._task_metadata[queue_name])
        queue_cron_size = len(self._crons[queue_name])

        self._task_metadata[queue_name].clear()
        self._pending[queue_name].clear()
        self._scheduled[queue_name].clear()
        self._crons[queue_name].clear()

        return queue_size + queue_cron_size
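
clear() wipes all state for a queue (task metadata, pending list, scheduled heap and cron entries) and returns the number of stored tasks plus cron entries removed. This can differ from size(), which only counts pending tasks.

```python
pending = await backend.size("default")   # pending tasks only
removed = await backend.clear("default")  # stored tasks + cron entries
print(f"{pending} pending, {removed} entries removed")
```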

get_tasks

get_tasks(
    queue_name: str, task_ids: list[str]
) -> dict[str, dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_tasks(self, queue_name: str, task_ids: list[str]) -> dict[str,dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        results = {}
        for task_id in task_ids:
            result = self._task_metadata[queue_name].get(task_id)
            if result:
                results[task_id] = result

        return results
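
get_tasks() looks up stored payloads by ID; unknown IDs are simply omitted from the result.

```python
found = await backend.get_tasks("default", ["task-1", "no-such-id"])
print(found.keys())   # only IDs the backend knows about
```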

schedule

schedule(
    queue_name: str,
    task_data: dict[str, Any],
    at: datetime,
    unique: bool = True,
) -> bool
Source code in src/sheppy/backend/memory.py
async def schedule(self, queue_name: str, task_data: dict[str, Any], at: datetime, unique: bool = True) -> bool:
    self._check_connected()

    if unique:
        success = await self._create_tasks(queue_name, [task_data])
        if not success[0]:
            return False

    async with self._locks[queue_name]:
        if not unique:
            self._task_metadata[queue_name][task_data["id"]] = task_data

        scheduled_task = ScheduledTask(at, task_data["id"])
        heapq.heappush(self._scheduled[queue_name], scheduled_task)

        return True
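
schedule() stores the payload and pushes a (time, id) entry onto a per-queue heap. A sketch scheduling a simplified task dict one minute from now:

```python
from datetime import datetime, timedelta, timezone

run_at = datetime.now(timezone.utc) + timedelta(minutes=1)
ok = await backend.schedule("default", {"id": "task-3"}, at=run_at)
print(ok)  # False only if unique=True and the ID was already stored
```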

pop_scheduled

pop_scheduled(
    queue_name: str, now: datetime | None = None
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def pop_scheduled(self, queue_name: str, now: datetime | None = None) -> list[dict[str, Any]]:
    self._check_connected()

    if now is None:
        now = datetime.now(timezone.utc)

    async with self._locks[queue_name]:
        tasks = []
        scheduled_tasks = self._scheduled[queue_name]

        while scheduled_tasks and scheduled_tasks[0].scheduled_time <= now:
            scheduled_task = heapq.heappop(scheduled_tasks)
            task_data = self._task_metadata[queue_name].get(scheduled_task.task_id)
            if task_data:
                tasks.append(task_data)

        return tasks
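
pop_scheduled() drains every scheduled entry whose time is due (scheduled_time <= now) and returns the matching payloads; a scheduler would typically hand them back to the pending queue. Passing now explicitly is mainly useful in tests.

```python
from datetime import datetime, timezone

due = await backend.pop_scheduled("default", now=datetime.now(timezone.utc))
if due:
    # The payloads are already stored, so unique=False skips the ID check
    await backend.append("default", due, unique=False)
```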

store_result

store_result(
    queue_name: str, task_data: dict[str, Any]
) -> bool
Source code in src/sheppy/backend/memory.py
async def store_result(self, queue_name: str, task_data: dict[str, Any]) -> bool:
    self._check_connected()

    async with self._locks[queue_name]:
        self._task_metadata[queue_name][task_data['id']] = task_data

        return True

get_results

get_results(
    queue_name: str,
    task_ids: list[str],
    timeout: float | None = None,
) -> dict[str, dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_results(self, queue_name: str, task_ids: list[str], timeout: float | None = None) -> dict[str,dict[str, Any]]:
    self._check_connected()

    start_time = asyncio.get_event_loop().time()

    if not task_ids:
        return {}

    results = {}
    remaining_ids = task_ids[:]

    while True:
        async with self._locks[queue_name]:
            for task_id in task_ids:
                task_data = self._task_metadata[queue_name].get(task_id, {})

                if task_data.get("finished_at"):
                    results[task_id] = task_data
                    remaining_ids.remove(task_id)

        if not remaining_ids:
            return results

        if timeout is None or timeout < 0:
            return results

        # endless wait if timeout == 0
        if timeout == 0:
            await asyncio.sleep(0.05)
            continue

        elapsed = asyncio.get_event_loop().time() - start_time
        if elapsed >= timeout:
            raise TimeoutError(f"Did not complete within {timeout} seconds")

        await asyncio.sleep(min(0.05, timeout - elapsed))
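
store_result() overwrites the stored payload for a task (a worker calls it with the finished task data), and get_results() only returns payloads whose finished_at field is set. Mind the timeout semantics: None or a negative value returns whatever is finished right now, 0 waits indefinitely, and a positive value raises TimeoutError if some tasks are still unfinished when it expires. The dict below is a simplified placeholder.

```python
await backend.store_result(
    "default",
    {"id": "task-1", "finished_at": "2024-01-01T00:00:00+00:00", "result": 42},
)

# Return immediately with whatever is finished
partial = await backend.get_results("default", ["task-1", "task-2"])

# Wait up to 10 seconds for both tasks; TimeoutError if they don't finish in time
try:
    results = await backend.get_results("default", ["task-1", "task-2"], timeout=10.0)
except TimeoutError:
    results = {}
```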

get_stats

get_stats(queue_name: str) -> dict[str, int]
Source code in src/sheppy/backend/memory.py
async def get_stats(self, queue_name: str) -> dict[str, int]:
    self._check_connected()

    async with self._locks[queue_name]:
        return {
            "pending": len(self._pending[queue_name]),
            "completed": len([t for t in self._task_metadata[queue_name].values() if t["finished_at"]]),
            "scheduled": len(self._scheduled[queue_name]),
        }
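
get_stats() returns a per-queue snapshot. The completed count is derived from stored task metadata (payloads with a finished_at value), so it reflects results saved via store_result().

```python
print(await backend.get_stats("default"))
# e.g. {'pending': 3, 'completed': 1, 'scheduled': 2}
```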

get_all_tasks

get_all_tasks(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_all_tasks(self, queue_name: str) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        tasks = self._task_metadata[queue_name]
        return list(tasks.values())

list_queues

list_queues() -> dict[str, int]
Source code in src/sheppy/backend/memory.py
async def list_queues(self) -> dict[str, int]:
    self._check_connected()

    queues = {}
    for queue_name in self._task_metadata:
        async with self._locks[queue_name]:
            queues[queue_name] = len(self._pending[queue_name])

    return queues
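
list_queues() maps each known queue name to its current pending count.

```python
print(await backend.list_queues())   # e.g. {'default': 3, 'emails': 0}
```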

get_scheduled

get_scheduled(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_scheduled(self, queue_name: str) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        tasks = []
        for scheduled_task in self._scheduled[queue_name]:
            task_data = self._task_metadata[queue_name].get(scheduled_task.task_id)
            if task_data:
                tasks.append(task_data)

        return tasks

add_cron

add_cron(
    queue_name: str,
    deterministic_id: str,
    task_cron: dict[str, Any],
) -> bool
Source code in src/sheppy/backend/memory.py
async def add_cron(self, queue_name: str, deterministic_id: str, task_cron: dict[str, Any]) -> bool:
    self._check_connected()

    async with self._locks[queue_name]:
        if deterministic_id not in self._crons[queue_name]:
            self._crons[queue_name][deterministic_id] = task_cron
            return True
        return False

delete_cron

delete_cron(queue_name: str, deterministic_id: str) -> bool
Source code in src/sheppy/backend/memory.py
async def delete_cron(self, queue_name: str, deterministic_id: str) -> bool:
    self._check_connected()

    async with self._locks[queue_name]:
        if deterministic_id in self._crons[queue_name]:
            del self._crons[queue_name][deterministic_id]
            return True
        return False

get_crons

get_crons(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_crons(self, queue_name: str) -> list[dict[str, Any]]:
    self._check_connected()

    async with self._locks[queue_name]:
        return list(self._crons[queue_name].values())
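
The cron methods store cron definitions keyed by a deterministic ID, so registering the same ID twice is a no-op that returns False. The payload dict below is an opaque placeholder; the real cron schema is produced by Queue.

```python
cron = {"spec": "*/5 * * * *"}  # placeholder payload

assert await backend.add_cron("default", "report-every-5-min", cron) is True
assert await backend.add_cron("default", "report-every-5-min", cron) is False  # already registered

print(await backend.get_crons("default"))                          # [{'spec': '*/5 * * * *'}]
assert await backend.delete_cron("default", "report-every-5-min") is True
```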