MemoryBackend class reference
In most cases, you don't need to interact with backends directly, as they are used internally by Queue. For typical usage, see the Queue reference and the Getting Started guides to learn how to configure and use the different backends.
If you need to implement a custom backend or want to understand how existing backends work, here's the reference information for the backend classes.
sheppy.MemoryBackend
MemoryBackend(
*,
instant_processing: bool = True,
dependency_overrides: dict[
Callable[..., Any], Callable[..., Any]
]
| None = None,
)
Bases: Backend
Source code in src/sheppy/backend/memory.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
def __init__(self,
             *,
             instant_processing: bool = True,
             dependency_overrides: dict[Callable[..., Any], Callable[..., Any]] | None = None,
             ) -> None:
    """Initialize the in-memory backend's per-queue state containers."""
    # All per-queue stores are defaultdicts so an unknown queue name lazily
    # materializes empty state instead of raising KeyError.
    self._task_metadata: dict[str, dict[str, dict[str, Any]]] = defaultdict(dict)  # {queue_name: {task_id: task_data}}
    self._pending: dict[str, list[str]] = defaultdict(list)
    self._scheduled: dict[str, list[ScheduledTask]] = defaultdict(list)
    self._crons: dict[str, dict[str, dict[str, Any]]] = defaultdict(dict)
    # One asyncio.Lock per queue serializes mutations of that queue's state.
    self._locks: dict[str, asyncio.Lock] = defaultdict(asyncio.Lock)
    self._connected = False
    self._instant_processing = instant_processing
    self._task_processor = TaskProcessor(dependency_overrides=dependency_overrides)
    self._worker_id = "MemoryBackend"
connect
Source code in src/sheppy/backend/memory.py
async def connect(self) -> None:
    """Flag the backend as connected; memory needs no real connection."""
    self._connected = True
disconnect
Source code in src/sheppy/backend/memory.py
async def disconnect(self) -> None:
    """Flag the backend as disconnected; nothing to tear down in memory."""
    self._connected = False
append
append(
queue_name: str,
tasks: list[dict[str, Any]],
unique: bool = True,
) -> list[bool]
Source code in src/sheppy/backend/memory.py
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86 | async def append(self, queue_name: str, tasks: list[dict[str, Any]], unique: bool = True) -> list[bool]:
# Enqueue task dicts onto queue_name; returns one bool per input task
# indicating whether that task was accepted.
self._check_connected()
if unique:
# _create_tasks registers metadata and reports per-task success
# (presumably False for already-known task ids — TODO confirm against
# _create_tasks' implementation).
success = await self._create_tasks(queue_name, tasks)
to_queue = [t for i, t in enumerate(tasks) if success[i]]
else:
# Non-unique path accepts everything; metadata is written below under the lock.
success = [True] * len(tasks)
to_queue = tasks
async with self._locks[queue_name]:
for task in to_queue:
if not unique:
self._task_metadata[queue_name][task["id"]] = task
self._pending[queue_name].append(task["id"])
if self._instant_processing:
# Instant mode: run each accepted task right away instead of waiting
# for a worker to pop it.
for i, task_data in enumerate(tasks):
if success[i]:
await self._process_task(queue_name, task_data)
return success
|
pop
pop(
queue_name: str,
limit: int = 1,
timeout: float | None = None,
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114 | async def pop(self, queue_name: str, limit: int = 1, timeout: float | None = None) -> list[dict[str, Any]]:
self._check_connected()
start_time = asyncio.get_event_loop().time()
while True:
async with self._locks[queue_name]:
if self._pending[queue_name]:
tasks = []
q = self._pending[queue_name]
for _ in range(min(limit, len(q))):
task_id = q.pop(0)
task_data = self._task_metadata[queue_name].get(task_id)
if task_data:
tasks.append(task_data)
return tasks
if timeout is None or timeout <= 0:
return []
elapsed = asyncio.get_event_loop().time() - start_time
if elapsed >= timeout:
return []
await asyncio.sleep(min(0.05, timeout - elapsed))
|
get_pending
get_pending(
queue_name: str, count: int = 1
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
116
117
118
119
120
121
122
123
124
125
126
async def get_pending(self, queue_name: str, count: int = 1) -> list[dict[str, Any]]:
    """Return task data for up to *count* pending tasks without removing them."""
    self._check_connected()
    async with self._locks[queue_name]:
        head = list(self._pending[queue_name])[:count]
        metadata = self._task_metadata[queue_name]
        # Ids whose metadata is missing (or empty) are silently skipped.
        return [data for tid in head if (data := metadata.get(tid))]
size
size(queue_name: str) -> int
Source code in src/sheppy/backend/memory.py
async def size(self, queue_name: str) -> int:
    """Return the number of tasks currently pending on *queue_name*."""
    self._check_connected()
    async with self._locks[queue_name]:
        pending = self._pending[queue_name]
        return len(pending)
clear
clear(queue_name: str) -> int
Source code in src/sheppy/backend/memory.py
136
137
138
139
140
141
142
143
144
145
146
147
async def clear(self, queue_name: str) -> int:
    """Wipe all state for *queue_name*; return the count of tasks plus crons removed."""
    self._check_connected()
    async with self._locks[queue_name]:
        # Count before clearing: tasks (by metadata) plus registered crons.
        removed = len(self._task_metadata[queue_name]) + len(self._crons[queue_name])
        for store in (self._task_metadata, self._pending, self._scheduled, self._crons):
            store[queue_name].clear()
        return removed
get_tasks
get_tasks(
queue_name: str, task_ids: list[str]
) -> dict[str, dict[str, Any]]
Source code in src/sheppy/backend/memory.py
150
151
152
153
154
155
156
157
158
159
async def get_tasks(self, queue_name: str, task_ids: list[str]) -> dict[str, dict[str, Any]]:
    """Map each known id in *task_ids* to its task data; unknown ids are omitted."""
    self._check_connected()
    async with self._locks[queue_name]:
        metadata = self._task_metadata[queue_name]
        return {tid: data for tid in task_ids if (data := metadata.get(tid))}
schedule
schedule(
queue_name: str,
task_data: dict[str, Any],
at: datetime,
unique: bool = True,
) -> bool
Source code in src/sheppy/backend/memory.py
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181 | async def schedule(self, queue_name: str, task_data: dict[str, Any], at: datetime, unique: bool = True) -> bool:
# Schedule task_data for execution at `at`; returns False only when unique
# creation reports the task as already existing.
self._check_connected()
if unique:
success = await self._create_tasks(queue_name, [task_data])
if not success[0]:
return False
async with self._locks[queue_name]:
if not unique:
self._task_metadata[queue_name][task_data["id"]] = task_data
if self._instant_processing:
# Instant mode runs the task immediately via append and ignores `at`;
# unique=False because creation/registration already happened above.
await self.append(queue_name, [task_data], unique=False)
else:
async with self._locks[queue_name]:
# Min-heap keyed by ScheduledTask ordering, popped by pop_scheduled().
scheduled_task = ScheduledTask(at, task_data["id"])
heapq.heappush(self._scheduled[queue_name], scheduled_task)
return True
|
pop_scheduled
pop_scheduled(
queue_name: str, now: datetime | None = None
) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199 | async def pop_scheduled(self, queue_name: str, now: datetime | None = None) -> list[dict[str, Any]]:
self._check_connected()
if now is None:
now = datetime.now(timezone.utc)
async with self._locks[queue_name]:
tasks = []
scheduled_tasks = self._scheduled[queue_name]
while scheduled_tasks and scheduled_tasks[0].scheduled_time <= now:
scheduled_task = heapq.heappop(scheduled_tasks)
task_data = self._task_metadata[queue_name].get(scheduled_task.task_id)
if task_data:
tasks.append(task_data)
return tasks
|
store_result
store_result(
queue_name: str, task_data: dict[str, Any]
) -> bool
Source code in src/sheppy/backend/memory.py
201
202
203
204
205
206
async def store_result(self, queue_name: str, task_data: dict[str, Any]) -> bool:
    """Store (or overwrite) *task_data* under its id on *queue_name*; always succeeds."""
    self._check_connected()
    async with self._locks[queue_name]:
        self._task_metadata[queue_name][task_data["id"]] = task_data
        return True
get_results
get_results(
queue_name: str,
task_ids: list[str],
timeout: float | None = None,
) -> dict[str, dict[str, Any]]
Source code in src/sheppy/backend/memory.py
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244 | async def get_results(self, queue_name: str, task_ids: list[str], timeout: float | None = None) -> dict[str,dict[str, Any]]:
self._check_connected()
start_time = asyncio.get_event_loop().time()
if not task_ids:
return {}
results = {}
remaining_ids = task_ids[:]
while True:
async with self._locks[queue_name]:
for task_id in task_ids:
task_data = self._task_metadata[queue_name].get(task_id, {})
if task_data.get("finished_at"):
results[task_id] = task_data
remaining_ids.remove(task_id)
if not remaining_ids:
return results
if timeout is None or timeout < 0:
return results
# endless wait if timeout == 0
if timeout == 0:
await asyncio.sleep(0.05)
continue
elapsed = asyncio.get_event_loop().time() - start_time
if elapsed >= timeout:
raise TimeoutError(f"Did not complete within {timeout} seconds")
await asyncio.sleep(min(0.05, timeout - elapsed))
|
get_stats
get_stats(queue_name: str) -> dict[str, int]
Source code in src/sheppy/backend/memory.py
246
247
248
249
250
251
252
253
async def get_stats(self, queue_name: str) -> dict[str, int]:
    """Return counts of pending, completed, and scheduled tasks for *queue_name*."""
    self._check_connected()
    async with self._locks[queue_name]:
        return {
            "pending": len(self._pending[queue_name]),
            # Use .get(): task data stored without a "finished_at" key would
            # otherwise raise KeyError here, while other readers (get_results)
            # already tolerate the missing key via .get().
            "completed": sum(1 for t in self._task_metadata[queue_name].values() if t.get("finished_at")),
            "scheduled": len(self._scheduled[queue_name]),
        }
get_all_tasks
get_all_tasks(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_all_tasks(self, queue_name: str) -> list[dict[str, Any]]:
    """Return the task data of every task known on *queue_name*."""
    self._check_connected()
    async with self._locks[queue_name]:
        return [*self._task_metadata[queue_name].values()]
list_queues
list_queues() -> dict[str, int]
Source code in src/sheppy/backend/memory.py
263
264
265
266
267
268
269
270
async def list_queues(self) -> dict[str, int]:
    """Map each known queue name to its current number of pending tasks."""
    self._check_connected()
    sizes: dict[str, int] = {}
    # A queue is "known" once it has task metadata; take each queue's lock
    # while reading its pending count.
    for name in self._task_metadata:
        async with self._locks[name]:
            sizes[name] = len(self._pending[name])
    return sizes
get_scheduled
get_scheduled(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
273
274
275
276
277
278
279
280
281
282
async def get_scheduled(self, queue_name: str) -> list[dict[str, Any]]:
    """Return task data for every scheduled task on *queue_name*, without removing any."""
    self._check_connected()
    async with self._locks[queue_name]:
        metadata = self._task_metadata[queue_name]
        # Entries whose metadata has been removed are skipped.
        return [
            data
            for entry in self._scheduled[queue_name]
            if (data := metadata.get(entry.task_id))
        ]
add_cron
add_cron(
queue_name: str,
deterministic_id: str,
task_cron: dict[str, Any],
) -> bool
Source code in src/sheppy/backend/memory.py
285
286
287
288
289
290
291
async def add_cron(self, queue_name: str, deterministic_id: str, task_cron: dict[str, Any]) -> bool:
    """Register *task_cron* under *deterministic_id*; return False if one already exists."""
    self._check_connected()
    async with self._locks[queue_name]:
        crons = self._crons[queue_name]
        if deterministic_id in crons:
            # Existing registrations are never overwritten.
            return False
        crons[deterministic_id] = task_cron
        return True
delete_cron
delete_cron(queue_name: str, deterministic_id: str) -> bool
Source code in src/sheppy/backend/memory.py
294
295
296
297
298
299
300
async def delete_cron(self, queue_name: str, deterministic_id: str) -> bool:
    """Remove the cron registered under *deterministic_id*; return False if absent."""
    self._check_connected()
    async with self._locks[queue_name]:
        # pop with a None sentinel: removal and membership test in one step.
        return self._crons[queue_name].pop(deterministic_id, None) is not None
get_crons
get_crons(queue_name: str) -> list[dict[str, Any]]
Source code in src/sheppy/backend/memory.py
async def get_crons(self, queue_name: str) -> list[dict[str, Any]]:
    """Return every registered cron definition for *queue_name*."""
    self._check_connected()
    async with self._locks[queue_name]:
        return [*self._crons[queue_name].values()]