@@ -1,23 +1,24 @@
 import uuid
 import sys
 import warnings
-import typing as t
 import logging
 
 from collections import namedtuple
-from datetime import datetime, timezone
+from datetime import datetime, timezone, timedelta
 from functools import total_ordering
+from typing import TYPE_CHECKING, Dict, List, Any, Callable, Optional, Tuple, Type, Union
 
 from redis import WatchError
 
-if t.TYPE_CHECKING:
+if TYPE_CHECKING:
     from redis import Redis
     from redis.client import Pipeline
+    from .job import Retry
 
 from .utils import as_text
 from .connections import resolve_connection
 from .defaults import DEFAULT_RESULT_TTL
 from .exceptions import DequeueTimeout, NoSuchJobError
 from .job import Job, JobStatus
+from .types import FunctionReferenceType, JobDependencyType
 from .serializers import resolve_serializer
 from .utils import backend_class, get_version, import_attribute, make_colorizer, parse_timeout, utcnow, compact
@@ -43,14 +44,22 @@ class EnqueueData(namedtuple('EnqueueData', ["func", "args", "kwargs", "timeout"
 
 @total_ordering
 class Queue:
-    job_class: t.Type['Job'] = Job
+    job_class: Type['Job'] = Job
     DEFAULT_TIMEOUT: int = 180  # Default timeout seconds.
     redis_queue_namespace_prefix: str = 'rq:queue:'
     redis_queues_keys: str = 'rq:queues'
 
     @classmethod
-    def all(cls, connection: t.Optional['Redis'] = None, job_class: t.Optional[t.Type['Job']] = None, serializer=None):
+    def all(cls, connection: Optional['Redis'] = None, job_class: Optional[Type['Job']] = None, serializer=None) -> List['Queue']:
         """Returns an iterable of all Queues.
+
+        Args:
+            connection (Optional[Redis], optional): The Redis Connection. Defaults to None.
+            job_class (Optional[Job], optional): The Job class to use. Defaults to None.
+            serializer (optional): The serializer to use. Defaults to None.
+
+        Returns:
+            queues (List[Queue]): A list of all queues.
         """
         connection = resolve_connection(connection)
@@ -59,16 +68,28 @@ class Queue:
                                       connection=connection,
                                       job_class=job_class, serializer=serializer)
 
-        return [to_queue(rq_key)
-                for rq_key in connection.smembers(cls.redis_queues_keys)
-                if rq_key]
+        all_registered_queues = connection.smembers(cls.redis_queues_keys)
+        all_queues = [to_queue(rq_key) for rq_key in all_registered_queues if rq_key]
+        return all_queues
 
     @classmethod
-    def from_queue_key(cls, queue_key, connection: t.Optional['Redis'] = None,
-                       job_class: t.Optional[t.Type['Job']] = None, serializer=None):
+    def from_queue_key(cls, queue_key: str, connection: Optional['Redis'] = None,
+                       job_class: Optional[Type['Job']] = None, serializer: Any = None) -> 'Queue':
         """Returns a Queue instance, based on the naming conventions for naming
         the internal Redis keys. Can be used to reverse-lookup Queues by their
         Redis keys.
+
+        Args:
+            queue_key (str): The queue key
+            connection (Optional[Redis], optional): Redis connection. Defaults to None.
+            job_class (Optional[Job], optional): Job class. Defaults to None.
+            serializer (Any, optional): Serializer. Defaults to None.
+
+        Raises:
+            ValueError: If the queue_key doesn't start with the defined prefix
+
+        Returns:
+            queue (Queue): The Queue object
         """
         prefix = cls.redis_queue_namespace_prefix
         if not queue_key.startswith(prefix):
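Note: a minimal usage sketch of the two classmethods retyped above — `Queue.all()` maps every member of the `rq:queues` set back into a `Queue` through `from_queue_key`. The queue name and Redis location here are illustrative, not from the patch:

from redis import Redis
from rq import Queue

redis = Redis()  # assumes a reachable local Redis server
q = Queue('high', connection=redis)

# Reverse-lookup by the internal Redis key, e.g. 'rq:queue:high'
same_q = Queue.from_queue_key('rq:queue:high', connection=redis)
assert same_q.name == 'high'

# Enumerate every queue currently registered under 'rq:queues'
for queue in Queue.all(connection=redis):
    print(queue.name, len(queue))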
@@ -76,8 +97,19 @@ class Queue:
         name = queue_key[len(prefix):]
         return cls(name, connection=connection, job_class=job_class, serializer=serializer)
 
-    def __init__(self, name='default', default_timeout=None, connection: t.Optional['Redis'] = None,
-                 is_async=True, job_class=None, serializer=None, **kwargs):
+    def __init__(self, name: str = 'default', default_timeout: Optional[int] = None, connection: Optional['Redis'] = None,
+                 is_async: bool = True, job_class: Union[str, Type['Job'], None] = None, serializer: Any = None, **kwargs):
+        """Initializes a Queue object.
+
+        Args:
+            name (str, optional): The queue name. Defaults to 'default'.
+            default_timeout (Optional[int], optional): Queue's default timeout. Defaults to None.
+            connection (Optional[Redis], optional): Redis connection. Defaults to None.
+            is_async (bool, optional): Whether jobs should run "async" (using the worker).
+                If `is_async` is False, jobs run on the same process they were called from. Defaults to True.
+            job_class (Union[str, 'Job'], optional): Job class or a string referencing the Job class path. Defaults to None.
+            serializer (Any, optional): Serializer. Defaults to None.
+        """
         self.connection = resolve_connection(connection)
         prefix = self.redis_queue_namespace_prefix
         self.name = name
@@ -97,7 +129,7 @@ class Queue:
             self.job_class = job_class
 
         self.serializer = resolve_serializer(serializer)
-        self.redis_server_version = None
+        self.redis_server_version: Optional[Tuple[int, int, int]] = None
 
     def __len__(self):
         return self.count
@@ -111,8 +143,12 @@ class Queue:
     def __iter__(self):
         yield self
 
-    def get_redis_server_version(self):
-        """Return Redis server version of connection"""
+    def get_redis_server_version(self) -> Tuple[int, int, int]:
+        """Return Redis server version of connection
+
+        Returns:
+            redis_version (Tuple): A tuple with the parsed Redis version (e.g. (5, 0, 0))
+        """
         if not self.redis_server_version:
             self.redis_server_version = get_version(self.connection)
         return self.redis_server_version
@@ -127,14 +163,28 @@ class Queue:
         """Redis key used to indicate this queue has been cleaned."""
         return 'rq:clean_registries:%s' % self.name
 
-    def acquire_cleaning_lock(self):
+    def acquire_cleaning_lock(self) -> bool:
         """Returns a boolean indicating whether a lock to clean this queue
         is acquired. A lock expires in 899 seconds (15 minutes - 1 second)
+
+        Returns:
+            lock_acquired (bool)
         """
-        return self.connection.set(self.registry_cleaning_key, 1, nx=1, ex=899)
+        lock_acquired = self.connection.set(self.registry_cleaning_key, 1, nx=1, ex=899)
+        if not lock_acquired:
+            return False
+        return lock_acquired
 
     def empty(self):
-        """Removes all messages on the queue."""
+        """Removes all messages on the queue.
+
+        This is currently done using a Lua script, which iterates over all queue
+        messages and deletes the jobs and their dependents.
+        It registers the Lua script and calls it.
+        Even though the result is currently returned, this is not strictly necessary.
+
+        Returns:
+            script (Any): The result of the Lua script execution.
+        """
         script = """
             local prefix = "{0}"
             local q = KEYS[1]
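Note: the cleaning lock above is plain Redis `SET` with `NX` and `EX` — the key is created only if absent and expires on its own, so a crashed cleaner cannot hold it forever. The same pattern in isolation (key name illustrative):

from redis import Redis

redis = Redis()

# SET key 1 NX EX 899 — True for the first caller, None for everyone else
# until the key expires (899 seconds) or is deleted.
if redis.set('rq:clean_registries:default', 1, nx=True, ex=899):
    print('lock acquired, safe to clean')
else:
    print('another process is already cleaning')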
@@ -156,7 +206,11 @@ class Queue:
         return script(keys=[self.key])
 
     def delete(self, delete_jobs: bool = True):
-        """Deletes the queue. If delete_jobs is true it removes all the associated messages on the queue first."""
+        """Deletes the queue.
+
+        Args:
+            delete_jobs (bool): If true, removes all the associated messages on the queue first.
+        """
         if delete_jobs:
             self.empty()
 
@@ -165,8 +219,12 @@ class Queue:
             pipeline.delete(self._key)
             pipeline.execute()
 
-    def is_empty(self):
-        """Returns whether the current queue is empty."""
+    def is_empty(self) -> bool:
+        """Returns whether the current queue is empty.
+
+        Returns:
+            is_empty (bool): Whether the queue is empty
+        """
         return self.count == 0
 
     @property
@@ -174,7 +232,17 @@ class Queue:
         """Returns whether the current queue is async."""
         return bool(self._is_async)
 
-    def fetch_job(self, job_id: str):
+    def fetch_job(self, job_id: str) -> Optional['Job']:
+        """Fetch a single job by Job ID.
+
+        If the job key is not found, will run the `remove` method, to exclude the key.
+        If the job's origin matches this queue's name, returns the Job.
+
+        Args:
+            job_id (str): The Job ID
+
+        Returns:
+            job (Optional[Job]): The job if found
+        """
         try:
             job = self.job_class.fetch(job_id, connection=self.connection, serializer=self.serializer)
         except NoSuchJobError:
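Note: a sketch of `fetch_job`'s contract — an unknown ID comes back as `None` (and is pruned from the queue list), as does an ID whose origin is a different queue. The job ID is hypothetical:

from redis import Redis
from rq import Queue

q = Queue('default', connection=Redis())

job = q.fetch_job('some-job-id')  # hypothetical ID
if job is None:
    print('no such job, or it belongs to another queue')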
@@ -183,13 +251,19 @@ class Queue:
             if job.origin == self.name:
                 return job
 
-    def get_job_position(self, job_or_id: t.Union[Job, str]):
+    def get_job_position(self, job_or_id: Union['Job', str]) -> Optional[int]:
         """Returns the position of a job within the queue
 
         Using Redis before 6.0.6 and redis-py before 3.5.4 has a complexity
         worse than O(N) and should not be used for very long job queues. Later
         Redis and redis-py versions support the LPOS command, which handles
         job positions within Redis' C implementation.
+
+        Args:
+            job_or_id (Union[Job, str]): The Job instance or Job ID
+
+        Returns:
+            position (Optional[int]): The position of the job within the queue, or None if not found
         """
         job_id = job_or_id.id if isinstance(job_or_id, self.job_class) else job_or_id
@@ -204,8 +278,16 @@ class Queue:
             return self.job_ids.index(job_id)
         return None
 
-    def get_job_ids(self, offset: int = 0, length: int = -1):
-        """Returns a slice of job IDs in the queue."""
+    def get_job_ids(self, offset: int = 0, length: int = -1) -> List[str]:
+        """Returns a slice of job IDs in the queue.
+
+        Args:
+            offset (int, optional): The offset. Defaults to 0.
+            length (int, optional): The slice length. Defaults to -1 (last element).
+
+        Returns:
+            job_ids (List[str]): A list of job IDs from the queue
+        """
         start = offset
         if length >= 0:
             end = offset + (length - 1)
@@ -219,18 +301,26 @@ class Queue:
         self.log.debug(f"Getting jobs for queue {green(self.name)}: {len(job_ids)} found.")
         return job_ids
 
-    def get_jobs(self, offset: int = 0, length: int = -1):
-        """Returns a slice of jobs in the queue."""
+    def get_jobs(self, offset: int = 0, length: int = -1) -> List['Job']:
+        """Returns a slice of jobs in the queue.
+
+        Args:
+            offset (int, optional): The offset. Defaults to 0.
+            length (int, optional): The slice length. Defaults to -1.
+
+        Returns:
+            jobs (List[Job]): A list of Jobs from the queue
+        """
         job_ids = self.get_job_ids(offset, length)
         return compact([self.fetch_job(job_id) for job_id in job_ids])
 
     @property
-    def job_ids(self) -> t.List[str]:
+    def job_ids(self) -> List[str]:
         """Returns a list of all job IDs in the queue."""
         return self.get_job_ids()
 
     @property
-    def jobs(self) -> t.List['Job']:
+    def jobs(self) -> List['Job']:
         """Returns a list of all (valid) jobs in the queue."""
         return self.get_jobs()
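Note: the `offset`/`length` pair above mirrors Redis LRANGE semantics — `end = offset + (length - 1)` for non-negative lengths, with `-1` meaning "through the last element". An illustrative sketch:

from redis import Redis
from rq import Queue

q = Queue('default', connection=Redis())

q.get_job_ids()        # every job ID (LRANGE 0 -1)
q.get_job_ids(0, 10)   # first ten IDs (end = 0 + (10 - 1) = 9)
q.get_job_ids(10, 10)  # IDs 10 through 19
q.get_jobs(0, 5)       # first five Job objects; stale IDs are compacted away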
@@ -276,8 +366,16 @@ class Queue:
         from rq.registry import CanceledJobRegistry
         return CanceledJobRegistry(queue=self, job_class=self.job_class, serializer=self.serializer)
 
-    def remove(self, job_or_id: t.Union['Job', str], pipeline: t.Optional['Pipeline'] = None):
-        """Removes Job from queue, accepts either a Job instance or ID."""
+    def remove(self, job_or_id: Union['Job', str], pipeline: Optional['Pipeline'] = None):
+        """Removes Job from queue, accepts either a Job instance or ID.
+
+        Args:
+            job_or_id (Union[Job, str]): The Job instance or Job ID string.
+            pipeline (Optional[Pipeline], optional): The Redis Pipeline. Defaults to None.
+
+        Returns:
+            result: The LREM result (number of jobs removed), or the pipeline command if a pipeline was given.
+        """
         job_id = job_or_id.id if isinstance(job_or_id, self.job_class) else job_or_id
 
         if pipeline is not None:
@@ -286,8 +384,8 @@ class Queue:
         return self.connection.lrem(self.key, 1, job_id)
 
     def compact(self):
-        """Removes all "dead" jobs from the queue by cycling through it, while
-        guaranteeing FIFO semantics.
+        """Removes all "dead" jobs from the queue by cycling through it,
+        while guaranteeing FIFO semantics.
         """
         COMPACT_QUEUE = '{0}_compact:{1}'.format(
             self.redis_queue_namespace_prefix, uuid.uuid4())  # noqa
@@ -300,9 +398,15 @@ class Queue:
             if self.job_class.exists(job_id, self.connection):
                 self.connection.rpush(self.key, job_id)
 
-    def push_job_id(self, job_id: str, pipeline: t.Optional['Pipeline'] = None, at_front=False):
+    def push_job_id(self, job_id: str, pipeline: Optional['Pipeline'] = None, at_front: bool = False):
         """Pushes a job ID on the corresponding Redis queue.
-        'at_front' allows you to push the job onto the front instead of the back of the queue"""
+        'at_front' allows you to push the job onto the front instead of the back of the queue.
+
+        Args:
+            job_id (str): The Job ID
+            pipeline (Optional[Pipeline], optional): The Redis Pipeline to use. Defaults to None.
+            at_front (bool, optional): Whether to push the job to the front of the queue. Defaults to False.
+        """
         connection = pipeline if pipeline is not None else self.connection
         if at_front:
             result = connection.lpush(self.key, job_id)
@@ -310,12 +414,38 @@ class Queue:
             result = connection.rpush(self.key, job_id)
         self.log.debug(f"Pushed job {blue(job_id)} into {green(self.name)}, {result} job(s) are in queue.")
 
-    def create_job(self, func: t.Callable[..., t.Any], args=None, kwargs=None, timeout=None,
-                   result_ttl=None, ttl=None, failure_ttl=None,
-                   description=None, depends_on=None, job_id=None,
-                   meta=None, status=JobStatus.QUEUED, retry=None, *,
-                   on_success=None, on_failure=None) -> Job:
-        """Creates a job based on parameters given."""
+    def create_job(self, func: 'FunctionReferenceType', args: Union[Tuple, List, None] = None, kwargs: Optional[Dict] = None,
+                   timeout: Optional[int] = None, result_ttl: Optional[int] = None, ttl: Optional[int] = None,
+                   failure_ttl: Optional[int] = None, description: Optional[str] = None, depends_on: Optional['JobDependencyType'] = None,
+                   job_id: Optional[str] = None, meta: Optional[Dict] = None, status: JobStatus = JobStatus.QUEUED,
+                   retry: Optional['Retry'] = None, *, on_success: Optional[Callable] = None,
+                   on_failure: Optional[Callable] = None) -> Job:
+        """Creates a job based on parameters given.
+
+        Args:
+            func (FunctionReferenceType): The function reference: a callable or the path.
+            args (Union[Tuple, List, None], optional): The `*args` to pass to the function. Defaults to None.
+            kwargs (Optional[Dict], optional): The `**kwargs` to pass to the function. Defaults to None.
+            timeout (Optional[int], optional): Function timeout. Defaults to None.
+            result_ttl (Optional[int], optional): Result time to live. Defaults to None.
+            ttl (Optional[int], optional): Time to live. Defaults to None.
+            failure_ttl (Optional[int], optional): Failure time to live. Defaults to None.
+            description (Optional[str], optional): The description. Defaults to None.
+            depends_on (Optional[JobDependencyType], optional): The job dependencies. Defaults to None.
+            job_id (Optional[str], optional): Job ID. Defaults to None.
+            meta (Optional[Dict], optional): Job metadata. Defaults to None.
+            status (JobStatus, optional): Job status. Defaults to JobStatus.QUEUED.
+            retry (Optional[Retry], optional): The Retry Object. Defaults to None.
+            on_success (Optional[Callable], optional): On success callable. Defaults to None.
+            on_failure (Optional[Callable], optional): On failure callable. Defaults to None.
+
+        Raises:
+            ValueError: If the timeout is 0
+            ValueError: If the job TTL is 0 or negative
+
+        Returns:
+            Job: The created job
+        """
         timeout = parse_timeout(timeout)
 
         if timeout is None:
@@ -345,14 +475,22 @@ class Queue:
 
         return job
 
-    def setup_dependencies(self, job: 'Job', pipeline: t.Optional['Pipeline'] = None):
-        # If a _dependent_ job depends on any unfinished job, register all the
-        # _dependent_ job's dependencies instead of enqueueing it.
-        #
-        # `Job#fetch_dependencies` sets WATCH on all dependencies. If
-        # WatchError is raised in the when the pipeline is executed, that means
-        # something else has modified either the set of dependencies or the
-        # status of one of them. In this case, we simply retry.
+    def setup_dependencies(self, job: 'Job', pipeline: Optional['Pipeline'] = None) -> 'Job':
+        """If a _dependent_ job depends on any unfinished job, register all the
+        _dependent_ job's dependencies instead of enqueueing it.
+
+        `Job#fetch_dependencies` sets WATCH on all dependencies. If
+        WatchError is raised when the pipeline is executed, that means
+        something else has modified either the set of dependencies or the
+        status of one of them. In this case, we simply retry.
+
+        Args:
+            job (Job): The job
+            pipeline (Optional[Pipeline], optional): The Redis Pipeline. Defaults to None.
+
+        Returns:
+            job (Job): The Job
+        """
         if len(job._dependency_ids) > 0:
             orig_status = job.get_status(refresh=False)
             pipe = pipeline if pipeline is not None else self.connection.pipeline()
@@ -397,15 +535,38 @@ class Queue:
             pipeline.multi()  # Ensure pipeline in multi mode before returning to caller
         return job
 
-    def enqueue_call(self, func: t.Callable[..., t.Any], args=None, kwargs=None, timeout=None,
-                     result_ttl=None, ttl=None, failure_ttl=None, description=None,
-                     depends_on=None, job_id: str = None, at_front: bool = False, meta=None,
-                     retry=None, on_success=None, on_failure=None, pipeline=None) -> Job:
+    def enqueue_call(self, func: 'FunctionReferenceType', args: Union[Tuple, List, None] = None, kwargs: Optional[Dict] = None,
+                     timeout: Optional[int] = None, result_ttl: Optional[int] = None, ttl: Optional[int] = None,
+                     failure_ttl: Optional[int] = None, description: Optional[str] = None, depends_on: Optional['JobDependencyType'] = None,
+                     job_id: Optional[str] = None, at_front: bool = False, meta: Optional[Dict] = None,
+                     retry: Optional['Retry'] = None, on_success: Optional[Callable[..., Any]] = None,
+                     on_failure: Optional[Callable[..., Any]] = None, pipeline: Optional['Pipeline'] = None) -> Job:
         """Creates a job to represent the delayed function call and enqueues it.
 
         It is much like `.enqueue()`, except that it takes the function's args
         and kwargs as explicit arguments. Any kwargs passed to this function
         contain options for RQ itself.
+
+        Args:
+            func (FunctionReferenceType): The reference to the function
+            args (Union[Tuple, List, None], optional): The `*args` to pass to the function. Defaults to None.
+            kwargs (Optional[Dict], optional): The `**kwargs` to pass to the function. Defaults to None.
+            timeout (Optional[int], optional): Function timeout. Defaults to None.
+            result_ttl (Optional[int], optional): Result time to live. Defaults to None.
+            ttl (Optional[int], optional): Time to live. Defaults to None.
+            failure_ttl (Optional[int], optional): Failure time to live. Defaults to None.
+            description (Optional[str], optional): The job description. Defaults to None.
+            depends_on (Optional[JobDependencyType], optional): The job dependencies. Defaults to None.
+            job_id (Optional[str], optional): The job ID. Defaults to None.
+            at_front (bool, optional): Whether to enqueue the job at the front. Defaults to False.
+            meta (Optional[Dict], optional): Metadata to attach to the job. Defaults to None.
+            retry (Optional[Retry], optional): Retry object. Defaults to None.
+            on_success (Optional[Callable[..., Any]], optional): Callable for on success. Defaults to None.
+            on_failure (Optional[Callable[..., Any]], optional): Callable for on failure. Defaults to None.
+            pipeline (Optional[Pipeline], optional): The Redis Pipeline. Defaults to None.
+
+        Returns:
+            Job: The enqueued Job
         """
 
         job = self.create_job(
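Note: a sketch of how `enqueue_call` separates the function's own arguments from RQ options (the module path and function are hypothetical):

from redis import Redis
from rq import Queue, Retry

q = Queue('default', connection=Redis())

job = q.enqueue_call(
    func='my_module.count_words_at_url',  # dotted path or a callable
    args=('http://nvie.com',),            # positional args for the function itself
    kwargs={'verbose': True},             # keyword args for the function itself
    timeout=180,                          # RQ option: job timeout
    result_ttl=500,                       # RQ option: keep the result for 500 seconds
    retry=Retry(max=3),                   # RQ option: retry up to three times
)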
@@ -425,12 +586,33 @@ class Queue:
         return job
 
     @staticmethod
-    def prepare_data(func, args=None, kwargs=None, timeout=None,
-                     result_ttl=None, ttl=None, failure_ttl=None,
-                     description=None, job_id=None,
-                     at_front=False, meta=None, retry=None, on_success=None, on_failure=None) -> EnqueueData:
-        # Need this till support dropped for python_version < 3.7, where defaults can be specified for named tuples
-        # And can keep this logic within EnqueueData
+    def prepare_data(func: 'FunctionReferenceType', args: Union[Tuple, List, None] = None, kwargs: Optional[Dict] = None,
+                     timeout: Optional[int] = None, result_ttl: Optional[int] = None, ttl: Optional[int] = None,
+                     failure_ttl: Optional[int] = None, description: Optional[str] = None, job_id: Optional[str] = None,
+                     at_front: bool = False, meta: Optional[Dict] = None, retry: Optional['Retry'] = None,
+                     on_success: Optional[Callable] = None, on_failure: Optional[Callable] = None) -> EnqueueData:
+        """Need this till support is dropped for Python < 3.7, where defaults can be specified for named tuples.
+        This also keeps the logic within EnqueueData.
+
+        Args:
+            func (FunctionReferenceType): The reference to the function
+            args (Union[Tuple, List, None], optional): The `*args` to pass to the function. Defaults to None.
+            kwargs (Optional[Dict], optional): The `**kwargs` to pass to the function. Defaults to None.
+            timeout (Optional[int], optional): Function timeout. Defaults to None.
+            result_ttl (Optional[int], optional): Result time to live. Defaults to None.
+            ttl (Optional[int], optional): Time to live. Defaults to None.
+            failure_ttl (Optional[int], optional): Failure time to live. Defaults to None.
+            description (Optional[str], optional): The job description. Defaults to None.
+            job_id (Optional[str], optional): The job ID. Defaults to None.
+            at_front (bool, optional): Whether to enqueue the job at the front. Defaults to False.
+            meta (Optional[Dict], optional): Metadata to attach to the job. Defaults to None.
+            retry (Optional[Retry], optional): Retry object. Defaults to None.
+            on_success (Optional[Callable[..., Any]], optional): Callable for on success. Defaults to None.
+            on_failure (Optional[Callable[..., Any]], optional): Callable for on failure. Defaults to None.
+
+        Returns:
+            EnqueueData: The EnqueueData
+        """
         return EnqueueData(
             func, args, kwargs, timeout,
             result_ttl, ttl, failure_ttl,
@@ -438,10 +620,16 @@ class Queue:
             at_front, meta, retry, on_success, on_failure
         )
 
-    def enqueue_many(self, job_datas, pipeline: t.Optional['Pipeline'] = None) -> t.List[Job]:
-        """
-        Creates multiple jobs (created via `Queue.prepare_data` calls)
+    def enqueue_many(self, job_datas: List['EnqueueData'], pipeline: Optional['Pipeline'] = None) -> List[Job]:
+        """Creates multiple jobs (created via `Queue.prepare_data` calls)
         to represent the delayed function calls and enqueues them.
+
+        Args:
+            job_datas (List['EnqueueData']): A List of job data
+            pipeline (Optional[Pipeline], optional): The Redis Pipeline. Defaults to None.
+
+        Returns:
+            List[Job]: A list of enqueued jobs
         """
         pipe = pipeline if pipeline is not None else self.connection.pipeline()
         jobs = [
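Note: `prepare_data` exists so callers can build `EnqueueData` tuples up front and hand them to `enqueue_many` in a single pipelined round trip. A sketch (the function path is hypothetical):

from redis import Redis
from rq import Queue

q = Queue('default', connection=Redis())

jobs = q.enqueue_many([
    Queue.prepare_data('my_module.count_words', ('http://nvie.com',), job_id='job_1'),
    Queue.prepare_data('my_module.count_words', ('http://redis.io',), job_id='job_2'),
])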
@@ -467,6 +655,14 @@ class Queue:
         return jobs
 
     def run_job(self, job: 'Job') -> Job:
+        """Run the job
+
+        Args:
+            job (Job): The job to run
+
+        Returns:
+            Job: The performed Job
+        """
         job.perform()
         job.set_status(JobStatus.FINISHED)
         job.save(include_meta=False)
@@ -474,7 +670,7 @@ class Queue:
         return job
 
     @classmethod
-    def parse_args(cls, f: t.Union[t.Callable[..., t.Any], str], *args, **kwargs):
+    def parse_args(cls, f: 'FunctionReferenceType', *args, **kwargs):
         """
         Parses arguments passed to `queue.enqueue()` and `queue.enqueue_at()`
 
@@ -484,6 +680,11 @@ class Queue:
         * A reference to an object's instance method
         * A string, representing the location of a function (must be
           meaningful to the import context of the workers)
+
+        Args:
+            f (FunctionReferenceType): The function reference
+            args (*args): function args
+            kwargs (*kwargs): function kwargs
         """
         if not isinstance(f, str) and f.__module__ == '__main__':
             raise ValueError('Functions from the __main__ module cannot be processed '
@@ -514,9 +715,18 @@ class Queue:
                depends_on, job_id, at_front, meta, retry, on_success, on_failure,
                pipeline, args, kwargs)
 
-    def enqueue(self, f, *args, **kwargs):
-        """Creates a job to represent the delayed function call and enqueues it."""
+    def enqueue(self, f: 'FunctionReferenceType', *args, **kwargs) -> 'Job':
+        """Creates a job to represent the delayed function call and enqueues it.
+
+        Receives the same parameters accepted by the `enqueue_call` method.
+
+        Args:
+            f (FunctionReferenceType): The function reference
+            args (*args): function args
+            kwargs (*kwargs): function kwargs
+
+        Returns:
+            job (Job): The created Job
+        """
         (f, timeout, description, result_ttl, ttl, failure_ttl,
          depends_on, job_id, at_front, meta, retry, on_success,
          on_failure, pipeline, args, kwargs) = Queue.parse_args(f, *args, **kwargs)
@@ -530,8 +740,15 @@ class Queue:
         )
 
     def enqueue_at(self, datetime: datetime, f, *args, **kwargs):
-        """Schedules a job to be enqueued at specified time"""
+        """Schedules a job to be enqueued at specified time
+
+        Args:
+            datetime (datetime): The time at which the job should be enqueued
+            f: The function reference
+
+        Returns:
+            job (Job): The scheduled Job
+        """
         (f, timeout, description, result_ttl, ttl, failure_ttl,
          depends_on, job_id, at_front, meta, retry, on_success, on_failure,
          pipeline, args, kwargs) = Queue.parse_args(f, *args, **kwargs)
@@ -544,8 +761,17 @@ class Queue:
             job.enqueue_at_front = True
         return self.schedule_job(job, datetime, pipeline=pipeline)
 
-    def schedule_job(self, job: 'Job', datetime: datetime, pipeline: t.Optional['Pipeline'] = None):
-        """Puts job on ScheduledJobRegistry"""
+    def schedule_job(self, job: 'Job', datetime: datetime, pipeline: Optional['Pipeline'] = None):
+        """Puts job on ScheduledJobRegistry
+
+        Args:
+            job (Job): The job to schedule
+            datetime (datetime): The time at which the job should run
+            pipeline (Optional[Pipeline], optional): The Redis Pipeline. Defaults to None.
+
+        Returns:
+            job (Job): The scheduled job
+        """
         from .registry import ScheduledJobRegistry
         registry = ScheduledJobRegistry(queue=self)
@@ -559,15 +785,31 @@ class Queue:
             pipe.execute()
         return job
 
-    def enqueue_in(self, time_delta, func, *args, **kwargs):
-        """Schedules a job to be executed in a given `timedelta` object"""
+    def enqueue_in(self, time_delta: timedelta, func: 'FunctionReferenceType', *args, **kwargs) -> 'Job':
+        """Schedules a job to be executed in a given `timedelta` object
+
+        Args:
+            time_delta (timedelta): The timedelta object
+            func (FunctionReferenceType): The function reference
+
+        Returns:
+            job (Job): The enqueued Job
+        """
         return self.enqueue_at(datetime.now(timezone.utc) + time_delta,
                                func, *args, **kwargs)
 
-    def enqueue_job(self, job: 'Job', pipeline: t.Optional['Pipeline'] = None, at_front: bool = False) -> Job:
+    def enqueue_job(self, job: 'Job', pipeline: Optional['Pipeline'] = None, at_front: bool = False) -> Job:
         """Enqueues a job for delayed execution.
 
         If Queue is instantiated with is_async=False, job is executed immediately.
+
+        Args:
+            job (Job): The job to enqueue
+            pipeline (Optional[Pipeline], optional): The Redis pipeline to use. Defaults to None.
+            at_front (bool, optional): Whether to enqueue the job at the front of the queue. Defaults to False.
+
+        Returns:
+            Job: The enqueued job
+        """
         pipe = pipeline if pipeline is not None else self.connection.pipeline()
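Note: the `timedelta` added to the imports in the first hunk backs the retyped `enqueue_in`, which just adds the delta to the current UTC time and defers to `enqueue_at`. A sketch (the job function is hypothetical):

from datetime import datetime, timedelta, timezone
from redis import Redis
from rq import Queue

q = Queue('default', connection=Redis())

q.enqueue_in(timedelta(minutes=10), 'my_module.send_report')

# Equivalent spelling via enqueue_at:
q.enqueue_at(datetime.now(timezone.utc) + timedelta(minutes=10), 'my_module.send_report')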
@@ -596,6 +838,14 @@ class Queue:
         return job
 
     def run_sync(self, job: 'Job') -> 'Job':
+        """Run a job synchronously, meaning on the same process the method was called.
+
+        Args:
+            job (Job): The job to run
+
+        Returns:
+            Job: The job instance
+        """
         with self.connection.pipeline() as pipeline:
             job.prepare_for_execution('sync', pipeline)
@@ -611,12 +861,17 @@ class Queue:
 
         return job
 
-    def enqueue_dependents(self, job: 'Job', pipeline: t.Optional['Pipeline'] = None, exclude_job_id=None):
+    def enqueue_dependents(self, job: 'Job', pipeline: Optional['Pipeline'] = None, exclude_job_id: Optional[str] = None):
         """Enqueues all jobs in the given job's dependents set and clears it.
 
         When called without a pipeline, this method uses WATCH/MULTI/EXEC.
        If you pass a pipeline, only MULTI is called. The rest is up to the
         caller.
+
+        Args:
+            job (Job): The Job whose dependents to enqueue
+            pipeline (Optional[Pipeline], optional): The Redis Pipeline. Defaults to None.
+            exclude_job_id (Optional[str], optional): A job ID to exclude. Defaults to None.
         """
         from .registry import DeferredJobRegistry
@@ -690,12 +945,16 @@ class Queue:
                     # handle it
                     raise
 
-    def pop_job_id(self):
-        """Pops a given job ID from this Redis queue."""
+    def pop_job_id(self) -> Optional[str]:
+        """Pops a given job ID from this Redis queue.
+
+        Returns:
+            job_id (Optional[str]): The job id
+        """
         return as_text(self.connection.lpop(self.key))
 
     @classmethod
-    def lpop(cls, queue_keys, timeout: int, connection: t.Optional['Redis'] = None):
+    def lpop(cls, queue_keys: List[str], timeout: int, connection: Optional['Redis'] = None):
         """Helper method. Intermediate method to abstract away from some
         Redis API details, where LPOP accepts only a single key, whereas BLPOP
         accepts multiple. So if we want the non-blocking LPOP, we need to
@@ -707,6 +966,18 @@ class Queue:
         The timeout parameter is interpreted as follows:
             None - non-blocking (return immediately)
             > 0 - maximum number of seconds to block
+
+        Args:
+            queue_keys (List[str]): The queue keys to pop from
+            timeout (int): The timeout in seconds to wait when blocking
+            connection (Optional[Redis], optional): The Redis Connection. Defaults to None.
+
+        Raises:
+            ValueError: If timeout of 0 was passed
+            DequeueTimeout: BLPOP Timeout
+
+        Returns:
+            result: A tuple with the queue key and the job id, or None when no job was found
         """
         connection = resolve_connection(connection)
         if timeout is not None:  # blocking variant
@@ -728,8 +999,8 @@ class Queue:
         return None
 
     @classmethod
-    def dequeue_any(cls, queues, timeout, connection: t.Optional['Redis'] = None,
-                    job_class: t.Optional[t.Type['Job']] = None, serializer=None):
+    def dequeue_any(cls, queues: List['Queue'], timeout: int, connection: Optional['Redis'] = None,
+                    job_class: Optional[Type['Job']] = None, serializer: Any = None) -> Tuple['Job', 'Queue']:
         """Class method returning the job_class instance at the front of the given
         set of Queues, where the order of the queues is important.
@@ -739,8 +1010,21 @@ class Queue:
         None.
 
         See the documentation of cls.lpop for the interpretation of timeout.
+
+        Args:
+            queues (List[Queue]): List of queue objects
+            timeout (int): Timeout for the LPOP
+            connection (Optional[Redis], optional): Redis Connection. Defaults to None.
+            job_class (Optional[Job], optional): The job class. Defaults to None.
+            serializer (Any, optional): Serializer to use. Defaults to None.
+
+        Raises:
+            e: Any exception
+
+        Returns:
+            job, queue (Tuple[Job, Queue]): A tuple of Job, Queue
         """
-        job_class = backend_class(cls, 'job_class', override=job_class)
+        job_class: Type[Job] = backend_class(cls, 'job_class', override=job_class)
 
         while True:
             queue_keys = [q.key for q in queues]
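Note: `dequeue_any` is the worker-side entry point — it pops across all queue keys at once, honoring queue order, with the same timeout rules as `lpop` (None = non-blocking, > 0 = block up to that many seconds, 0 = ValueError). A sketch, assuming two illustrative queues:

from redis import Redis
from rq import Queue
from rq.exceptions import DequeueTimeout

redis = Redis()
q_high = Queue('high', connection=redis)
q_low = Queue('low', connection=redis)

try:
    # Blocks for up to 5 seconds across both queue keys (BLPOP under the hood)
    job, queue = Queue.dequeue_any([q_high, q_low], timeout=5, connection=redis)
    if job is not None:
        print(f'got {job.id} from {queue.name}')
except DequeueTimeout:
    print('nothing arrived within 5 seconds')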