@@ -3,10 +3,13 @@ import uuid
 
 from .connections import resolve_connection
 from .job import Job, Status
-from .exceptions import (NoSuchJobError, UnpickleError,
-                         InvalidJobOperationError, DequeueTimeout)
+from .exceptions import (DequeueTimeout, InvalidJobOperationError,
+                         NoSuchJobError, UnpickleError)
 from .compat import total_ordering, string_types, as_text
 
 
+from redis import WatchError
+
+
 def get_failed_queue(connection=None):
     """Returns a handle to the special failed queue."""
@@ -131,8 +134,9 @@ class Queue(object):
         """Pushes a job ID on the corresponding Redis queue."""
         self.connection.rpush(self.key, job_id)
 
     def enqueue_call(self, func, args=None, kwargs=None, timeout=None,
-                     result_ttl=None, description=None):
+                     result_ttl=None, description=None, after=None):
         """Creates a job to represent the delayed function call and enqueues
         it.
 
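
The new after parameter lets a caller hand enqueue_call an existing Job and hold the new job back until that job has finished. A minimal usage sketch (the report functions and the queue setup are illustrative placeholders, not part of this change):

    from redis import Redis
    from rq import Queue

    q = Queue(connection=Redis())
    parent = q.enqueue_call(func=generate_report, args=('2013-10',))
    # The child job is created and saved, but not pushed onto the queue;
    # it waits on the parent's waitlist until the parent reaches
    # Status.FINISHED (see the next hunk).
    child = q.enqueue_call(func=send_report, args=('boss@example.com',),
                           after=parent)
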
@@ -141,8 +145,29 @@ class Queue(object):
         contain options for RQ itself.
         """
         timeout = timeout or self._default_timeout
-        job = Job.create(func, args, kwargs, description=description, connection=self.connection,
-                         result_ttl=result_ttl, status=Status.QUEUED)
+        # TODO: job with dependency shouldn't have "queued" as status
+        job = Job.create(func, args, kwargs, connection=self.connection,
+                         result_ttl=result_ttl, status=Status.QUEUED,
+                         description=description, dependency=after)
+
+        # If job depends on an unfinished job, register itself on its
+        # parent's waitlist instead of enqueueing it.
+        # If WatchError is raised in the process, that means something else is
+        # modifying the dependency. In this case we simply retry.
+        if after is not None:
+            with self.connection.pipeline() as pipe:
+                while True:
+                    try:
+                        pipe.watch(after.key)
+                        if after.status != Status.FINISHED:
+                            job.register_dependency()
+                            job.save()
+                            return job
+                        break
+                    except WatchError:
+                        continue
+
         return self.enqueue_job(job, timeout=timeout)
 
     def enqueue(self, f, *args, **kwargs):
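
The comment in the hunk above refers to redis-py's optimistic-locking idiom around WATCH: watch the parent job's key, read its state, and start over whenever a WatchError signals that another client touched the key in the meantime. For reference, the canonical shape of that idiom looks roughly like the generic sketch below (key and value names are illustrative; this is not RQ code):

    from redis import Redis, WatchError

    def set_if_unchanged(conn, key, value):
        # WATCH the key, read it in immediate mode, then queue the write;
        # execute() raises WatchError if anyone modified the key in between,
        # in which case we simply loop and try again.
        with conn.pipeline() as pipe:
            while True:
                try:
                    pipe.watch(key)
                    current = pipe.get(key)
                    pipe.multi()
                    pipe.set(key, value)
                    pipe.execute()
                    return current
                except WatchError:
                    continue

    if __name__ == '__main__':
        set_if_unchanged(Redis(), 'demo:key', 'demo:value')
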
@@ -168,16 +193,19 @@ class Queue(object):
         timeout = None
         description = None
         result_ttl = None
-        if 'args' in kwargs or 'kwargs' in kwargs:
+        after = None
+        if 'args' in kwargs or 'kwargs' in kwargs or 'after' in kwargs:
             assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs.'  # noqa
             timeout = kwargs.pop('timeout', None)
             description = kwargs.pop('description', None)
             args = kwargs.pop('args', None)
             result_ttl = kwargs.pop('result_ttl', None)
+            after = kwargs.pop('after', None)
             kwargs = kwargs.pop('kwargs', None)
 
-        return self.enqueue_call(func=f, args=args, kwargs=kwargs, description=description,
-                                 timeout=timeout, result_ttl=result_ttl)
+        return self.enqueue_call(func=f, args=args, kwargs=kwargs,
+                                 timeout=timeout, result_ttl=result_ttl,
+                                 description=description, after=after)
 
     def enqueue_job(self, job, timeout=None, set_meta_data=True):
         """Enqueues a job for delayed execution.
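
Because enqueue only forwards RQ-level options through its explicit-kwargs branch, after travels the same route as timeout, description and result_ttl: it is popped before the remaining keyword arguments are treated as job kwargs, which also means it cannot be combined with extra positional arguments (the existing assert enforces that). A hypothetical call at the convenience-API level, with placeholder function names and the same imports as the earlier sketch:

    q = Queue(connection=Redis())
    parent = q.enqueue(count_words, args=('http://nvie.com',))
    # Once after= is used, arguments must go through args=/kwargs=.
    child = q.enqueue(send_notification, kwargs={'message': 'done'},
                      after=parent)
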
@@ -208,6 +236,16 @@ class Queue(object):
         job.save()
         return job
 
+    def enqueue_waitlist(self, job):
+        """Enqueues all jobs in the waitlist and clears it"""
+        # TODO: can probably be pipelined
+        while True:
+            job_id = as_text(self.connection.lpop(job.waitlist_key))
+            if job_id is None:
+                break
+            waitlisted_job = Job.fetch(job_id, connection=self.connection)
+            self.enqueue_job(waitlisted_job)
+
     def pop_job_id(self):
         """Pops a given job ID from this Redis queue."""
         return as_text(self.connection.lpop(self.key))
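
enqueue_waitlist drains a per-job Redis list of waiting job ids, but the Job-side pieces it leans on (Job.create accepting dependency=, waitlist_key, register_dependency) are not part of this file, and neither is the worker-side call that presumably invokes enqueue_waitlist once a job finishes. Purely as a reading aid, the sketch below mimics the waitlist mechanics with plain redis-py; the key layout rq:job:<id>:waitlist and the ids are assumptions for illustration, not something this diff defines.

    from redis import Redis

    conn = Redis()
    parent_id, child_id = 'abc123', 'def456'
    waitlist_key = 'rq:job:{0}:waitlist'.format(parent_id)  # assumed layout

    # Roughly what job.register_dependency() would do: park the child's id
    # on its parent's waitlist instead of pushing it onto a queue.
    conn.rpush(waitlist_key, child_id)

    # Roughly what Queue.enqueue_waitlist(parent) does: drain the waitlist
    # and hand each waiting job back to enqueue_job (here we only print).
    while True:
        job_id = conn.lpop(waitlist_key)
        if job_id is None:
            break
        print('would enqueue job', job_id)
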