@ -5,6 +5,7 @@ from __future__ import (absolute_import, division, print_function,
import uuid
import uuid
import warnings
import warnings
from collections import namedtuple
from datetime import datetime , timezone
from datetime import datetime , timezone
from distutils . version import StrictVersion
from distutils . version import StrictVersion
def compact(lst):
    """Return a list containing the items of *lst* with all ``None`` values removed.

    Falsy-but-not-None items (``0``, ``''``, ``[]``) are kept.
    """
    return [item for item in lst if item is not None]
class EnqueueData(namedtuple('EnqueueData', ["func", "args", "kwargs", "timeout",
                                             "result_ttl", "ttl", "failure_ttl",
                                             "description", "job_id",
                                             "at_front", "meta", "retry"])):
    """Helper type to use when calling enqueue_many
    NOTE: Does not support `depends_on` yet.
    """
    # Keep instances lightweight: no per-instance __dict__.
    __slots__ = ()
@total_ordering
@total_ordering
class Queue :
class Queue :
job_class = Job
job_class = Job
@ -310,10 +322,56 @@ class Queue:
return job
return job
def setup_dependencies (
self ,
job ,
pipeline = None
) :
# If a _dependent_ job depends on any unfinished job, register all the
# _dependent_ job's dependencies instead of enqueueing it.
#
# `Job#fetch_dependencies` sets WATCH on all dependencies. If
# WatchError is raised in the when the pipeline is executed, that means
# something else has modified either the set of dependencies or the
# status of one of them. In this case, we simply retry.
if len ( job . _dependency_ids ) > 0 :
pipe = pipeline if pipeline is not None else self . connection . pipeline ( )
while True :
try :
# Also calling watch even if caller
# passed in a pipeline since Queue#create_job
# is called from within this method.
pipe . watch ( job . dependencies_key )
dependencies = job . fetch_dependencies (
watch = True ,
pipeline = pipe
)
pipe . multi ( )
for dependency in dependencies :
if dependency . get_status ( refresh = False ) != JobStatus . FINISHED :
job . set_status ( JobStatus . DEFERRED , pipeline = pipe )
job . register_dependency ( pipeline = pipe )
job . save ( pipeline = pipe )
job . cleanup ( ttl = job . ttl , pipeline = pipe )
if pipeline is None :
pipe . execute ( )
return job
break
except WatchError :
if pipeline is None :
continue
else :
# if pipeline comes from caller, re-raise to them
raise
return job
def enqueue_call ( self , func , args = None , kwargs = None , timeout = None ,
def enqueue_call ( self , func , args = None , kwargs = None , timeout = None ,
result_ttl = None , ttl = None , failure_ttl = None ,
result_ttl = None , ttl = None , failure_ttl = None ,
description = None , depends_on = None , job_id = None ,
description = None , depends_on = None , job_id = None ,
at_front = False , meta = None , retry = None ) :
at_front = False , meta = None , retry = None , pipeline = None ):
""" Creates a job to represent the delayed function call and enqueues
""" Creates a job to represent the delayed function call and enqueues
it .
it .
nd
nd
@ -329,42 +387,58 @@ nd
retry = retry
retry = retry
)
)
# If a _dependent_ job depends on any unfinished job, register all the
job = self . setup_dependencies (
# _dependent_ job's dependencies instead of enqueueing it.
job ,
#
pipeline = pipeline
# `Job#fetch_dependencies` sets WATCH on all dependencies. If
)
# WatchError is raised in the when the pipeline is executed, that means
# If we do not depend on an unfinished job, enqueue the job.
# something else has modified either the set of dependencies or the
if job . get_status ( refresh = False ) != JobStatus . DEFERRED :
# status of one of them. In this case, we simply retry.
return self . enqueue_job ( job , pipeline = pipeline , at_front = at_front )
if depends_on is not None :
return job
with self . connection . pipeline ( ) as pipe :
while True :
try :
pipe . watch ( job . dependencies_key )
dependencies = job . fetch_dependencies (
watch = True ,
pipeline = pipe
)
pipe . multi ( )
for dependency in dependencies :
if dependency . get_status ( refresh = False ) != JobStatus . FINISHED :
job . set_status ( JobStatus . DEFERRED , pipeline = pipe )
job . register_dependency ( pipeline = pipe )
job . save ( pipeline = pipe )
job . cleanup ( ttl = job . ttl , pipeline = pipe )
pipe . execute ( )
return job
break
@staticmethod
except WatchError :
def prepare_data ( func , args = None , kwargs = None , timeout = None ,
continue
result_ttl = None , ttl = None , failure_ttl = None ,
description = None , job_id = None ,
at_front = False , meta = None , retry = None ) :
# Need this till support dropped for python_version < 3.7, where defaults can be specified for named tuples
# And can keep this logic within EnqueueData
return EnqueueData (
func , args , kwargs , timeout ,
result_ttl , ttl , failure_ttl ,
description , job_id ,
at_front , meta , retry
)
job = self . enqueue_job ( job , at_front = at_front )
def enqueue_many (
return job
self ,
job_datas ,
pipeline = None
) :
"""
Creates multiple jobs ( created via ` Queue . prepare_data ` calls )
to represent the delayed function calls and enqueues them .
"""
pipe = pipeline if pipeline is not None else self . connection . pipeline ( )
jobs = [
self . enqueue_job (
self . create_job (
job_data . func , args = job_data . args , kwargs = job_data . kwargs , result_ttl = job_data . result_ttl ,
ttl = job_data . ttl ,
failure_ttl = job_data . failure_ttl , description = job_data . description ,
depends_on = None ,
job_id = job_data . job_id , meta = job_data . meta , status = JobStatus . QUEUED ,
timeout = job_data . timeout ,
retry = job_data . retry
) ,
pipeline = pipe ,
at_front = job_data . at_front
)
for job_data in job_datas
]
if pipeline is None :
pipe . execute ( )
return jobs
def run_job ( self , job ) :
def run_job ( self , job ) :
job . perform ( )
job . perform ( )
@ -401,6 +475,7 @@ nd
at_front = kwargs . pop ( ' at_front ' , False )
at_front = kwargs . pop ( ' at_front ' , False )
meta = kwargs . pop ( ' meta ' , None )
meta = kwargs . pop ( ' meta ' , None )
retry = kwargs . pop ( ' retry ' , None )
retry = kwargs . pop ( ' retry ' , None )
pipeline = kwargs . pop ( ' pipeline ' , None )
if ' args ' in kwargs or ' kwargs ' in kwargs :
if ' args ' in kwargs or ' kwargs ' in kwargs :
assert args == ( ) , ' Extra positional arguments cannot be used when using explicit args and kwargs ' # noqa
assert args == ( ) , ' Extra positional arguments cannot be used when using explicit args and kwargs ' # noqa
@ -408,32 +483,32 @@ nd
kwargs = kwargs . pop ( ' kwargs ' , None )
kwargs = kwargs . pop ( ' kwargs ' , None )
return ( f , timeout , description , result_ttl , ttl , failure_ttl ,
return ( f , timeout , description , result_ttl , ttl , failure_ttl ,
depends_on , job_id , at_front , meta , retry , args, kwargs )
depends_on , job_id , at_front , meta , retry , pipeline, args, kwargs )
def enqueue ( self , f , * args , * * kwargs ) :
def enqueue ( self , f , * args , * * kwargs ) :
""" Creates a job to represent the delayed function call and enqueues it. """
""" Creates a job to represent the delayed function call and enqueues it. """
( f , timeout , description , result_ttl , ttl , failure_ttl ,
( f , timeout , description , result_ttl , ttl , failure_ttl ,
depends_on , job_id , at_front , meta , retry , args, kwargs ) = Queue . parse_args ( f , * args , * * kwargs )
depends_on , job_id , at_front , meta , retry , pipeline, args, kwargs ) = Queue . parse_args ( f , * args , * * kwargs )
return self . enqueue_call (
return self . enqueue_call (
func = f , args = args , kwargs = kwargs , timeout = timeout ,
func = f , args = args , kwargs = kwargs , timeout = timeout ,
result_ttl = result_ttl , ttl = ttl , failure_ttl = failure_ttl ,
result_ttl = result_ttl , ttl = ttl , failure_ttl = failure_ttl ,
description = description , depends_on = depends_on , job_id = job_id ,
description = description , depends_on = depends_on , job_id = job_id ,
at_front = at_front , meta = meta , retry = retry
at_front = at_front , meta = meta , retry = retry , pipeline = pipeline
)
)
def enqueue_at ( self , datetime , f , * args , * * kwargs ) :
def enqueue_at ( self , datetime , f , * args , * * kwargs ) :
""" Schedules a job to be enqueued at specified time """
""" Schedules a job to be enqueued at specified time """
( f , timeout , description , result_ttl , ttl , failure_ttl ,
( f , timeout , description , result_ttl , ttl , failure_ttl ,
depends_on , job_id , at_front , meta , retry , args, kwargs ) = Queue . parse_args ( f , * args , * * kwargs )
depends_on , job_id , at_front , meta , retry , pipeline, args, kwargs ) = Queue . parse_args ( f , * args , * * kwargs )
job = self . create_job ( f , status = JobStatus . SCHEDULED , args = args , kwargs = kwargs ,
job = self . create_job ( f , status = JobStatus . SCHEDULED , args = args , kwargs = kwargs ,
timeout = timeout , result_ttl = result_ttl , ttl = ttl ,
timeout = timeout , result_ttl = result_ttl , ttl = ttl ,
failure_ttl = failure_ttl , description = description ,
failure_ttl = failure_ttl , description = description ,
depends_on = depends_on , job_id = job_id , meta = meta , retry = retry )
depends_on = depends_on , job_id = job_id , meta = meta , retry = retry )
return self . schedule_job ( job , datetime )
return self . schedule_job ( job , datetime , pipeline = pipeline )
def schedule_job ( self , job , datetime , pipeline = None ) :
def schedule_job ( self , job , datetime , pipeline = None ) :
""" Puts job on ScheduledJobRegistry """
""" Puts job on ScheduledJobRegistry """