refactored worker code

Moved code into a new handle_job_success() method and narrowed the scope in
which Redis pipelines are used.
main
Stefan Hammer 8 years ago
parent 44f98693c7
commit a0cee2d2a0
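
For orientation, the shape this commit leaves the worker in, condensed from the
hunks below. This is a sketch rather than the exact source; registry
construction and the death-penalty timeout wrapper are elided, and the final
return value is assumed rather than shown in the diff:

    def perform_job(self, job, queue):
        self.prepare_job_execution(job)
        try:
            rv = job.perform()                    # runs under the timeout guard
            job._result = rv
            self.handle_job_success(job=job, queue=queue,
                                    started_job_registry=started_job_registry)
        except Exception:
            self.handle_job_failure(job=job,
                                    started_job_registry=started_job_registry)
            self.handle_exception(job, *sys.exc_info())
            return False
        return True                               # assumed; not shown in the diff

Each handler now opens, executes, and defensively swallows its own pipeline
rather than sharing one owned by perform_job().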

@@ -536,15 +536,9 @@ class Worker(object):
                 # Job completed and its ttl has expired
                 break
             if job_status not in [JobStatus.FINISHED, JobStatus.FAILED]:
-                with self.connection._pipeline() as pipeline:
-                    self.handle_job_failure(
-                        job=job,
-                        pipeline=pipeline
-                    )
-                    try:
-                        pipeline.execute()
-                    except Exception:
-                        pass
+                self.handle_job_failure(
+                    job=job
+                )
 
             #Unhandled failure: move the job to the failed queue
             self.log.warning(
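
The call site shrinks because handle_job_failure() now owns its pipeline and
executes it defensively. A minimal, self-contained redis-py sketch of that
pattern; the key names are hypothetical, and rq actually goes through its
connection._pipeline() compatibility wrapper rather than pipeline() directly:

    import redis

    r = redis.Redis()
    with r.pipeline() as pipe:
        pipe.hset('rq:job:abc', 'status', 'failed')    # hypothetical job hash
        pipe.srem('rq:registry:started', 'abc')        # hypothetical registry set
        try:
            pipe.execute()
        except Exception:
            # Swallow Redis errors so later exception handlers still run,
            # even if Redis is down (mirrors the comment in this commit).
            pass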
@@ -631,8 +625,7 @@ class Worker(object):
     def handle_job_failure(
             self,
             job,
-            started_job_registry=None,
-            pipeline=None
+            started_job_registry=None
     ):
         """Handles the failure of an executing job by:
             1. Setting the job status to failed
@@ -640,6 +633,7 @@ class Worker(object):
             3. Setting the worker's current job to None
         """
+        with self.connection._pipeline() as pipeline:
             if started_job_registry is None:
                 started_job_registry = StartedJobRegistry(
                     job.origin,
@@ -648,31 +642,20 @@ class Worker(object):
             job.set_status(JobStatus.FAILED, pipeline=pipeline)
             started_job_registry.remove(job, pipeline=pipeline)
             self.set_current_job_id(None, pipeline=pipeline)
+            try:
+                pipeline.execute()
+            except Exception:
+                # Ensure that custom exception handlers are called
+                # even if Redis is down
+                pass
 
-    def perform_job(self, job, queue):
-        """Performs the actual work of a job. Will/should only be called
-        inside the work horse's process.
-        """
-        self.prepare_job_execution(job)
-
-        with self.connection._pipeline() as pipeline:
-            push_connection(self.connection)
-
-            started_job_registry = StartedJobRegistry(job.origin, self.connection)
-
-            try:
-                with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
-                    rv = job.perform()
-
-                # Pickle the result in the same try-except block since we need
-                # to use the same exc handling when pickling fails
-                job._result = rv
-
-                result_ttl = job.get_result_ttl(self.default_result_ttl)
-                if result_ttl != 0:
-                    job.ended_at = utcnow()
-
+    def handle_job_success(
+            self,
+            job,
+            queue,
+            started_job_registry
+    ):
+        with self.connection._pipeline() as pipeline:
             while True:
                 try:
                     # if dependencies are inserted after enqueue_dependents
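
handle_job_success() scopes its pipeline to just the success bookkeeping. In
redis-py, a pipeline buffers commands client-side and commits them as one
MULTI/EXEC transaction on execute(), so a narrower with-block keeps unrelated
work out of the transaction. Minimal sketch with hypothetical keys:

    import redis

    r = redis.Redis()
    with r.pipeline() as pipe:                       # transactional by default
        pipe.set('rq:job:abc:status', 'finished')    # buffered, not yet sent
        pipe.expire('rq:job:abc', 500)
        pipe.execute()                               # one round trip, atomic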
@@ -683,6 +666,7 @@ class Worker(object):
                     self.set_current_job_id(None, pipeline=pipeline)
 
+                    result_ttl = job.get_result_ttl(self.default_result_ttl)
                     if result_ttl != 0:
                         job.set_status(JobStatus.FINISHED, pipeline=pipeline)
                         job.save(pipeline=pipeline)
@@ -700,18 +684,36 @@ class Worker(object):
                 except WatchError:
                     continue
 
+    def perform_job(self, job, queue):
+        """Performs the actual work of a job. Will/should only be called
+        inside the work horse's process.
+        """
+        self.prepare_job_execution(job)
+
+        push_connection(self.connection)
+
+        started_job_registry = StartedJobRegistry(job.origin, self.connection)
+        try:
+            with self.death_penalty_class(job.timeout or self.queue_class.DEFAULT_TIMEOUT):
+                rv = job.perform()
+
+            job.ended_at = utcnow()
+
+            # Pickle the result in the same try-except block since we need
+            # to use the same exc handling when pickling fails
+            job._result = rv
+
+            self.handle_job_success(
+                job=job,
+                queue=queue,
+                started_job_registry=started_job_registry
+            )
         except Exception:
             self.handle_job_failure(
                 job=job,
-                started_job_registry=started_job_registry,
-                pipeline=pipeline
+                started_job_registry=started_job_registry
             )
-            try:
-                pipeline.execute()
-            except Exception:
-                # Ensure that custom exception handlers are called
-                # even if Redis is down
-                pass
             self.handle_exception(job, *sys.exc_info())
             return False
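
The while True / except WatchError: continue loop kept inside
handle_job_success() is redis-py's standard optimistic-locking idiom: WATCH a
key, read it, queue the transaction with multi(), and retry from scratch if
the key changed before EXEC. Self-contained sketch, key names hypothetical:

    import redis
    from redis import WatchError

    r = redis.Redis()
    with r.pipeline() as pipe:
        while True:
            try:
                pipe.watch('rq:job:abc:dependents')  # EXEC aborts if this changes
                deps = pipe.smembers('rq:job:abc:dependents')  # immediate read
                pipe.multi()                         # start buffering the transaction
                pipe.delete('rq:job:abc:dependents')
                pipe.execute()                       # raises WatchError on conflict
                break
            except WatchError:
                continue                             # another client raced us; retry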
@@ -723,6 +725,7 @@ class Worker(object):
             log_result = "{0!r}".format(as_text(text_type(rv)))
             self.log.debug('Result: {0}'.format(yellow(log_result)))
 
+        result_ttl = job.get_result_ttl(self.default_result_ttl)
         if result_ttl == 0:
             self.log.info('Result discarded immediately')
         elif result_ttl > 0:
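
For context on this branch: result_ttl controls how long a finished job's
return value is kept in Redis. A usage sketch with standard rq enqueue calls;
my_task is a placeholder, and this is illustration rather than part of the
commit:

    from redis import Redis
    from rq import Queue

    def my_task():
        return 42

    q = Queue(connection=Redis())
    q.enqueue(my_task, result_ttl=0)     # discard the result immediately
    q.enqueue(my_task, result_ttl=600)   # keep the result for 600 seconds
    q.enqueue(my_task, result_ttl=-1)    # keep the result forever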

@@ -578,7 +578,7 @@ class TestWorker(RQTestCase):
         def new_enqueue_dependents(self, job, *args, **kwargs):
             orig_enqueue_dependents(self, job, *args, **kwargs)
-            if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue.id == job.id:
+            if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
                 Queue._add_enqueue = None
                 Queue().enqueue_call(say_hello, depends_on=job)
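
The added is not None check matters because the test sets Queue._add_enqueue
to None a moment later: hasattr() still returns True for an attribute whose
value is None, so the old condition could raise AttributeError on the .id
access. Minimal repro with a stand-in class:

    class Q:                        # stand-in, not rq's Queue
        pass

    Q._add_enqueue = None           # the attribute exists, its value is None
    assert hasattr(Q, '_add_enqueue')   # True: hasattr alone is not enough
    # Q._add_enqueue.id             # AttributeError: 'NoneType' object has no attribute 'id'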
