Sync jobs should save job exceptions and results (#1799)

* Sync jobs should save job exceptions and results

* Make job.handle_success() and job.handle_failure() private methods
main
Selwin Ong 2 years ago committed by GitHub
parent 27cbf48ad4
commit 83fa0adf15
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1224,7 +1224,7 @@ class Job:
""" """
return default_ttl if self.ttl is None else self.ttl return default_ttl if self.ttl is None else self.ttl
def get_result_ttl(self, default_ttl: Optional[int] = None) -> Optional[int]: def get_result_ttl(self, default_ttl: int) -> int:
"""Returns ttl for a job that determines how long a jobs result will """Returns ttl for a job that determines how long a jobs result will
be persisted. In the future, this method will also be responsible be persisted. In the future, this method will also be responsible
for determining ttl for repeated jobs. for determining ttl for repeated jobs.
@ -1287,6 +1287,52 @@ class Job:
self.origin, connection=self.connection, job_class=self.__class__, serializer=self.serializer self.origin, connection=self.connection, job_class=self.__class__, serializer=self.serializer
) )
@property
def finished_job_registry(self):
    """Return the FinishedJobRegistry for this job's origin queue.

    Imported lazily to avoid a circular import between job and registry
    modules.
    """
    from .registry import FinishedJobRegistry

    registry_kwargs = dict(
        connection=self.connection,
        job_class=self.__class__,
        serializer=self.serializer,
    )
    return FinishedJobRegistry(self.origin, **registry_kwargs)
def _handle_success(self, result_ttl: int, pipeline: 'Pipeline'):
    """Persist the outcome of a successful execution.

    Marks the job FINISHED, saves it (optionally embedding the result in
    the job hash), records a Result stream entry when supported, and adds
    the job to the FinishedJobRegistry. All writes go through `pipeline`;
    the caller is responsible for executing it.

    Args:
        result_ttl: how long (seconds) the result should be kept;
            0 means the job is not added to the finished registry.
        pipeline: Redis pipeline collecting the writes.
    """
    self.set_status(JobStatus.FINISHED, pipeline=pipeline)
    # The return value lives in the job hash only when the server lacks
    # Redis streams support (Redis < 5.0); otherwise it goes to a Result.
    store_result_in_hash = not self.supports_redis_streams
    # include_meta=False so we don't clobber the user's meta dictionary.
    self.save(pipeline=pipeline, include_meta=False, include_result=store_result_in_hash)
    if self.supports_redis_streams:
        # Result creation should eventually move into job.save() once
        # support for Redis < 5.0 is dropped.
        from .results import Result

        Result.create(
            self, Result.Type.SUCCESSFUL, return_value=self._result, ttl=result_ttl, pipeline=pipeline
        )
    if result_ttl != 0:
        self.finished_job_registry.add(self, result_ttl, pipeline)
def _handle_failure(self, exc_string: str, pipeline: 'Pipeline'):
    """Persist the outcome of a failed execution.

    Adds the job to the FailedJobRegistry (embedding the exception text in
    the job hash only when Redis streams are unavailable) and, when
    supported, records a failure Result stream entry. All writes go
    through `pipeline`; the caller executes it.

    Args:
        exc_string: formatted traceback of the failure.
        pipeline: Redis pipeline collecting the writes.
    """
    registry = self.failed_job_registry
    registry.add(
        self,
        ttl=self.failure_ttl,
        exc_string=exc_string,
        pipeline=pipeline,
        # Only store the exception on the job hash for Redis < 5.0.
        _save_exc_to_job=not self.supports_redis_streams,
    )
    if self.supports_redis_streams:
        from .results import Result

        Result.create_failure(self, self.failure_ttl, exc_string=exc_string, pipeline=pipeline)
def get_retry_interval(self) -> int: def get_retry_interval(self) -> int:
"""Returns the desired retry interval. """Returns the desired retry interval.
If number of retries is bigger than length of intervals, the first If number of retries is bigger than length of intervals, the first

@ -1,7 +1,9 @@
import uuid import logging
import sys import sys
import traceback
import uuid
import warnings import warnings
import logging
from collections import namedtuple from collections import namedtuple
from datetime import datetime, timezone, timedelta from datetime import datetime, timezone, timedelta
from functools import total_ordering from functools import total_ordering
@ -769,9 +771,11 @@ class Queue:
Job: _description_ Job: _description_
""" """
job.perform() job.perform()
job.set_status(JobStatus.FINISHED) result_ttl = job.get_result_ttl(default_ttl=DEFAULT_RESULT_TTL)
job.save(include_meta=False) with self.connection.pipeline() as pipeline:
job.cleanup(job.get_result_ttl(default_ttl=DEFAULT_RESULT_TTL)) job._handle_success(result_ttl=result_ttl, pipeline=pipeline)
job.cleanup(result_ttl, pipeline=pipeline)
pipeline.execute()
return job return job
@classmethod @classmethod
@ -1024,7 +1028,12 @@ class Queue:
try: try:
job = self.run_job(job) job = self.run_job(job)
except: # noqa except: # noqa
job.set_status(JobStatus.FAILED) with self.connection.pipeline() as pipeline:
job.set_status(JobStatus.FAILED, pipeline=pipeline)
exc_string = ''.join(traceback.format_exception(*sys.exc_info()))
job._handle_failure(exc_string, pipeline)
pipeline.execute()
if job.failure_callback: if job.failure_callback:
job.failure_callback(job, self.connection, *sys.exc_info()) job.failure_callback(job, self.connection, *sys.exc_info())
else: else:

@ -9,18 +9,15 @@ import time
import traceback import traceback
import warnings import warnings
from typing import TYPE_CHECKING, Type, List, Dict, Any
if TYPE_CHECKING:
from redis import Redis
from redis.client import Pipeline
from datetime import timedelta from datetime import timedelta
from enum import Enum from enum import Enum
from uuid import uuid4 from uuid import uuid4
from random import shuffle from random import shuffle
from typing import Callable, List, Optional from typing import Callable, List, Optional, TYPE_CHECKING, Type
if TYPE_CHECKING:
from redis import Redis
from redis.client import Pipeline
try: try:
from signal import SIGKILL from signal import SIGKILL
@ -47,8 +44,7 @@ from .exceptions import DeserializationError, DequeueTimeout, ShutDownImminentEx
from .job import Job, JobStatus from .job import Job, JobStatus
from .logutils import setup_loghandlers from .logutils import setup_loghandlers
from .queue import Queue from .queue import Queue
from .registry import FailedJobRegistry, StartedJobRegistry, clean_registries from .registry import StartedJobRegistry, clean_registries
from .results import Result
from .scheduler import RQScheduler from .scheduler import RQScheduler
from .suspension import is_suspended from .suspension import is_suspended
from .timeouts import JobTimeoutException, HorseMonitorTimeoutException, UnixSignalDeathPenalty from .timeouts import JobTimeoutException, HorseMonitorTimeoutException, UnixSignalDeathPenalty
@ -139,7 +135,7 @@ class Worker:
worker_keys = get_keys(queue=queue, connection=connection) worker_keys = get_keys(queue=queue, connection=connection)
workers = [ workers = [
cls.find_by_key( cls.find_by_key(
as_text(key), connection=connection, job_class=job_class, queue_class=queue_class, serializer=serializer key, connection=connection, job_class=job_class, queue_class=queue_class, serializer=serializer
) )
for key in worker_keys for key in worker_keys
] ]
@ -1081,21 +1077,7 @@ class Worker:
started_job_registry.remove(job, pipeline=pipeline) started_job_registry.remove(job, pipeline=pipeline)
if not self.disable_default_exception_handler and not retry: if not self.disable_default_exception_handler and not retry:
failed_job_registry = FailedJobRegistry( job._handle_failure(exc_string, pipeline=pipeline)
job.origin, job.connection, job_class=self.job_class, serializer=job.serializer
)
# Exception should be saved in job hash if server
# doesn't support Redis streams
_save_exc_to_job = not self.supports_redis_streams
failed_job_registry.add(
job,
ttl=job.failure_ttl,
exc_string=exc_string,
pipeline=pipeline,
_save_exc_to_job=_save_exc_to_job,
)
if self.supports_redis_streams:
Result.create_failure(job, job.failure_ttl, exc_string=exc_string, pipeline=pipeline)
with suppress(redis.exceptions.ConnectionError): with suppress(redis.exceptions.ConnectionError):
pipeline.execute() pipeline.execute()
@ -1142,19 +1124,8 @@ class Worker:
result_ttl = job.get_result_ttl(self.default_result_ttl) result_ttl = job.get_result_ttl(self.default_result_ttl)
if result_ttl != 0: if result_ttl != 0:
self.log.debug('Setting job %s status to finished', job.id) self.log.debug(f"Saving job {job.id}'s successful execution result")
job.set_status(JobStatus.FINISHED, pipeline=pipeline) job._handle_success(result_ttl, pipeline=pipeline)
# Result should be saved in job hash only if server
# doesn't support Redis streams
include_result = not self.supports_redis_streams
# Don't clobber user's meta dictionary!
job.save(pipeline=pipeline, include_meta=False, include_result=include_result)
if self.supports_redis_streams:
Result.create(
job, Result.Type.SUCCESSFUL, return_value=job._result, ttl=result_ttl, pipeline=pipeline
)
finished_job_registry = queue.finished_job_registry
finished_job_registry.add(job, result_ttl, pipeline)
job.cleanup(result_ttl, pipeline=pipeline, remove_from_queue=False) job.cleanup(result_ttl, pipeline=pipeline, remove_from_queue=False)
self.log.debug('Removing job %s from StartedJobRegistry', job.id) self.log.debug('Removing job %s from StartedJobRegistry', job.id)

@ -51,7 +51,7 @@ def unregister(worker: 'Worker', pipeline: Optional['Pipeline'] = None):
connection.execute() connection.execute()
def get_keys(queue: Optional['Queue'] = None, connection: Optional['Redis'] = None) -> Set[Any]: def get_keys(queue: Optional['Queue'] = None, connection: Optional['Redis'] = None) -> Set[str]:
"""Returns a list of worker keys for a given queue. """Returns a list of worker keys for a given queue.
Args: Args:

@ -529,11 +529,9 @@ class TestJob(RQTestCase):
job = Job.create(func=fixtures.say_hello, result_ttl=job_result_ttl) job = Job.create(func=fixtures.say_hello, result_ttl=job_result_ttl)
job.save() job.save()
self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), job_result_ttl) self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), job_result_ttl)
self.assertEqual(job.get_result_ttl(), job_result_ttl)
job = Job.create(func=fixtures.say_hello) job = Job.create(func=fixtures.say_hello)
job.save() job.save()
self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), default_ttl) self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), default_ttl)
self.assertEqual(job.get_result_ttl(), None)
def test_get_job_ttl(self): def test_get_job_ttl(self):
"""Getting job TTL.""" """Getting job TTL."""

@ -14,7 +14,7 @@ from rq.results import Result, get_key
from rq.utils import get_version, utcnow from rq.utils import get_version, utcnow
from rq.worker import Worker from rq.worker import Worker
from .fixtures import say_hello from .fixtures import say_hello, div_by_zero
@unittest.skipIf(get_version(Redis()) < (5, 0, 0), 'Skip if Redis server < 5.0') @unittest.skipIf(get_version(Redis()) < (5, 0, 0), 'Skip if Redis server < 5.0')
@ -123,6 +123,8 @@ class TestScheduledJobRegistry(RQTestCase):
self.assertEqual(job.result, 'Success') self.assertEqual(job.result, 'Success')
with patch('rq.worker.Worker.supports_redis_streams', new_callable=PropertyMock) as mock: with patch('rq.worker.Worker.supports_redis_streams', new_callable=PropertyMock) as mock:
with patch('rq.job.Job.supports_redis_streams', new_callable=PropertyMock) as job_mock:
job_mock.return_value = False
mock.return_value = False mock.return_value = False
worker = Worker([queue]) worker = Worker([queue])
worker.register_birth() worker.register_birth()
@ -164,6 +166,8 @@ class TestScheduledJobRegistry(RQTestCase):
self.assertEqual(job.exc_info, 'Error') self.assertEqual(job.exc_info, 'Error')
with patch('rq.worker.Worker.supports_redis_streams', new_callable=PropertyMock) as mock: with patch('rq.worker.Worker.supports_redis_streams', new_callable=PropertyMock) as mock:
with patch('rq.job.Job.supports_redis_streams', new_callable=PropertyMock) as job_mock:
job_mock.return_value = False
mock.return_value = False mock.return_value = False
worker = Worker([queue]) worker = Worker([queue])
worker.register_birth() worker.register_birth()
@ -199,3 +203,14 @@ class TestScheduledJobRegistry(RQTestCase):
# Returns None if latest result is a failure # Returns None if latest result is a failure
Result.create_failure(job, ttl=10, exc_string='exception') Result.create_failure(job, ttl=10, exc_string='exception')
self.assertIsNone(job.return_value(refresh=True)) self.assertIsNone(job.return_value(refresh=True))
def test_job_return_value_sync(self):
    """Test job.return_value when queue.is_async=False"""
    queue = Queue(connection=self.connection, is_async=False)
    job = queue.enqueue(say_hello)
    # A sync queue runs the job immediately, so a result exists right away
    self.assertIsNotNone(job.return_value())
    # A failing job on a sync queue records a FAILED result
    job = queue.enqueue(div_by_zero)
    self.assertEqual(job.latest_result().type, Result.Type.FAILED)

Loading…
Cancel
Save