Add docs folder.
@@ -0,0 +1 @@
|
||||
python-rq.org
|
@@ -0,0 +1,50 @@
|
||||
baseurl: /
|
||||
exclude: design
|
||||
permalink: pretty
|
||||
|
||||
navigation:
|
||||
- text: Home
|
||||
url: /
|
||||
- text: Docs
|
||||
url: /docs/
|
||||
subs:
|
||||
- text: Queues
|
||||
url: /docs/
|
||||
- text: Workers
|
||||
url: /docs/workers/
|
||||
- text: Results
|
||||
url: /docs/results/
|
||||
- text: Jobs
|
||||
url: /docs/jobs/
|
||||
- text: Monitoring
|
||||
url: /docs/monitoring/
|
||||
- text: Connections
|
||||
url: /docs/connections/
|
||||
- text: Exceptions
|
||||
url: /docs/exceptions/
|
||||
- text: Testing
|
||||
url: /docs/testing/
|
||||
- text: Patterns
|
||||
url: /patterns/
|
||||
subs:
|
||||
- text: Heroku
|
||||
url: /patterns/
|
||||
- text: Django
|
||||
url: /patterns/django/
|
||||
- text: Sentry
|
||||
url: /patterns/sentry/
|
||||
- text: Supervisor
|
||||
url: /patterns/supervisor/
|
||||
- text: Contributing
|
||||
url: /contrib/
|
||||
subs:
|
||||
- text: Internals
|
||||
url: /contrib/
|
||||
- text: GitHub
|
||||
url: /contrib/github/
|
||||
- text: Documentation
|
||||
url: /contrib/docs/
|
||||
- text: Testing
|
||||
url: /contrib/testing/
|
||||
- text: Vagrant
|
||||
url: /contrib/vagrant/
|
@@ -0,0 +1,6 @@
|
||||
<script type="text/javascript">
|
||||
// Auto-forward for incoming links on nvie.com
|
||||
if ("nvie.com" === document.location.hostname) {
|
||||
document.location = 'http://python-rq.org';
|
||||
}
|
||||
</script>
|
@@ -0,0 +1,13 @@
|
||||
<script type="text/javascript">
|
||||
|
||||
var _gaq = _gaq || [];
|
||||
_gaq.push(['_setAccount', 'UA-27167945-1']);
|
||||
_gaq.push(['_trackPageview']);
|
||||
|
||||
(function() {
|
||||
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
|
||||
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
|
||||
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
|
||||
})();
|
||||
|
||||
</script>
|
@@ -0,0 +1,16 @@
|
||||
---
|
||||
layout: default
|
||||
---
|
||||
<div class="subnav">
|
||||
<ul class="inline">
|
||||
{% for link in site.navigation %}
|
||||
{% if link.url == "contrib/" %}
|
||||
{% for sublink in link.subs %}
|
||||
<li><a href="{{ sublink.url }}">{{ sublink.text }}</a></li>
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
{{ content }}
|
@@ -0,0 +1,38 @@
|
||||
<!DOCTYPE html>
<html lang="en">
<head>
<base href="{{ site.baseurl }}" />
|
||||
<meta charset="UTF-8">
|
||||
<title>{{ page.title }}</title>
|
||||
<meta content="width=600" name="viewport">
|
||||
<meta content="all" name="robots">
|
||||
<link href="http://fonts.googleapis.com/css?family=Lato:light,regular,regularitalic,lightitalic,bold&v1" media="all" rel="stylesheet" type="text/css">
|
||||
<link href='http://fonts.googleapis.com/css?family=Droid+Sans+Mono' media="all" rel='stylesheet' type='text/css'>
|
||||
<link href="/css/screen.css" media="screen" rel="stylesheet" type="text/css">
|
||||
<link href="/css/syntax.css" media="screen" rel="stylesheet" type="text/css">
|
||||
<link href="/favicon.png" rel="icon" type="image/png">
|
||||
</head>
|
||||
<body>
|
||||
<header>
|
||||
<a href="http://git.io/rq"><img class="nomargin" style="position: absolute; top: 0; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_orange_ff7600.png" alt="Fork me on GitHub"></a>
|
||||
|
||||
<ul class="inline">
|
||||
{% for link in site.navigation %}
|
||||
<li><a href="{{ link.url }}">{{ link.text }}</a></li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</header>
|
||||
|
||||
<section class="container">
|
||||
{{ content }}
|
||||
</section>
|
||||
|
||||
<footer>
|
||||
<p>RQ is written by <a href="http://nvie.com/about">Vincent Driessen</a>.</p>
|
||||
<p>It is open sourced under the terms of the <a href="https://raw.github.com/nvie/rq/master/LICENSE">BSD license</a>.</p>
|
||||
</footer>
|
||||
|
||||
{% include forward.html %}
|
||||
{% include ga_tracking.html %}
|
||||
</body>
|
||||
</html>
|
@@ -0,0 +1,16 @@
|
||||
---
|
||||
layout: default
|
||||
---
|
||||
<div class="subnav">
|
||||
<ul class="inline">
|
||||
{% for link in site.navigation %}
|
||||
{% if link.url == "/docs/" %}
|
||||
{% for sublink in link.subs %}
|
||||
<li><a href="{{ sublink.url }}">{{ sublink.text }}</a></li>
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
{{ content }}
|
@@ -0,0 +1,16 @@
|
||||
---
|
||||
layout: default
|
||||
---
|
||||
<div class="subnav">
|
||||
<ul class="inline">
|
||||
{% for link in site.navigation %}
|
||||
{% if link.url == "/patterns/" %}
|
||||
{% for sublink in link.subs %}
|
||||
<li><a href="{{ sublink.url }}">{{ sublink.text }}</a></li>
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
{{ content }}
|
@@ -0,0 +1,16 @@
|
||||
---
|
||||
title: "Documentation"
|
||||
layout: contrib
|
||||
---
|
||||
|
||||
### Running docs locally
|
||||
|
||||
To build the docs, run [jekyll](http://jekyllrb.com/):
|
||||
|
||||
```
|
||||
jekyll serve
|
||||
```
|
||||
|
||||
If you'd rather use Vagrant, see [these instructions][v].
|
||||
|
||||
[v]: {{site.baseurl}}contrib/vagrant/
|
@@ -0,0 +1,11 @@
|
||||
---
|
||||
title: "Contributing to RQ"
|
||||
layout: contrib
|
||||
---
|
||||
|
||||
If you'd like to contribute to RQ, simply [fork](https://github.com/nvie/rq)
|
||||
the project on GitHub and submit a pull request.
|
||||
|
||||
Please bear in mind the philosophy behind RQ: it should remain small and
simple rather than packed with features, and it should value insightfulness over
|
||||
performance.
|
@@ -0,0 +1,63 @@
|
||||
---
|
||||
title: "RQ: Simple job queues for Python"
|
||||
layout: contrib
|
||||
---
|
||||
|
||||
This document describes how RQ works internally when enqueuing or dequeueing.
|
||||
|
||||
|
||||
## Enqueueing internals
|
||||
|
||||
Whenever a function call gets enqueued, RQ does two things:
|
||||
|
||||
* It creates a job instance representing the delayed function call and persists
|
||||
it in a Redis [hash][h]; and
|
||||
* It pushes the given job's ID onto the requested Redis queue.
|
||||
|
||||
All jobs are stored in Redis under the `rq:job:` prefix, for example:
|
||||
|
||||
rq:job:55528e58-9cac-4e05-b444-8eded32e76a1
|
||||
|
||||
The keys of such a job [hash][h] are:
|
||||
|
||||
created_at => '2012-02-13 14:35:16+0000'
|
||||
enqueued_at => '2012-02-13 14:35:16+0000'
|
||||
origin => 'default'
|
||||
data => <pickled representation of the function call>
|
||||
description => "count_words_at_url('http://nvie.com')"
|
||||
|
||||
Depending on whether the job has run successfully or has failed, the
|
||||
following keys are available, too:
|
||||
|
||||
ended_at => '2012-02-13 14:41:33+0000'
|
||||
result => <pickled return value>
|
||||
exc_info => <exception information>
|
||||
|
||||
[h]: http://redis.io/topics/data-types#hashes
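
As an illustration, here is a minimal sketch of inspecting such a hash
directly with redis-py (reusing the example job ID from above):

    from redis import Redis

    redis = Redis()
    # Fetch the raw fields RQ stored for this job
    job_data = redis.hgetall('rq:job:55528e58-9cac-4e05-b444-8eded32e76a1')
    # job_data maps field names such as 'origin' and 'description'
    # to the values described above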
|
||||
|
||||
|
||||
## Dequeueing internals
|
||||
|
||||
Whenever a dequeue is requested, an RQ worker does the following:
|
||||
|
||||
* It pops a job ID from the queue, and fetches the job data belonging to that
|
||||
job ID;
|
||||
* It starts executing the function call.
|
||||
* If the job succeeds, its return value is written to the `result` hash key and
|
||||
the hash itself is expired after 500 seconds; or
|
||||
* If the job fails, the exception information is written to the `exc_info`
|
||||
hash key and the job ID is pushed onto the `failed` queue.
|
||||
|
||||
|
||||
## Cancelling jobs
|
||||
|
||||
Any job ID that is encountered by a worker for which no job hash is found in
|
||||
Redis is simply ignored. This makes it easy to cancel jobs by simply removing
|
||||
the job hash. In Python:
|
||||
|
||||
from rq import cancel_job
|
||||
cancel_job('2eafc1e6-48c2-464b-a0ff-88fd199d039c')
|
||||
|
||||
Note that it is irrelevant on which queue the job resides. When a worker
|
||||
eventually pops the job ID from the queue and notes that the job hash does not
|
||||
exist (anymore), it simply discards the job ID and continues with the next.
|
@@ -0,0 +1,16 @@
|
||||
---
|
||||
title: "Testing"
|
||||
layout: contrib
|
||||
---
|
||||
|
||||
### Testing RQ locally
|
||||
|
||||
To run the tests locally:
|
||||
|
||||
```
|
||||
tox
|
||||
```
|
||||
|
||||
If you'd rather use Vagrant, see [these instructions][v].
|
||||
|
||||
[v]: {{site.baseurl}}contrib/vagrant/
|
@@ -0,0 +1,50 @@
|
||||
---
|
||||
title: "Using Vagrant"
|
||||
layout: contrib
|
||||
---
|
||||
|
||||
If you don't feel like installing dependencies on your main development
|
||||
machine, you can use [Vagrant](https://www.vagrantup.com/). Here's how you run
|
||||
your tests and build the documentation on Vagrant.
|
||||
|
||||
|
||||
### Running tests in Vagrant
|
||||
|
||||
To create a working Vagrant environment, use the following:
|
||||
|
||||
```
|
||||
vagrant init ubuntu/trusty64
|
||||
vagrant up
|
||||
vagrant ssh -- "sudo apt-get -y install redis-server python-dev python-pip"
|
||||
vagrant ssh -- "sudo pip install --no-input redis hiredis mock"
|
||||
vagrant ssh -- "(cd /vagrant; ./run_tests)"
|
||||
```
|
||||
|
||||
|
||||
### Running docs on Vagrant
|
||||
|
||||
```
|
||||
vagrant init ubuntu/trusty64
|
||||
vagrant up
|
||||
vagrant ssh -- "sudo apt-get -y install ruby-dev nodejs"
|
||||
vagrant ssh -- "sudo gem install jekyll"
|
||||
vagrant ssh -- "(cd /vagrant; jekyll serve)"
|
||||
```
|
||||
|
||||
You'll also need to add a port forward entry to your `Vagrantfile`:
|
||||
|
||||
```
|
||||
config.vm.network "forwarded_port", guest: 4000, host: 4001
|
||||
```
|
||||
|
||||
Then you can access the docs at:
|
||||
|
||||
```
|
||||
http://127.0.0.1:4001
|
||||
```
|
||||
|
||||
You may also need to forcibly kill Jekyll after pressing Ctrl+C:
|
||||
|
||||
```
|
||||
vagrant ssh -- "sudo killall -9 jekyll"
|
||||
```
|
@@ -0,0 +1,48 @@
|
||||
/* http://meyerweb.com/eric/tools/css/reset/
|
||||
v2.0 | 20110126
|
||||
License: none (public domain)
|
||||
*/
|
||||
|
||||
html, body, div, span, applet, object, iframe,
|
||||
h1, h2, h3, h4, h5, h6, p, blockquote, pre,
|
||||
a, abbr, acronym, address, big, cite, code,
|
||||
del, dfn, em, img, ins, kbd, q, s, samp,
|
||||
small, strike, strong, sub, sup, tt, var,
|
||||
b, u, i, center,
|
||||
dl, dt, dd, ol, ul, li,
|
||||
fieldset, form, label, legend,
|
||||
table, caption, tbody, tfoot, thead, tr, th, td,
|
||||
article, aside, canvas, details, embed,
|
||||
figure, figcaption, footer, header, hgroup,
|
||||
menu, nav, output, ruby, section, summary,
|
||||
time, mark, audio, video {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
border: 0;
|
||||
font-size: 100%;
|
||||
font: inherit;
|
||||
vertical-align: baseline;
|
||||
}
|
||||
/* HTML5 display-role reset for older browsers */
|
||||
article, aside, details, figcaption, figure,
|
||||
footer, header, hgroup, menu, nav, section {
|
||||
display: block;
|
||||
}
|
||||
body {
|
||||
line-height: 1;
|
||||
}
|
||||
ol, ul {
|
||||
list-style: none;
|
||||
}
|
||||
blockquote, q {
|
||||
quotes: none;
|
||||
}
|
||||
blockquote:before, blockquote:after,
|
||||
q:before, q:after {
|
||||
content: '';
|
||||
content: none;
|
||||
}
|
||||
table {
|
||||
border-collapse: collapse;
|
||||
border-spacing: 0;
|
||||
}
|
@@ -0,0 +1,347 @@
|
||||
@import url("reset.css");
|
||||
|
||||
html
|
||||
{
|
||||
font-size: 62.5%;
|
||||
-webkit-text-size-adjust: 110%;
|
||||
}
|
||||
|
||||
body
|
||||
{
|
||||
background: #DBE0DF url(../img/bg.png) 50% 0 repeat-y !important;
|
||||
height: 100%;
|
||||
font-family: Lato, sans-serif;
|
||||
font-size: 150%;
|
||||
font-weight: 300;
|
||||
line-height: 1.55;
|
||||
padding: 0 30px 80px;
|
||||
}
|
||||
|
||||
header
|
||||
{
|
||||
background: url(../img/ribbon.png) no-repeat 50% 0;
|
||||
max-width: 430px;
|
||||
width: 100%;
|
||||
text-align: center;
|
||||
|
||||
padding: 240px 0 1em 0;
|
||||
border-bottom: 1px dashed #e1e1e1;
|
||||
margin: 0 auto 2em auto;
|
||||
}
|
||||
|
||||
ul.inline
|
||||
{
|
||||
list-style-type: none;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
ul.inline li
|
||||
{
|
||||
display: inline;
|
||||
margin: 0 10px;
|
||||
}
|
||||
|
||||
.subnav ul.inline li
|
||||
{
|
||||
margin: 0 6px;
|
||||
}
|
||||
|
||||
header a
|
||||
{
|
||||
color: #3a3a3a;
|
||||
border: 0;
|
||||
font-size: 110%;
|
||||
font-weight: 600;
|
||||
text-decoration: none;
|
||||
transition: color linear 0.1s;
|
||||
-webkit-transition: color linear 0.1s;
|
||||
-moz-transition: color linear 0.1s;
|
||||
}
|
||||
|
||||
header a:hover
|
||||
{
|
||||
border-bottom-color: rgba(0, 0, 0, 0.1);
|
||||
color: rgba(0, 0, 0, 0.4);
|
||||
}
|
||||
|
||||
.subnav
|
||||
{
|
||||
text-align: center;
|
||||
font-size: 94%;
|
||||
margin: -3em auto 2em auto;
|
||||
}
|
||||
|
||||
.subnav li
|
||||
{
|
||||
background-color: white;
|
||||
padding: 0 4px;
|
||||
}
|
||||
|
||||
.subnav a
|
||||
{
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.container
|
||||
{
|
||||
margin: 0 auto;
|
||||
max-width: 430px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
footer
|
||||
{
|
||||
margin: 2em auto;
|
||||
max-width: 430px;
|
||||
width: 100%;
|
||||
border-top: 1px dashed #e1e1e1;
|
||||
padding-top: 1em;
|
||||
}
|
||||
|
||||
footer p
|
||||
{
|
||||
text-align: center;
|
||||
font-size: 90%;
|
||||
font-style: italic;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
footer a
|
||||
{
|
||||
font-weight: 400;
|
||||
}
|
||||
|
||||
pre
|
||||
{
|
||||
margin: 0 0 1em 1em;
|
||||
padding: 1em 1.8em;
|
||||
color: #222;
|
||||
border-bottom: 1px solid #ccc;
|
||||
border-right: 1px solid #ccc;
|
||||
background: #F3F3F0 url(../img/bq.png) top left no-repeat;
|
||||
line-height: 1.15em;
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
code
|
||||
{
|
||||
font-family: 'Droid Sans Mono', monospace;
|
||||
font-weight: 400;
|
||||
font-size: 80%;
|
||||
|
||||
line-height: 0.5em;
|
||||
|
||||
border: 1px solid #efeaea;
|
||||
padding: 0.2em 0.4em;
|
||||
}
|
||||
|
||||
pre code
|
||||
{
|
||||
border: none;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
h1
|
||||
{
|
||||
font-size: 280%;
|
||||
font-weight: 400;
|
||||
}
|
||||
|
||||
.ir
|
||||
{
|
||||
display: block;
|
||||
border: 0;
|
||||
text-indent: -999em;
|
||||
overflow: hidden;
|
||||
background-color: transparent;
|
||||
background-repeat: no-repeat;
|
||||
text-align: left;
|
||||
direction: ltr;
|
||||
}
|
||||
|
||||
.ir br
|
||||
{
|
||||
display: none;
|
||||
}
|
||||
|
||||
h1#logo
|
||||
{
|
||||
margin: 0 auto;
|
||||
width: 305px;
|
||||
height: 186px;
|
||||
background-image: url(../img/logo2.png);
|
||||
}
|
||||
|
||||
/*
|
||||
h1:hover:after
|
||||
{
|
||||
color: rgba(0, 0, 0, 0.3);
|
||||
content: attr(title);
|
||||
font-size: 60%;
|
||||
font-weight: 300;
|
||||
margin: 0 0 0 0.5em;
|
||||
}
|
||||
*/
|
||||
|
||||
h2
|
||||
{
|
||||
font-size: 200%;
|
||||
font-weight: 400;
|
||||
margin: 0 0 0.4em;
|
||||
}
|
||||
|
||||
h3
|
||||
{
|
||||
font-size: 135%;
|
||||
font-weight: 400;
|
||||
margin: 0 0 0.25em;
|
||||
}
|
||||
|
||||
p
|
||||
{
|
||||
color: rgba(0, 0, 0, 0.7);
|
||||
margin: 0 0 1em;
|
||||
}
|
||||
|
||||
p:last-child
|
||||
{
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
img
|
||||
{
|
||||
border-radius: 4px;
|
||||
float: left;
|
||||
margin: 6px 12px 15px 0;
|
||||
-moz-border-radius: 4px;
|
||||
-webkit-border-radius: 4px;
|
||||
}
|
||||
|
||||
.nomargin
|
||||
{
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
a
|
||||
{
|
||||
border-bottom: 1px solid rgba(65, 131, 196, 0.1);
|
||||
color: rgb(65, 131, 196);
|
||||
font-weight: 600;
|
||||
text-decoration: none;
|
||||
transition: color linear 0.1s;
|
||||
-webkit-transition: color linear 0.1s;
|
||||
-moz-transition: color linear 0.1s;
|
||||
}
|
||||
|
||||
a:hover
|
||||
{
|
||||
border-bottom-color: rgba(0, 0, 0, 0.1);
|
||||
color: rgba(0, 0, 0, 0.4);
|
||||
}
|
||||
|
||||
em
|
||||
{
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
strong
|
||||
{
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
acronym
|
||||
{
|
||||
border-bottom: 1px dotted rgba(0, 0, 0, 0.1);
|
||||
cursor: help;
|
||||
}
|
||||
|
||||
blockquote
|
||||
{
|
||||
font-style: italic;
|
||||
padding: 1em;
|
||||
}
|
||||
|
||||
ul
|
||||
{
|
||||
list-style: circle;
|
||||
margin: 0 0 1em 2em;
|
||||
color: rgba(0, 0, 0, 0.7);
|
||||
}
|
||||
|
||||
li
|
||||
{
|
||||
font-size: 100%;
|
||||
}
|
||||
|
||||
ol
|
||||
{
|
||||
list-style-type: decimal;
|
||||
margin: 0 0 1em 2em;
|
||||
color: rgba(0, 0, 0, 0.7);
|
||||
}
|
||||
|
||||
|
||||
|
||||
.warning
|
||||
{
|
||||
position: relative;
|
||||
padding: 7px 15px;
|
||||
margin-bottom: 18px;
|
||||
color: #404040;
|
||||
background-color: #eedc94;
|
||||
background-repeat: repeat-x;
|
||||
background-image: -khtml-gradient(linear, left top, left bottom, from(#fceec1), to(#eedc94));
|
||||
background-image: -moz-linear-gradient(top, #fceec1, #eedc94);
|
||||
background-image: -ms-linear-gradient(top, #fceec1, #eedc94);
|
||||
background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #fceec1), color-stop(100%, #eedc94));
|
||||
background-image: -webkit-linear-gradient(top, #fceec1, #eedc94);
|
||||
background-image: -o-linear-gradient(top, #fceec1, #eedc94);
|
||||
background-image: linear-gradient(top, #fceec1, #eedc94);
|
||||
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fceec1', endColorstr='#eedc94', GradientType=0);
|
||||
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
|
||||
border-color: #eedc94 #eedc94 #e4c652;
|
||||
border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
|
||||
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
|
||||
border-width: 1px;
|
||||
border-style: solid;
|
||||
-webkit-border-radius: 4px;
|
||||
-moz-border-radius: 4px;
|
||||
border-radius: 4px;
|
||||
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
|
||||
-moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
|
||||
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
|
||||
}
|
||||
|
||||
|
||||
|
||||
.alert-message .close {
|
||||
*margin-top: 3px;
|
||||
/* IE7 spacing */
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
@media screen and (max-width: 1400px)
|
||||
{
|
||||
body
|
||||
{
|
||||
padding-bottom: 60px;
|
||||
padding-top: 60px;
|
||||
}
|
||||
}
|
||||
|
||||
@media screen and (max-width: 600px)
|
||||
{
|
||||
body
|
||||
{
|
||||
padding-bottom: 40px;
|
||||
padding-top: 30px;
|
||||
}
|
||||
}
|
||||
*/
|
@@ -0,0 +1,61 @@
|
||||
.highlight { background: #ffffff; }
|
||||
.highlight .c { color: #999988; } /* Comment */
|
||||
.highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */
|
||||
.highlight .k { font-weight: bold; color: #555555; } /* Keyword */
|
||||
.highlight .kn { font-weight: bold; color: #555555; } /* Keyword */
|
||||
.highlight .o { font-weight: bold; color: #555555; } /* Operator */
|
||||
.highlight .cm { color: #999988; } /* Comment.Multiline */
|
||||
.highlight .cp { color: #999999; font-weight: bold } /* Comment.Preproc */
|
||||
.highlight .c1 { color: #999988; } /* Comment.Single */
|
||||
.highlight .cs { color: #999999; font-weight: bold; } /* Comment.Special */
|
||||
.highlight .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
|
||||
.highlight .gd .x { color: #000000; background-color: #ffaaaa } /* Generic.Deleted.Specific */
|
||||
.highlight .ge {} /* Generic.Emph */
|
||||
.highlight .gr { color: #aa0000 } /* Generic.Error */
|
||||
.highlight .gh { color: #999999 } /* Generic.Heading */
|
||||
.highlight .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
|
||||
.highlight .gi .x { color: #000000; background-color: #aaffaa } /* Generic.Inserted.Specific */
|
||||
.highlight .go { color: #888888 } /* Generic.Output */
|
||||
.highlight .gp { color: #555555 } /* Generic.Prompt */
|
||||
.highlight .gs { font-weight: bold } /* Generic.Strong */
|
||||
.highlight .gu { color: #aaaaaa } /* Generic.Subheading */
|
||||
.highlight .gt { color: #aa0000 } /* Generic.Traceback */
|
||||
.highlight .kc { font-weight: bold } /* Keyword.Constant */
|
||||
.highlight .kd { font-weight: bold } /* Keyword.Declaration */
|
||||
.highlight .kp { font-weight: bold } /* Keyword.Pseudo */
|
||||
.highlight .kr { font-weight: bold } /* Keyword.Reserved */
|
||||
.highlight .kt { color: #445588; font-weight: bold } /* Keyword.Type */
|
||||
.highlight .m { color: #009999 } /* Literal.Number */
|
||||
.highlight .s { color: #d14 } /* Literal.String */
|
||||
.highlight .na { color: #008080 } /* Name.Attribute */
|
||||
.highlight .nb { color: #0086B3 } /* Name.Builtin */
|
||||
.highlight .nc { color: #445588; font-weight: bold } /* Name.Class */
|
||||
.highlight .no { color: #008080 } /* Name.Constant */
|
||||
.highlight .ni { color: #800080 } /* Name.Entity */
|
||||
.highlight .ne { color: #aa0000; font-weight: bold } /* Name.Exception */
|
||||
.highlight .nf { color: #aa0000; font-weight: bold } /* Name.Function */
|
||||
.highlight .nn { color: #555555 } /* Name.Namespace */
|
||||
.highlight .nt { color: #000080 } /* Name.Tag */
|
||||
.highlight .nv { color: #008080 } /* Name.Variable */
|
||||
.highlight .ow { font-weight: bold } /* Operator.Word */
|
||||
.highlight .w { color: #bbbbbb } /* Text.Whitespace */
|
||||
.highlight .mf { color: #009999 } /* Literal.Number.Float */
|
||||
.highlight .mh { color: #009999 } /* Literal.Number.Hex */
|
||||
.highlight .mi { color: #009999 } /* Literal.Number.Integer */
|
||||
.highlight .mo { color: #009999 } /* Literal.Number.Oct */
|
||||
.highlight .sb { color: #d14 } /* Literal.String.Backtick */
|
||||
.highlight .sc { color: #d14 } /* Literal.String.Char */
|
||||
.highlight .sd { color: #d14 } /* Literal.String.Doc */
|
||||
.highlight .s2 { color: #d14 } /* Literal.String.Double */
|
||||
.highlight .se { color: #d14 } /* Literal.String.Escape */
|
||||
.highlight .sh { color: #d14 } /* Literal.String.Heredoc */
|
||||
.highlight .si { color: #d14 } /* Literal.String.Interpol */
|
||||
.highlight .sx { color: #d14 } /* Literal.String.Other */
|
||||
.highlight .sr { color: #009926 } /* Literal.String.Regex */
|
||||
.highlight .s1 { color: #d14 } /* Literal.String.Single */
|
||||
.highlight .ss { color: #990073 } /* Literal.String.Symbol */
|
||||
.highlight .bp { color: #999999 } /* Name.Builtin.Pseudo */
|
||||
.highlight .vc { color: #008080 } /* Name.Variable.Class */
|
||||
.highlight .vg { color: #008080 } /* Name.Variable.Global */
|
||||
.highlight .vi { color: #008080 } /* Name.Variable.Instance */
|
||||
.highlight .il { color: #009999 } /* Literal.Number.Integer.Long */
|
@@ -0,0 +1,145 @@
|
||||
---
|
||||
title: "RQ: Connections"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
Although RQ features the `use_connection()` command for convenience, it
|
||||
is deprecated, since it pollutes the global namespace. Instead, prefer explicit
|
||||
connection management using the `with Connection(...):` context manager, or
|
||||
pass in Redis connection references to queues directly.
|
||||
|
||||
|
||||
## Single Redis connection (easy)
|
||||
|
||||
<div class="warning">
|
||||
<img style="float: right; margin-right: -60px; margin-top: -38px" src="{{site.baseurl}}img/warning.png" />
|
||||
<strong>Note:</strong>
|
||||
<p>
|
||||
The use of <code>use_connection</code> is deprecated.
|
||||
Please don't use <code>use_connection</code> in your scripts.
|
||||
Instead, use explicit connection management.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
In development mode, to connect to a default, local Redis server:
|
||||
|
||||
{% highlight python %}
|
||||
from rq import use_connection
|
||||
use_connection()
|
||||
{% endhighlight %}
|
||||
|
||||
In production, to connect to a specific Redis server:
|
||||
|
||||
{% highlight python %}
|
||||
from redis import Redis
|
||||
from rq import use_connection
|
||||
|
||||
redis = Redis('my.host.org', 6789, password='secret')
|
||||
use_connection(redis)
|
||||
{% endhighlight %}
|
||||
|
||||
Be aware of the fact that `use_connection` pollutes the global namespace. It
|
||||
also implies that you can only ever use a single connection.
|
||||
|
||||
|
||||
## Multiple Redis connections
|
||||
|
||||
However, the single connection pattern facilitates only those cases where you
|
||||
connect to a single Redis instance, and where you affect global context (by
|
||||
replacing the existing connection with the `use_connection()` call). You can
|
||||
only use this pattern when you are in full control of your web stack.
|
||||
|
||||
In any other situation, or when you want to use multiple connections, you
|
||||
should use `Connection` contexts or pass connections around explicitly.
|
||||
|
||||
|
||||
### Explicit connections (precise, but tedious)
|
||||
|
||||
Each RQ object instance (queues, workers, jobs) has a `connection` keyword
|
||||
argument that can be passed to the constructor. Using this, you don't need to
|
||||
use `use_connection()`. Instead, you can create your queues like this:
|
||||
|
||||
{% highlight python %}
|
||||
from rq import Queue
|
||||
from redis import Redis
|
||||
|
||||
conn1 = Redis('localhost', 6379)
|
||||
conn2 = Redis('remote.host.org', 9836)
|
||||
|
||||
q1 = Queue('foo', connection=conn1)
|
||||
q2 = Queue('bar', connection=conn2)
|
||||
{% endhighlight %}
|
||||
|
||||
Every job that is enqueued on a queue will know what connection it belongs to.
|
||||
The same goes for the workers.
|
||||
|
||||
This approach is very precise, but rather verbose, and therefore, tedious.
|
||||
|
||||
|
||||
### Connection contexts (precise and concise)
|
||||
|
||||
There is a better approach if you want to use multiple connections, though.
|
||||
Each RQ object instance, upon creation, will use the topmost Redis connection
|
||||
on the RQ connection stack, which is a mechanism to temporarily replace the
|
||||
default connection to be used.
|
||||
|
||||
An example will help to understand it:
|
||||
|
||||
{% highlight python %}
|
||||
from rq import Queue, Connection
|
||||
from redis import Redis
|
||||
|
||||
with Connection(Redis('localhost', 6379)):
|
||||
q1 = Queue('foo')
|
||||
with Connection(Redis('remote.host.org', 9836)):
|
||||
q2 = Queue('bar')
|
||||
q3 = Queue('qux')
|
||||
|
||||
assert q1.connection != q2.connection
|
||||
assert q2.connection != q3.connection
|
||||
assert q1.connection == q3.connection
|
||||
{% endhighlight %}
|
||||
|
||||
You can think of this as if, within the `Connection` context, every newly
|
||||
created RQ object instance will have the `connection` argument set implicitly.
|
||||
Enqueueing a job with `q2` will enqueue it in the second (remote) Redis
|
||||
backend, even when outside of the connection context.
|
||||
|
||||
|
||||
### Pushing/popping connections
|
||||
|
||||
If your code does not allow you to use a `with` statement, for example, if you
|
||||
want to use this to set up a unit test, you can use the `push_connection()` and
|
||||
`pop_connection()` methods instead of using the context manager.
|
||||
|
||||
{% highlight python %}
|
||||
import unittest

from redis import Redis
|
||||
from rq import Queue
|
||||
from rq import push_connection, pop_connection
|
||||
|
||||
class MyTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
push_connection(Redis())
|
||||
|
||||
def tearDown(self):
|
||||
pop_connection()
|
||||
|
||||
def test_foo(self):
|
||||
"""Any queues created here use local Redis."""
|
||||
q = Queue()
|
||||
...
|
||||
{% endhighlight %}
|
||||
|
||||
### Sentinel support
|
||||
|
||||
To use Redis Sentinel, you must specify a dictionary in the configuration file.
Using this setting in conjunction with systemd or Docker containers with the
automatic restart option allows workers and RQ to have a fault-tolerant connection to Redis.
|
||||
|
||||
{% highlight python %}
|
||||
SENTINEL = {'INSTANCES': [('remote.host1.org', 26379), ('remote.host2.org', 26379), ('remote.host3.org', 26379)],
|
||||
'SOCKET_TIMEOUT': None,
|
||||
'PASSWORD': 'secret',
|
||||
'DB': 2,
|
||||
'MASTER_NAME': 'master'}
|
||||
{% endhighlight %}
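
Workers can then be pointed at such a settings module with the `-c` option
described in the [workers]({{site.baseurl}}workers/) docs. A sketch, assuming
the dictionary lives in a module named `settings`:

{% highlight console %}
$ rq worker -c settings high normal low
{% endhighlight %}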
|
@@ -0,0 +1,81 @@
|
||||
---
|
||||
title: "RQ: Exceptions"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
Jobs can fail due to exceptions occurring. When your RQ workers run in the
|
||||
background, how do you get notified of these exceptions?
|
||||
|
||||
## Default: the `failed` queue
|
||||
|
||||
The default safety net for RQ is the `failed` queue. Every job that fails
|
||||
execution is stored here, along with its exception information (type,
value, traceback). While this makes sure no failing jobs "get lost", it does
not, by itself, notify you pro-actively about job failures.
|
||||
|
||||
|
||||
## Custom exception handlers
|
||||
|
||||
Starting from version 0.3.1, RQ supports registering custom exception
|
||||
handlers. This makes it possible to replace the default behaviour (sending
|
||||
the job to the `failed` queue) altogether, or to take additional steps when an
|
||||
exception occurs.
|
||||
|
||||
To do this, register your custom exception handler to an RQ worker as follows:
|
||||
|
||||
{% highlight python %}
|
||||
with Connection():
|
||||
q = Queue()
|
||||
w = Worker([q])
|
||||
w.push_exc_handler(my_handler)
|
||||
w.work()
|
||||
{% endhighlight %}
|
||||
|
||||
While the exception handlers are a FILO stack, most times you only want to
|
||||
register a single handler. Therefore, for convenience, you can pass it to the
|
||||
constructor directly, too:
|
||||
|
||||
{% highlight python %}
|
||||
with Connection():
|
||||
w = Worker([q], exception_handlers=[my_handler])
|
||||
...
|
||||
{% endhighlight %}
|
||||
|
||||
The handler itself is a function that takes the following parameters: `job`,
|
||||
`exc_type`, `exc_value` and `traceback`:
|
||||
|
||||
{% highlight python %}
|
||||
def my_handler(job, exc_type, exc_value, traceback):
|
||||
# do custom things here
|
||||
# for example, write the exception info to a DB
|
||||
...
|
||||
{% endhighlight %}
|
||||
|
||||
You might also see the three exception arguments encoded as:
|
||||
|
||||
{% highlight python %}
|
||||
def my_handler(job, *exc_info):
|
||||
# do custom things here
|
||||
...
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Chaining exception handlers
|
||||
|
||||
The handler itself is responsible for deciding whether or not the exception
|
||||
handling is done, or should fall through to the next handler on the stack.
|
||||
The handler can indicate this by returning a boolean. `False` means stop
|
||||
processing exceptions, `True` means continue and fall through to the next
|
||||
exception handler on the stack.
|
||||
|
||||
It's important for implementors to know that, by default, when the handler
|
||||
doesn't have an explicit return value (thus `None`), this will be interpreted
|
||||
as `True` (i.e. continue with the next handler).
|
||||
|
||||
To replace the default behaviour (i.e. moving the job to the `failed` queue),
|
||||
use a custom exception handler that doesn't fall through, for example:
|
||||
|
||||
{% highlight python %}
|
||||
def black_hole(job, *exc_info):
|
||||
return False
|
||||
{% endhighlight %}
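
Conversely, a handler can intercept only the exceptions it knows how to deal
with and let everything else fall through. A sketch, using a hypothetical
`RetryableError` marker exception:

{% highlight python %}
class RetryableError(Exception):
    """Hypothetical marker for jobs that are safe to retry."""


def retry_handler(job, exc_type, exc_value, traceback):
    if not issubclass(exc_type, RetryableError):
        return True   # not ours: fall through to the next handler
    # ... re-enqueue or otherwise retry the job here ...
    return False      # handled: don't move the job to the failed queue
{% endhighlight %}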
|
@@ -0,0 +1,229 @@
|
||||
---
|
||||
title: "RQ: Documentation"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
A _job_ is a Python object, representing a function that is invoked
|
||||
asynchronously in a worker (background) process. Any Python function can be
|
||||
invoked asynchronously, by simply pushing a reference to the function and its
|
||||
arguments onto a queue. This is called _enqueueing_.
|
||||
|
||||
|
||||
## Enqueueing jobs
|
||||
|
||||
To put jobs on queues, first declare a function:
|
||||
|
||||
{% highlight python %}
|
||||
import requests
|
||||
|
||||
def count_words_at_url(url):
|
||||
resp = requests.get(url)
|
||||
return len(resp.text.split())
|
||||
{% endhighlight %}
|
||||
|
||||
Noticed anything? There's nothing special about this function! Any Python
|
||||
function call can be put on an RQ queue.
|
||||
|
||||
To put this potentially expensive word count for a given URL in the background,
|
||||
simply do this:
|
||||
|
||||
{% highlight python %}
|
||||
import time

from rq import Queue
|
||||
from redis import Redis
|
||||
from somewhere import count_words_at_url
|
||||
|
||||
# Tell RQ what Redis connection to use
|
||||
redis_conn = Redis()
|
||||
q = Queue(connection=redis_conn) # no args implies the default queue
|
||||
|
||||
# Delay execution of count_words_at_url('http://nvie.com')
|
||||
job = q.enqueue(count_words_at_url, 'http://nvie.com')
|
||||
print job.result # => None
|
||||
|
||||
# Now, wait a while, until the worker is finished
|
||||
time.sleep(2)
|
||||
print job.result # => 889
|
||||
{% endhighlight %}
|
||||
|
||||
If you want to put the work on a specific queue, simply specify its name:
|
||||
|
||||
{% highlight python %}
|
||||
q = Queue('low', connection=redis_conn)
|
||||
q.enqueue(count_words_at_url, 'http://nvie.com')
|
||||
{% endhighlight %}
|
||||
|
||||
Notice the `Queue('low')` in the example above? You can use any queue name, so
|
||||
you can distribute work across queues however you like. A common naming
|
||||
pattern is to name your queues after priorities (e.g. `high`, `medium`,
|
||||
`low`).
|
||||
|
||||
In addition, you can add a few options to modify the behaviour of the queued
|
||||
job. By default, these are popped out of the kwargs that will be passed to the
|
||||
job function.
|
||||
|
||||
* `timeout` specifies the maximum runtime of the job before it'll be considered
|
||||
'lost'. Its default unit is seconds; it can be given as an integer or a string representing an integer (e.g. `2`, `'2'`), or as a string with an explicit unit for hours, minutes, or seconds (e.g. `'1h'`, `'3m'`, `'5s'`).
|
||||
* `result_ttl` specifies the expiry time of the key where the job result will
|
||||
be stored
|
||||
* `ttl` specifies the maximum queued time of the job before it'll be cancelled
|
||||
* `depends_on` specifies another job (or job id) that must complete before this
|
||||
job will be queued
|
||||
* `job_id` allows you to manually specify this job's `job_id`
|
||||
* `at_front` will place the job at the *front* of the queue, instead of the
|
||||
back
|
||||
* `kwargs` and `args` let you bypass the auto-pop of these arguments, i.e.
  to pass a `timeout` argument to the underlying job function itself.
|
||||
|
||||
In the last case, it may be advantageous to instead use the explicit version of
|
||||
`.enqueue()`, `.enqueue_call()`:
|
||||
|
||||
{% highlight python %}
|
||||
q = Queue('low', connection=redis_conn)
|
||||
q.enqueue_call(func=count_words_at_url,
|
||||
args=('http://nvie.com',),
|
||||
timeout=30)
|
||||
{% endhighlight %}
|
||||
|
||||
For cases where the web process doesn't have access to the source code running
|
||||
in the worker (i.e. code base X invokes a delayed function from code base Y),
|
||||
you can pass the function as a string reference, too.
|
||||
|
||||
{% highlight python %}
|
||||
q = Queue('low', connection=redis_conn)
|
||||
q.enqueue('my_package.my_module.my_func', 3, 4)
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Working with Queues
|
||||
|
||||
Besides enqueuing jobs, Queues have a few useful methods:
|
||||
|
||||
{% highlight python %}
|
||||
from rq import Queue
|
||||
from redis import Redis
|
||||
|
||||
redis_conn = Redis()
|
||||
q = Queue(connection=redis_conn)
|
||||
|
||||
# Getting the number of jobs in the queue
|
||||
print len(q)
|
||||
|
||||
# Retrieving jobs
|
||||
queued_job_ids = q.job_ids # Gets a list of job IDs from the queue
|
||||
queued_jobs = q.jobs # Gets a list of enqueued job instances
|
||||
job = q.fetch_job('my_id') # Returns job having ID "my_id"
|
||||
{% endhighlight %}
|
||||
|
||||
### On the Design
|
||||
|
||||
With RQ, you don't have to set up any queues upfront, and you don't have to
|
||||
specify any channels, exchanges, routing rules, or whatnot. You can just put
|
||||
jobs onto any queue you want. As soon as you enqueue a job to a queue that
|
||||
does not exist yet, it is created on the fly.
|
||||
|
||||
RQ does _not_ use an advanced broker to do the message routing for you. You
|
||||
may consider this an awesome advantage or a handicap, depending on the problem
|
||||
you're solving.
|
||||
|
||||
Lastly, it does not speak a portable protocol, since it depends on [pickle][p]
|
||||
to serialize the jobs, so it's a Python-only system.
|
||||
|
||||
|
||||
## The delayed result
|
||||
|
||||
When jobs get enqueued, the `queue.enqueue()` method returns a `Job` instance.
|
||||
This is nothing more than a proxy object that can be used to check the outcome
|
||||
of the actual job.
|
||||
|
||||
For this purpose, it has a convenience `result` accessor property that
|
||||
will return `None` when the job is not yet finished, or a non-`None` value when
|
||||
the job has finished (assuming the job _has_ a return value in the first place,
|
||||
of course).
|
||||
|
||||
|
||||
## The `@job` decorator
|
||||
If you're familiar with Celery, you might be used to its `@task` decorator.
|
||||
Starting from RQ >= 0.3, there exists a similar decorator:
|
||||
|
||||
{% highlight python %}
|
||||
import time

from rq.decorators import job
|
||||
|
||||
@job('low', connection=my_redis_conn, timeout=5)
|
||||
def add(x, y):
|
||||
return x + y
|
||||
|
||||
job = add.delay(3, 4)
|
||||
time.sleep(1)
|
||||
print job.result
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Bypassing workers
|
||||
|
||||
For testing purposes, you can enqueue jobs without delegating the actual
|
||||
execution to a worker (available since version 0.3.1). To do this, pass the
|
||||
`async=False` argument into the Queue constructor:
|
||||
|
||||
{% highlight pycon %}
|
||||
>>> q = Queue('low', async=False, connection=my_redis_conn)
|
||||
>>> job = q.enqueue(fib, 8)
|
||||
>>> job.result
|
||||
21
|
||||
{% endhighlight %}
|
||||
|
||||
The above code runs without an active worker and executes `fib(8)`
|
||||
synchronously within the same process. You may know this behaviour from Celery
|
||||
as `ALWAYS_EAGER`. Note, however, that you still need a working connection to
|
||||
a Redis instance for storing states related to job execution and completion.
|
||||
|
||||
|
||||
## Job dependencies
|
||||
|
||||
New in RQ 0.4.0 is the ability to chain the execution of multiple jobs.
|
||||
To execute a job that depends on another job, use the `depends_on` argument:
|
||||
|
||||
{% highlight python %}
|
||||
q = Queue('low', connection=my_redis_conn)
|
||||
report_job = q.enqueue(generate_report)
|
||||
q.enqueue(send_report, depends_on=report_job)
|
||||
{% endhighlight %}
|
||||
|
||||
The ability to handle job dependencies allows you to split a big job into
|
||||
several smaller ones. A job that is dependent on another is enqueued only when
|
||||
its dependency finishes *successfully*.
|
||||
|
||||
|
||||
## The worker
|
||||
|
||||
To learn about workers, see the [workers][w] documentation.
|
||||
|
||||
[w]: {{site.baseurl}}workers/
|
||||
|
||||
|
||||
## Considerations for jobs
|
||||
|
||||
Technically, you can put any Python function call on a queue, but that does not
|
||||
mean it's always wise to do so. Some things to consider before putting a job
|
||||
on a queue:
|
||||
|
||||
* Make sure that the function's `__module__` is importable by the worker. In
|
||||
particular, this means that you cannot enqueue functions that are declared in
|
||||
the `__main__` module.
|
||||
* Make sure that the worker and the work generator share _exactly_ the same
|
||||
source code.
|
||||
* Make sure that the function call does not depend on its context. In
|
||||
particular, global variables are evil (as always), but also _any_ state that
|
||||
the function depends on (for example a "current" user or "current" web
|
||||
request) is not there when the worker will process it. If you want work done
|
||||
for the "current" user, you should resolve that user to a concrete instance
|
||||
and pass a reference to that user object to the job as an argument, as
sketched below.
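
A minimal sketch of that advice, with hypothetical `get_current_user` and
`send_welcome_mail` helpers:

{% highlight python %}
# Bad: the job would depend on ambient "current user" state
# q.enqueue(send_welcome_mail_to_current_user)

# Good: resolve the user up front and pass a concrete value
user = get_current_user()
q.enqueue(send_welcome_mail, user.id)
{% endhighlight %}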
|
||||
|
||||
|
||||
## Limitations
|
||||
|
||||
RQ workers will only run on systems that implement `fork()`. Most notably,
|
||||
this means it is not possible to run the workers on Windows.
|
||||
|
||||
|
||||
|
||||
[p]: http://docs.python.org/library/pickle.html
|
@@ -0,0 +1,95 @@
|
||||
---
|
||||
title: "RQ: Documentation"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
For some use cases it might be useful to have access to the current job ID or
|
||||
instance from within the job function itself. Or to store arbitrary data on
|
||||
jobs.
|
||||
|
||||
|
||||
## Accessing the "current" job
|
||||
|
||||
_New in version 0.3.3._
|
||||
|
||||
Since job functions are regular Python functions, you have to ask RQ for the
|
||||
current job ID, if any. To do this, you can use:
|
||||
|
||||
{% highlight python %}
|
||||
from rq import get_current_job
|
||||
|
||||
def add(x, y):
|
||||
job = get_current_job()
|
||||
print 'Current job: %s' % (job.id,)
|
||||
return x + y
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Storing arbitrary data on jobs
|
||||
|
||||
_Improved in 0.8.0._
|
||||
|
||||
To add/update custom status information on this job, you have access to the
|
||||
`meta` property, which allows you to store arbitrary pickleable data on the job
|
||||
itself:
|
||||
|
||||
{% highlight python %}
|
||||
import socket
import time

from rq import get_current_job
|
||||
|
||||
def add(x, y):
|
||||
job = get_current_job()
|
||||
job.meta['handled_by'] = socket.gethostname()
|
||||
job.save_meta()
|
||||
|
||||
# do more work
|
||||
time.sleep(1)
|
||||
return x + y
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Time to live for job in queue
|
||||
|
||||
_New in version 0.4.7._
|
||||
|
||||
A job has two TTLs: one for the job result and one for the job itself. If you have a
job that shouldn't be executed after a certain amount of time, you can define a TTL as follows:
|
||||
|
||||
{% highlight python %}
|
||||
# When creating the job:
|
||||
job = Job.create(func=say_hello, ttl=43)
|
||||
|
||||
# or when queueing a new job:
|
||||
job = q.enqueue(count_words_at_url, 'http://nvie.com', ttl=43)
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Failed Jobs
|
||||
|
||||
If a job fails and raises an exception, the worker will put the job in a failed job queue.
|
||||
On the `Job` instance, the `is_failed` property will be true. To fetch all failed jobs, scan
through the queue returned by `get_failed_queue()`.
|
||||
|
||||
{% highlight python %}
|
||||
from redis import StrictRedis
|
||||
from rq import push_connection, get_failed_queue, Queue
|
||||
from rq.job import Job
|
||||
|
||||
|
||||
con = StrictRedis()
|
||||
push_connection(con)
|
||||
|
||||
def div_by_zero(x):
|
||||
return x / 0
|
||||
|
||||
job = Job.create(func=div_by_zero, args=(1, 2, 3))
|
||||
job.origin = 'fake'
|
||||
job.save()
|
||||
fq = get_failed_queue()
|
||||
fq.quarantine(job, Exception('Some fake error'))
|
||||
assert fq.count == 1
|
||||
|
||||
fq.requeue(job.id)
|
||||
|
||||
assert fq.count == 0
|
||||
assert Queue('fake').count == 1
|
||||
{% endhighlight %}
|
@@ -0,0 +1,115 @@
|
||||
---
|
||||
title: "RQ: Documentation"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
Enqueueing jobs is delayed execution of function calls. This means we're
|
||||
solving a problem, but are getting back a few in return.
|
||||
|
||||
|
||||
## Dealing with results
|
||||
|
||||
Python functions may have return values, so jobs can have them, too. If a job
|
||||
returns a non-`None` return value, the worker will write that return value back
|
||||
to the job's Redis hash under the `result` key. The job's Redis hash itself
|
||||
will expire after 500 seconds by default after the job is finished.
|
||||
|
||||
The party that enqueued the job gets back a `Job` instance as a result of the
|
||||
enqueueing itself. Such a `Job` object is a proxy object that is tied to the
|
||||
job's ID, to be able to poll for results.
|
||||
|
||||
|
||||
**On the return value's TTL**
|
||||
Return values are written back to Redis with a limited lifetime (via a Redis
|
||||
expiring key), which is merely to avoid ever-growing Redis databases.
|
||||
|
||||
From RQ >= 0.3.1, the TTL value of the job result can be specified using the
|
||||
`result_ttl` keyword argument to `enqueue()` and `enqueue_call()` calls. It
|
||||
can also be used to disable the expiry altogether. You then are responsible
|
||||
for cleaning up jobs yourself, though, so use it with care.
|
||||
|
||||
You can do the following:
|
||||
|
||||
q.enqueue(foo) # result expires after 500 secs (the default)
|
||||
q.enqueue(foo, result_ttl=86400) # result expires after 1 day
|
||||
q.enqueue(foo, result_ttl=0) # result gets deleted immediately
|
||||
q.enqueue(foo, result_ttl=-1) # result never expires--you should delete jobs manually
|
||||
|
||||
Additionally, you can use this for keeping around finished jobs without return
|
||||
values, which would be deleted immediately by default.
|
||||
|
||||
q.enqueue(func_without_rv, result_ttl=500) # job kept explicitly
|
||||
|
||||
|
||||
## Dealing with exceptions
|
||||
|
||||
Jobs can fail and throw exceptions. This is a fact of life. RQ deals with
|
||||
this in the following way.
|
||||
|
||||
Job failure is too important not to be noticed and therefore the job's return
|
||||
value should never expire. Furthermore, it should be possible to retry failed
|
||||
jobs. Typically, this is something that needs manual interpretation, since
|
||||
there is no automatic or reliable way of letting RQ judge whether it is safe
|
||||
for certain tasks to be retried or not.
|
||||
|
||||
When an exception is thrown inside a job, it is caught by the worker,
|
||||
serialized and stored under the job's Redis hash's `exc_info` key. A reference
|
||||
to the job is put on the `failed` queue.
|
||||
|
||||
The job itself has some useful properties that can be used to aid inspection:
|
||||
|
||||
* the original creation time of the job
|
||||
* the last enqueue date
|
||||
* the originating queue
|
||||
* a textual description of the desired function invocation
|
||||
* the exception information
|
||||
|
||||
This makes it possible to inspect and interpret the problem manually and
|
||||
possibly resubmit the job.
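
For example, a sketch of such a manual inspection, using the
`get_failed_queue()` helper shown elsewhere in these docs:

{% highlight python %}
from rq import get_failed_queue

fq = get_failed_queue()
for job in fq.jobs:
    print job.origin, job.exc_info  # inspect, then possibly fq.requeue(job.id)
{% endhighlight %}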
|
||||
|
||||
|
||||
## Dealing with interruption
|
||||
|
||||
When workers get killed in the polite way (Ctrl+C or `kill`), RQ tries hard not
|
||||
to lose any work. The current job is finished first, after which the worker
stops further processing of jobs. This ensures that jobs always get a fair
chance to finish themselves.
|
||||
|
||||
However, workers can be killed forcefully by `kill -9`, which will not give the
|
||||
workers a chance to finish the job gracefully or to put the job on the `failed`
|
||||
queue. Therefore, killing a worker forcefully could potentially lead to
|
||||
damage.
|
||||
|
||||
Just sayin'.
|
||||
|
||||
|
||||
## Dealing with job timeouts
|
||||
|
||||
By default, jobs should execute within 180 seconds. After that, the worker
|
||||
kills the work horse and puts the job onto the `failed` queue, indicating the
|
||||
job timed out.
|
||||
|
||||
If a job requires more (or less) time to complete, the default timeout period
|
||||
can be loosened (or tightened), by specifying it as a keyword argument to the
|
||||
`enqueue()` call, like so:
|
||||
|
||||
{% highlight python %}
|
||||
q = Queue()
|
||||
q.enqueue(mytask, args=(foo,), kwargs={'bar': qux}, timeout=600) # 10 mins
|
||||
{% endhighlight %}
|
||||
|
||||
You can also change the default timeout for jobs that are enqueued via specific
|
||||
queue instances at once, which can be useful for patterns like this:
|
||||
|
||||
{% highlight python %}
|
||||
# High prio jobs should end in 8 secs, while low prio
|
||||
# work may take up to 10 mins
|
||||
high = Queue('high', default_timeout=8) # 8 secs
|
||||
low = Queue('low', default_timeout=600) # 10 mins
|
||||
|
||||
# Individual jobs can still override these defaults
|
||||
low.enqueue(really_really_slow, timeout=3600) # 1 hr
|
||||
{% endhighlight %}
|
||||
|
||||
Individual jobs can still specify an alternative timeout, as workers will
|
||||
respect these.
|
@@ -0,0 +1,41 @@
|
||||
---
|
||||
title: "RQ: Testing"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
## Workers inside unit tests
|
||||
|
||||
You may wish to include your RQ tasks inside unit tests. However, many frameworks (such as Django) use in-memory test databases, which do not play nicely with the default `fork()` behaviour of RQ.
|
||||
|
||||
Therefore, you must use the `SimpleWorker` class to avoid `fork()`:
|
||||
|
||||
{% highlight python %}
|
||||
from redis import Redis
|
||||
from rq import SimpleWorker, Queue
|
||||
|
||||
queue = Queue(connection=Redis())
|
||||
queue.enqueue(my_long_running_job)
|
||||
worker = SimpleWorker([queue], connection=queue.connection)
|
||||
worker.work(burst=True) # Runs enqueued job
|
||||
# Check for result...
|
||||
{% endhighlight %}
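
To assert on the outcome, capture the `Job` instance that `enqueue()` returns.
A sketch, assuming `my_long_running_job` returns a non-`None` value:

{% highlight python %}
job = queue.enqueue(my_long_running_job)
worker.work(burst=True)
assert job.result is not None
{% endhighlight %}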
|
||||
|
||||
|
||||
## Running Jobs in unit tests
|
||||
|
||||
Another solution for testing purposes is to use the `async=False` queue
parameter, which instructs the queue to perform the job instantly in the same
thread instead of dispatching it to a worker. Workers are then no longer
required.
|
||||
Additionally, we can use fakeredis to mock a Redis instance, so we don't have to
run a Redis server separately. The instance of the fake Redis server can
|
||||
be directly passed as the connection argument to the queue:
|
||||
|
||||
{% highlight python %}
|
||||
from fakeredis import FakeStrictRedis
|
||||
from rq import Queue
|
||||
|
||||
queue = Queue(async=False, connection=FakeStrictRedis())
|
||||
job = queue.enqueue(my_long_running_job)
|
||||
assert job.is_finished
|
||||
{% endhighlight %}
|
@@ -0,0 +1,287 @@
|
||||
---
|
||||
title: "RQ: Simple job queues for Python"
|
||||
layout: docs
|
||||
---
|
||||
|
||||
A worker is a Python process that typically runs in the background and exists
|
||||
solely as a work horse to perform lengthy or blocking tasks that you don't want
|
||||
to perform inside web processes.
|
||||
|
||||
|
||||
## Starting workers
|
||||
|
||||
To start crunching work, simply start a worker from the root of your project
|
||||
directory:
|
||||
|
||||
{% highlight console %}
|
||||
$ rq worker high normal low
|
||||
*** Listening for work on high, normal, low
|
||||
Got send_newsletter('me@nvie.com') from default
|
||||
Job ended normally without result
|
||||
*** Listening for work on high, normal, low
|
||||
...
|
||||
{% endhighlight %}
|
||||
|
||||
Workers will read jobs from the given queues (the order is important) in an
|
||||
endless loop, waiting for new work to arrive when all jobs are done.
|
||||
|
||||
Each worker will process a single job at a time. Within a worker, there is no
|
||||
concurrent processing going on. If you want to perform jobs concurrently,
|
||||
simply start more workers.
|
||||
|
||||
|
||||
### Burst mode
|
||||
|
||||
By default, workers will start working immediately and will block and wait for
|
||||
new work when they run out of work. Workers can also be started in _burst
|
||||
mode_ to finish all currently available work and quit as soon as all given
|
||||
queues are emptied.
|
||||
|
||||
{% highlight console %}
|
||||
$ rq worker --burst high normal low
|
||||
*** Listening for work on high, normal, low
|
||||
Got send_newsletter('me@nvie.com') from default
|
||||
Job ended normally without result
|
||||
No more work, burst finished.
|
||||
Registering death.
|
||||
{% endhighlight %}
|
||||
|
||||
This can be useful for batch work that needs to be processed periodically, or
|
||||
just to scale up your workers temporarily during peak periods.
|
||||
|
||||
|
||||
## Inside the worker
|
||||
|
||||
### The worker life-cycle
|
||||
|
||||
The life-cycle of a worker consists of a few phases:
|
||||
|
||||
1. _Boot_. Loading the Python environment.
|
||||
2. _Birth registration_. The worker registers itself to the system so it knows
|
||||
of this worker.
|
||||
3. _Start listening_. A job is popped from any of the given Redis queues.
|
||||
If all queues are empty and the worker is running in burst mode, quit now.
|
||||
Else, wait until jobs arrive.
|
||||
4. _Prepare job execution_. The worker tells the system that it will begin work
   by setting its status to `busy` and registering the job in the `StartedJobRegistry`.
|
||||
5. _Fork a child process._
|
||||
A child process (the "work horse") is forked off to do the actual work in
|
||||
a fail-safe context.
|
||||
6. _Process work_. This performs the actual job work in the work horse.
|
||||
7. _Cleanup job execution_. The worker sets its status to `idle` and sets both
|
||||
the job and its result to expire based on `result_ttl`. Job is also removed
|
||||
from `StartedJobRegistry` and added to `FinishedJobRegistry` in the case
|
||||
of successful execution, or `FailedQueue` in the case of failure.
|
||||
8. _Loop_. Repeat from step 3.
|
||||
|
||||
|
||||
## Performance notes
|
||||
|
||||
Basically the `rq worker` shell script is a simple fetch-fork-execute loop.
|
||||
When a lot of your jobs do lengthy setups, or they all depend on the same set
|
||||
of modules, you pay this overhead each time you run a job (since you're doing
|
||||
the import _after_ the moment of forking). This is clean, because RQ won't
|
||||
ever leak memory this way, but also slow.
|
||||
|
||||
A pattern you can use to improve the throughput performance for these kind of
|
||||
jobs can be to import the necessary modules _before_ the fork. There is no way
|
||||
of telling RQ workers to perform this set up for you, but you can do it
|
||||
yourself before starting the work loop.
|
||||
|
||||
To do this, provide your own worker script (instead of using `rq worker`).
|
||||
A simple implementation example:
|
||||
|
||||
{% highlight python %}
|
||||
#!/usr/bin/env python
|
||||
import sys
|
||||
from rq import Connection, Worker
|
||||
|
||||
# Preload libraries
|
||||
import library_that_you_want_preloaded
|
||||
|
||||
# Provide queue names to listen to as arguments to this script,
|
||||
# similar to rq worker
|
||||
with Connection():
|
||||
qs = sys.argv[1:] or ['default']
|
||||
|
||||
w = Worker(qs)
|
||||
w.work()
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
### Worker names
|
||||
|
||||
Workers are registered to the system under their names, see [monitoring][m].
|
||||
By default, the name of a worker is equal to the concatenation of the current
|
||||
hostname and the current PID. To override this default, specify the name when
|
||||
starting the worker, using the `--name` option.
|
||||
|
||||
[m]: /docs/monitoring/
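
For example, to start a worker named `worker-1`:

{% highlight console %}
$ rq worker --name worker-1 high normal low
{% endhighlight %}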
|
||||
|
||||
|
||||
### Retrieving worker information
|
||||
|
||||
`Worker` instances store their runtime information in Redis. Here's how to
|
||||
retrieve them:
|
||||
|
||||
{% highlight python %}
|
||||
from redis import Redis
|
||||
from rq import Queue, Worker
|
||||
|
||||
# Returns all workers registered in this connection
|
||||
redis = Redis()
|
||||
workers = Worker.all(connection=redis)
|
||||
|
||||
# Returns all workers in this queue (new in version 0.10.0)
|
||||
queue = Queue('queue_name')
|
||||
workers = Worker.all(queue=queue)
|
||||
{% endhighlight %}
|
||||
|
||||
_New in version 0.10.0._
|
||||
|
||||
If you only want to know the number of workers for monitoring purposes, using
|
||||
`Worker.count()` is much more performant.
|
||||
|
||||
{% highlight python %}
|
||||
from redis import Redis
|
||||
from rq import Worker
|
||||
|
||||
redis = Redis()
|
||||
|
||||
# Count the number of workers in this Redis connection
|
||||
workers = Worker.count(connection=redis)
|
||||
|
||||
# Count the number of workers for a specific queue
|
||||
queue = Queue('queue_name', connection=redis)
|
||||
workers = Worker.count(queue=queue)
|
||||
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
### Worker statistics
|
||||
|
||||
_New in version 0.9.0._
|
||||
|
||||
If you want to check the utilization of your queues, `Worker` instances
|
||||
store a few useful pieces of information:
|
||||
|
||||
{% highlight python %}
|
||||
from rq.worker import Worker
|
||||
worker = Worker.find_by_key('rq:worker:name')
|
||||
|
||||
worker.successful_job_count # Number of jobs finished successfully
|
||||
worker.failed_job_count     # Number of failed jobs processed by this worker
|
||||
worker.total_working_time   # Amount of time spent executing jobs
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Taking down workers
|
||||
|
||||
If, at any time, the worker receives `SIGINT` (via Ctrl+C) or `SIGTERM` (via
|
||||
`kill`), the worker waits until the currently running task is finished, stops
|
||||
the work loop, and gracefully registers its own death.
|
||||
|
||||
If, during this takedown phase, `SIGINT` or `SIGTERM` is received again, the
|
||||
worker will forcefully terminate the child process (sending it `SIGKILL`), but
|
||||
will still try to register its own death.
|
||||
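To request a warm shutdown from another shell, send one of these signals yourself. A sketch using standard Unix tools (the match pattern assumes the worker was started via `rq worker`):

{% highlight console %}
$ pkill -TERM -f 'rq worker'
{% endhighlight %}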
|
||||
|
||||
## Using a config file
|
||||
|
||||
_New in version 0.3.2._
|
||||
|
||||
If you'd like to configure `rq worker` via a configuration file instead of
|
||||
through command line arguments, you can do this by creating a Python file like
|
||||
`settings.py`:
|
||||
|
||||
{% highlight python %}
|
||||
REDIS_URL = 'redis://localhost:6379/1'
|
||||
|
||||
# You can also specify the Redis connection parameters individually
|
||||
# REDIS_HOST = 'redis.example.com'
|
||||
# REDIS_PORT = 6380
|
||||
# REDIS_DB = 3
|
||||
# REDIS_PASSWORD = 'very secret'
|
||||
|
||||
# Queues to listen on
|
||||
QUEUES = ['high', 'normal', 'low']
|
||||
|
||||
# If you're using Sentry to collect your runtime exceptions, you can use this
|
||||
# to configure RQ for it in a single step
|
||||
# The 'sync+' prefix is required for raven: https://github.com/nvie/rq/issues/350#issuecomment-43592410
|
||||
SENTRY_DSN = 'sync+http://public:secret@example.com/1'
|
||||
{% endhighlight %}
|
||||
|
||||
The example above shows all the options that are currently supported.
|
||||
|
||||
_Note: The_ `QUEUES` _and_ `REDIS_PASSWORD` _settings are new since 0.3.3._
|
||||
|
||||
To specify which module to read settings from, use the `-c` option:
|
||||
|
||||
{% highlight console %}
|
||||
$ rq worker -c settings
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Custom worker classes
|
||||
|
||||
_New in version 0.4.0._
|
||||
|
||||
There are times when you want to customize the worker's behavior. Some of the
|
||||
more common requests so far are:
|
||||
|
||||
1. Managing database connectivity prior to running a job (see the sketch below).
|
||||
2. Using a job execution model that does not require `os.fork`.
|
||||
3. The ability to use different concurrency models such as
|
||||
`multiprocessing` or `gevent`.
|
||||
|
||||
You can use the `-w` option to specify a different worker class to use:
|
||||
|
||||
{% highlight console %}
|
||||
$ rq worker -w 'path.to.GeventWorker'
|
||||
{% endhighlight %}
|
||||
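As a sketch of the first item in that list (not an official RQ recipe; `MyWorker` and `setup_database_connections` are made-up names), a custom worker class could look like this:

{% highlight python %}
from rq import Worker

def setup_database_connections():
    # Hypothetical hook: (re)establish your database connections here.
    pass

class MyWorker(Worker):
    def work(self, *args, **kwargs):
        # Run custom setup once, before entering the work loop.
        setup_database_connections()
        return super(MyWorker, self).work(*args, **kwargs)
{% endhighlight %}

You would then start it with `rq worker -w 'path.to.MyWorker'`.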
|
||||
|
||||
## Custom Job and Queue classes
|
||||
|
||||
_Will be available in next release._
|
||||
|
||||
You can tell the worker to use a custom class for jobs and queues using
|
||||
`--job-class` and/or `--queue-class`.
|
||||
|
||||
{% highlight console %}
|
||||
$ rq worker --job-class 'custom.JobClass' --queue-class 'custom.QueueClass'
|
||||
{% endhighlight %}
|
||||
|
||||
Don't forget to use those same classes when enqueueing the jobs.
|
||||
|
||||
For example:
|
||||
|
||||
{% highlight python %}
|
||||
from redis import Redis
from rq import Queue
|
||||
from rq.job import Job
|
||||
|
||||
class CustomJob(Job):
|
||||
pass
|
||||
|
||||
class CustomQueue(Queue):
|
||||
job_class = CustomJob
|
||||
|
||||
redis_conn = Redis()
queue = CustomQueue('default', connection=redis_conn)
|
||||
queue.enqueue(some_func)
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Custom exception handlers
|
||||
|
||||
_New in version 0.5.5._
|
||||
|
||||
If you need to handle errors differently for different types of jobs, or simply want to customize
|
||||
RQ's default error handling behavior, run `rq worker` using the `--exception-handler` option:
|
||||
|
||||
{% highlight console %}
|
||||
$ rq worker --exception-handler 'path.to.my.ErrorHandler'
|
||||
|
||||
# Multiple exception handlers are also supported
|
||||
$ rq worker --exception-handler 'path.to.my.ErrorHandler' --exception-handler 'another.ErrorHandler'
|
||||
{% endhighlight %}
|
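The handler itself is just a callable. A minimal sketch (per RQ's [exception handling](/docs/exceptions/) docs, handlers receive the job and the exception info; returning `True` moves on to the next handler in the chain, while `False` stops further processing):

{% highlight python %}
def my_handler(job, exc_type, exc_value, traceback):
    # Inspect or log the failed job here.
    print('Job %s failed: %s' % (job.id, exc_value))
    return True  # fall through to the next handler in the chain
{% endhighlight %}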
@ -0,0 +1,85 @@
|
||||
---
|
||||
title: "RQ: Simple job queues for Python"
|
||||
layout: default
|
||||
---
|
||||
|
||||
RQ (_Redis Queue_) is a simple Python library for queueing jobs and processing
|
||||
them in the background with workers. It is backed by Redis and it is designed
|
||||
to have a low barrier to entry. It can be integrated into your web stack easily.
|
||||
|
||||
RQ requires Redis >= 2.7.0.
|
||||
|
||||
## Getting started
|
||||
|
||||
First, run a Redis server. You can use an existing one. To put jobs on
|
||||
queues, you don't have to do anything special; just define your typically
|
||||
lengthy or blocking function:
|
||||
|
||||
{% highlight python %}
|
||||
import requests
|
||||
|
||||
def count_words_at_url(url):
|
||||
resp = requests.get(url)
|
||||
return len(resp.text.split())
|
||||
{% endhighlight %}
|
||||
|
||||
Then, create an RQ queue:
|
||||
|
||||
{% highlight python %}
|
||||
from redis import Redis
|
||||
from rq import Queue
|
||||
|
||||
q = Queue(connection=Redis())
|
||||
{% endhighlight %}
|
||||
|
||||
And enqueue the function call:
|
||||
|
||||
{% highlight python %}
|
||||
from my_module import count_words_at_url
|
||||
result = q.enqueue(
|
||||
count_words_at_url, 'http://nvie.com')
|
||||
{% endhighlight %}
|
||||
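The `enqueue` call returns a job object right away; its return value becomes available once a worker has processed it. A small sketch, assuming a worker is running:

{% highlight python %}
import time

time.sleep(2)  # give a worker a moment to pick the job up
print(result.result)  # => 818 (or None while the job is still queued or running)
{% endhighlight %}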
|
||||
For a more complete example, refer to the [docs][d]. But this is the essence.
|
||||
|
||||
[d]: {{site.baseurl}}docs/
|
||||
|
||||
|
||||
### The worker
|
||||
|
||||
To start executing enqueued function calls in the background, start a worker
|
||||
from your project's directory:
|
||||
|
||||
{% highlight console %}
|
||||
$ rq worker
|
||||
*** Listening for work on default
|
||||
Got count_words_at_url('http://nvie.com') from default
|
||||
Job result = 818
|
||||
*** Listening for work on default
|
||||
{% endhighlight %}
|
||||
|
||||
That's about it.
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
Simply use the following command to install the latest released version:
|
||||
|
||||
pip install rq
|
||||
|
||||
If you want the cutting edge version (that may well be broken), use this:
|
||||
|
||||
pip install -e git+git@github.com:nvie/rq.git@master#egg=rq
|
||||
|
||||
|
||||
## Project history
|
||||
|
||||
This project has been inspired by the good parts of [Celery][1], [Resque][2]
|
||||
and [this snippet][3], and has been created as a lightweight alternative to
|
||||
existing queueing frameworks, with a low barrier to entry.
|
||||
|
||||
[m]: http://pypi.python.org/pypi/mailer
|
||||
[p]: http://docs.python.org/library/pickle.html
|
||||
[1]: http://www.celeryproject.org/
|
||||
[2]: https://github.com/defunkt/resque
|
||||
[3]: http://flask.pocoo.org/snippets/73/
|
@ -0,0 +1,23 @@
|
||||
---
|
||||
title: "RQ: Using with Django"
|
||||
layout: patterns
|
||||
---
|
||||
|
||||
## Using RQ with Django
|
||||
|
||||
The simplest way of using RQ with Django is to use
|
||||
[django-rq](https://github.com/ui/django-rq). Follow the instructions in the
|
||||
README.
|
||||
|
||||
### Manually
|
||||
|
||||
In order to use RQ together with Django, you have to start the worker in
|
||||
a "Django context". Possibly, you have to write a custom Django management
|
||||
command to do so. In many cases, however, setting the `DJANGO_SETTINGS_MODULE`
|
||||
environment variable will already do the trick.
|
||||
|
||||
If `settings.py` is your Django settings file (as it is by default), use this:
|
||||
|
||||
{% highlight console %}
|
||||
$ DJANGO_SETTINGS_MODULE=settings rq worker high default low
|
||||
{% endhighlight %}
|
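If you do end up writing a management command, a minimal sketch could look like this (the module path `yourapp/management/commands/rqworker.py` and the command name are assumptions):

{% highlight python %}
# yourapp/management/commands/rqworker.py
from django.core.management.base import BaseCommand

from rq import Connection, Worker

class Command(BaseCommand):
    help = 'Runs an RQ worker with the Django settings loaded'

    def handle(self, *args, **options):
        # Queue names are passed as positional arguments, e.g.:
        #   python manage.py rqworker high default low
        with Connection():
            Worker(list(args) or ['default']).work()
{% endhighlight %}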
@ -0,0 +1,69 @@
|
||||
---
|
||||
title: "RQ: Using RQ on Heroku"
|
||||
layout: patterns
|
||||
---
|
||||
|
||||
|
||||
## Using RQ on Heroku
|
||||
|
||||
To set up RQ on [Heroku][1], first add it to your
|
||||
`requirements.txt` file:
|
||||
|
||||
redis==2.10.5
|
||||
rq==0.7.0
|
||||
|
||||
Create a file called `run-worker.py` with the following content (assuming you
|
||||
are using [Redis To Go][2] with Heroku):
|
||||
|
||||
{% highlight python %}
|
||||
import os
|
||||
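# Note: this example targets Python 2 (urlparse); on Python 3, use urllib.parse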
import urlparse
|
||||
from redis import Redis
|
||||
from rq import Queue, Connection
|
||||
from rq.worker import HerokuWorker as Worker
|
||||
|
||||
listen = ['high', 'default', 'low']
|
||||
|
||||
redis_url = os.getenv('REDISTOGO_URL')
|
||||
if not redis_url:
|
||||
raise RuntimeError('Set up Redis To Go first.')
|
||||
|
||||
urlparse.uses_netloc.append('redis')
|
||||
url = urlparse.urlparse(redis_url)
|
||||
conn = Redis(host=url.hostname, port=url.port, db=0, password=url.password)
|
||||
|
||||
if __name__ == '__main__':
|
||||
with Connection(conn):
|
||||
worker = Worker(map(Queue, listen))
|
||||
worker.work()
|
||||
{% endhighlight %}
|
||||
|
||||
Then, add the command to your `Procfile`:
|
||||
|
||||
worker: python -u run-worker.py
|
||||
|
||||
Now, all you have to do is spin up a worker:
|
||||
|
||||
{% highlight console %}
|
||||
$ heroku scale worker=1
|
||||
{% endhighlight %}
|
||||
|
||||
|
||||
## Putting RQ under foreman
|
||||
|
||||
[Foreman][3] is probably the process manager you use when you host your app on
|
||||
Heroku, or just because it's a pretty friendly tool to use in development.
|
||||
|
||||
When using RQ under `foreman`, you may notice that the workers are a bit
|
||||
quiet sometimes. This is because Python buffers the output, so `foreman`
|
||||
cannot (yet) echo it. Here's a related [Wiki page][4].
|
||||
|
||||
Just change the way you run your worker process, by adding the `-u` option (to
|
||||
force stdin, stdout and stderr to be totally unbuffered):
|
||||
|
||||
worker: python -u run-worker.py
|
||||
|
||||
[1]: https://heroku.com
|
||||
[2]: https://devcenter.heroku.com/articles/redistogo
|
||||
[3]: https://github.com/ddollar/foreman
|
||||
[4]: https://github.com/ddollar/foreman/wiki/Missing-Output
|
@ -0,0 +1,47 @@
|
||||
---
|
||||
title: "RQ: Sending exceptions to Sentry"
|
||||
layout: patterns
|
||||
---
|
||||
|
||||
## Sending exceptions to Sentry
|
||||
|
||||
[Sentry](https://www.getsentry.com/) is a popular exception gathering service
|
||||
that RQ has supported integration with since version 0.3.1, through its custom
|
||||
exception handlers.
|
||||
|
||||
RQ includes a convenience function that registers your existing Sentry client
|
||||
so that all exceptions are sent to it.
|
||||
|
||||
An example:
|
||||
|
||||
{% highlight python %}
|
||||
from raven import Client
|
||||
from raven.transport.http import HTTPTransport
|
||||
from rq.contrib.sentry import register_sentry
|
||||
|
||||
client = Client('<YOUR_DSN>', transport=HTTPTransport)
|
||||
register_sentry(client, worker)
|
||||
{% endhighlight %}
|
||||
|
||||
Where `worker` is your RQ worker instance. After that, call `worker.work(...)`
|
||||
to start the worker. All exceptions that occur are reported to Sentry
|
||||
automatically.
|
||||
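Putting it together, a sketch assuming a local Redis and the default queue:

{% highlight python %}
from raven import Client
from raven.transport.http import HTTPTransport
from redis import Redis
from rq import Queue, Worker
from rq.contrib.sentry import register_sentry

conn = Redis()
worker = Worker([Queue(connection=conn)], connection=conn)
client = Client('<YOUR_DSN>', transport=HTTPTransport)
register_sentry(client, worker)
worker.work()
{% endhighlight %}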
|
||||
<div class="warning" style="margin-top: 20px">
|
||||
<img style="float: right; margin-right: -60px; margin-top: -38px" src="{{site.baseurl}}img/warning.png" />
|
||||
<strong>Note:</strong>
|
||||
<p>
|
||||
Error delivery to Sentry is known to be unreliable with RQ when using
|
||||
async transports (which is the default). You are therefore encouraged to use the
|
||||
<code>HTTPTransport</code> or <code>RequestsHTTPTransport</code> when
|
||||
creating your client. See the code sample above, or the <a
|
||||
href="http://raven.readthedocs.org/en/latest/transports/index.html">Raven
|
||||
documentation</a>.
|
||||
</p>
|
||||
<p>
|
||||
For more info, see the
|
||||
<a href="http://raven.readthedocs.org/en/latest/transports/index.html#transports">Raven docs</a>.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
Read more on RQ's [custom exception handling](/docs/exceptions/) capabilities.
|
@ -0,0 +1,76 @@
|
||||
---
|
||||
title: "Putting RQ under supervisor"
|
||||
layout: patterns
|
||||
---
|
||||
|
||||
## Putting RQ under supervisor
|
||||
|
||||
[Supervisor][1] is a popular tool for managing long-running processes in
|
||||
production environments. It can automatically restart any crashed processes,
|
||||
and you gain a single dashboard for all of the running processes that make up
|
||||
your product.
|
||||
|
||||
RQ can be used in combination with supervisor easily. You'd typically want to
|
||||
use the following supervisor settings:
|
||||
|
||||
{% highlight ini %}
|
||||
[program:myworker]
|
||||
; Point the command to the specific rq command you want to run.
|
||||
; If you use virtualenv, be sure to point it to
|
||||
; /path/to/virtualenv/bin/rq
|
||||
; Also, you probably want to include a settings module to configure this
|
||||
; worker. For more info on that, see http://python-rq.org/docs/workers/
|
||||
command=/path/to/rq worker -c mysettings high normal low
|
||||
; process_num is required if you specify >1 numprocs
|
||||
process_name=%(program_name)s-%(process_num)s
|
||||
|
||||
; If you want to run more than one worker instance, increase this
|
||||
numprocs=1
|
||||
|
||||
; This is the directory from which RQ is run. Be sure to point this to the
|
||||
; directory where your source code is importable from
|
||||
directory=/path/to
|
||||
|
||||
; RQ requires the TERM signal to perform a warm shutdown. If RQ does not die
|
||||
; within 10 seconds, supervisor will forcefully kill it
|
||||
stopsignal=TERM
|
||||
|
||||
; These are up to you
|
||||
autostart=true
|
||||
autorestart=true
|
||||
{% endhighlight %}
|
||||
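After saving this into your supervisor configuration directory (e.g. `/etc/supervisor/conf.d/myworker.conf` — the exact path depends on your installation), have supervisor pick it up:

{% highlight console %}
$ supervisorctl reread
$ supervisorctl update
{% endhighlight %}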
|
||||
### Conda environments
|
||||
|
||||
[Conda][2] virtualenvs can be used for RQ jobs which require non-Python
|
||||
dependencies. You can use a similar approach to the one used with regular virtualenvs.
|
||||
|
||||
{% highlight ini %}
|
||||
[program:myworker]
|
||||
; Point the command to the specific rq command you want to run.
|
||||
; For conda virtual environments, install RQ into your env.
|
||||
; Also, you probably want to include a settings module to configure this
|
||||
; worker. For more info on that, see http://python-rq.org/docs/workers/
|
||||
environment=PATH='/opt/conda/envs/myenv/bin'
|
||||
command=/opt/conda/envs/myenv/bin/rq worker -c mysettings high normal low
|
||||
; process_num is required if you specify >1 numprocs
|
||||
process_name=%(program_name)s-%(process_num)s
|
||||
|
||||
; If you want to run more than one worker instance, increase this
|
||||
numprocs=1
|
||||
|
||||
; This is the directory from which RQ is run. Be sure to point this to the
|
||||
; directory where your source code is importable from
|
||||
directory=/path/to
|
||||
|
||||
; RQ requires the TERM signal to perform a warm shutdown. If RQ does not die
|
||||
; within 10 seconds, supervisor will forcefully kill it
|
||||
stopsignal=TERM
|
||||
|
||||
; These are up to you
|
||||
autostart=true
|
||||
autorestart=true
|
||||
{% endhighlight %}
|
||||
|
||||
[1]: http://supervisord.org/
|
||||
[2]: https://conda.io/docs/
|